From ca7ccbe2ea6be042f90f31eb75ad4dca032dbed1 Mon Sep 17 00:00:00 2001 From: Peter Dillinger Date: Thu, 24 Oct 2019 17:14:27 -0700 Subject: [PATCH] Misc hashing updates / upgrades (#5909) Summary: - Updated our included xxhash implementation to version 0.7.2 (== the latest dev version as of 2019-10-09). - Using XXH_NAMESPACE (like other fb projects) to avoid potential name collisions. - Added fastrange64, and unit tests for it and fastrange32. These are faster alternatives to hash % range. - Use preview version of XXH3 instead of MurmurHash64A for NPHash64 -- Had to update cache_test to increase probability of passing for any given hash function. - Use fastrange64 instead of % with uses of NPHash64 -- Had to fix WritePreparedTransactionTest.CommitOfDelayedPrepared to avoid a deadlock apparently caused by a new hash collision. - Set default seed for NPHash64 because specifying a seed rarely makes sense for it. - Removed an unnecessary include of xxhash.h in a popular .h file. - Rename preview version of XXH3 to XXH3p for clarity and to ease backward compatibility in case the final version of XXH3 is integrated. Relying on existing unit tests for NPHash64-related changes. Each new implementation of fastrange64 passed unit tests when manipulating my local build to select it. I haven't done any integration performance tests, but I consider the improved performance of the pieces being swapped in to be well established. Pull Request resolved: https://github.com/facebook/rocksdb/pull/5909 Differential Revision: D18125196 Pulled By: pdillinger fbshipit-source-id: f6bf83d49d20cbb2549926adf454fd035f0ecc0d --- build_tools/build_detect_platform | 13 + cache/cache_test.cc | 4 +- db/db_basic_test.cc | 5 +- db/memtable.cc | 2 +- memtable/hash_linklist_rep.cc | 3 +- .../block_based/block_based_table_builder.cc | 10 +- table/format.h | 2 - trace_replay/block_cache_tracer.cc | 4 +- util/hash.h | 61 +- util/hash_test.cc | 110 + util/xxh3p.h | 1643 +++++++++++++++ util/xxhash.cc | 1804 +++++++++-------- util/xxhash.h | 637 ++++-- .../transactions/transaction_lock_mgr.cc | 3 +- .../write_prepared_transaction_test.cc | 7 +- 15 files changed, 3267 insertions(+), 1041 deletions(-) create mode 100644 util/xxh3p.h diff --git a/build_tools/build_detect_platform b/build_tools/build_detect_platform index 042ac9f51..dcc4dfee6 100755 --- a/build_tools/build_detect_platform +++ b/build_tools/build_detect_platform @@ -605,6 +605,19 @@ elif test "$USE_SSE"; then echo "warning: USE_SSE specified but compiler could not use PCLMUL intrinsics, disabling" >&2 fi +$CXX $PLATFORM_CXXFLAGS $COMMON_FLAGS -x c++ - -o /dev/null 2>/dev/null <<EOF + #include <cstdint> + int main() { + uint64_t a = 0xffffFFFFffffFFFF; + __uint128_t b = __uint128_t(a) * a; + a = static_cast<uint64_t>(b >> 64); + (void)a; + } +EOF +if [ "$?" = 0 ]; then + COMMON_FLAGS="$COMMON_FLAGS -DHAVE_UINT128_EXTENSION" +fi + # iOS doesn't support thread-local storage, but this check would erroneously # succeed because the cross-compiler flags are added by the Makefile, not this # script. 
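Note on the fastrange technique referenced in the summary above: the idea is to map a hash into [0, range) with a widening multiply and a shift rather than an integer modulo. A minimal sketch for illustration only (the implementations this patch actually adds are fastrange32 and fastrange64 in util/hash.h below; the name fastrange32_sketch here is hypothetical):

    #include <cstdint>

    // Multiply-and-shift range mapping: the high 32 bits of the 32x32 -> 64-bit
    // product give a value scaled into [0, range), with no division needed.
    inline uint32_t fastrange32_sketch(uint32_t hash, uint32_t range) {
      uint64_t product = uint64_t{range} * hash;
      return static_cast<uint32_t>(product >> 32);
    }

Compared with hash % range, this costs one multiply plus a shift instead of a division, and it distributes results just as evenly when the hash is uniform over the full 32-bit space.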
diff --git a/cache/cache_test.cc b/cache/cache_test.cc index 1c6fc7719..a0f75bfdc 100644 --- a/cache/cache_test.cc +++ b/cache/cache_test.cc @@ -365,7 +365,7 @@ TEST_P(CacheTest, EvictionPolicy) { Insert(200, 201); // Frequently used entry must be kept around - for (int i = 0; i < kCacheSize + 200; i++) { + for (int i = 0; i < kCacheSize * 2; i++) { Insert(1000+i, 2000+i); ASSERT_EQ(101, Lookup(100)); } @@ -418,7 +418,7 @@ TEST_P(CacheTest, EvictionPolicyRef) { Insert(303, 104); // Insert entries much more than Cache capacity - for (int i = 0; i < kCacheSize + 200; i++) { + for (int i = 0; i < kCacheSize * 2; i++) { Insert(1000 + i, 2000 + i); } diff --git a/db/db_basic_test.cc b/db/db_basic_test.cc index 07d27a41a..70ecb74ef 100644 --- a/db/db_basic_test.cc +++ b/db/db_basic_test.cc @@ -871,11 +871,12 @@ TEST_F(DBBasicTest, ChecksumTest) { ASSERT_OK(Flush()); } - // verify data with each type of checksum - for (int i = 0; i <= kxxHash64; ++i) { + // with each valid checksum type setting... + for (int i = 0; i <= max_checksum; ++i) { table_options.checksum = static_cast<ChecksumType>(i); options.table_factory.reset(NewBlockBasedTableFactory(table_options)); Reopen(options); + // verify every type of checksum (should be readable regardless of that setting) for (int j = 0; j < (max_checksum + 1) * kNumPerFile; ++j) { ASSERT_EQ(Key(j), Get(Key(j))); } diff --git a/db/memtable.cc b/db/memtable.cc index 8b2ddf0e1..e3c531c31 100644 --- a/db/memtable.cc +++ b/db/memtable.cc @@ -442,7 +442,7 @@ FragmentedRangeTombstoneIterator* MemTable::NewRangeTombstoneIterator( } port::RWMutex* MemTable::GetLock(const Slice& key) { - return &locks_[static_cast<size_t>(GetSliceNPHash64(key)) % locks_.size()]; + return &locks_[fastrange64(GetSliceNPHash64(key), locks_.size())]; } MemTable::MemTableStats MemTable::ApproximateStats(const Slice& start_ikey, diff --git a/memtable/hash_linklist_rep.cc b/memtable/hash_linklist_rep.cc index e347abe6e..5c906165e 100644 --- a/memtable/hash_linklist_rep.cc +++ b/memtable/hash_linklist_rep.cc @@ -218,8 +218,7 @@ class HashLinkListRep : public MemTableRep { } size_t GetHash(const Slice& slice) const { - return NPHash64(slice.data(), static_cast(slice.size()), 0) % - bucket_size_; + return fastrange64(GetSliceNPHash64(slice), bucket_size_); } Pointer* GetBucket(size_t i) const { diff --git a/table/block_based/block_based_table_builder.cc b/table/block_based/block_based_table_builder.cc index cae93f7f2..c137581ee 100644 --- a/table/block_based/block_based_table_builder.cc +++ b/table/block_based/block_based_table_builder.cc @@ -733,11 +733,13 @@ void BlockBasedTableBuilder::WriteRawBlock(const Slice& block_contents, break; } case kxxHash: { - void* xxh = XXH32_init(0); - XXH32_update(xxh, block_contents.data(), + XXH32_state_t* const state = XXH32_createState(); + XXH32_reset(state, 0); + XXH32_update(state, block_contents.data(), static_cast<uint32_t>(block_contents.size())); - XXH32_update(xxh, trailer, 1); // Extend to cover block type - EncodeFixed32(trailer_without_type, XXH32_digest(xxh)); + XXH32_update(state, trailer, 1); // Extend to cover block type + EncodeFixed32(trailer_without_type, XXH32_digest(state)); + XXH32_freeState(state); break; } case kxxHash64: { diff --git a/table/format.h b/table/format.h index 552cd940d..0722d97b8 100644 --- a/table/format.h +++ b/table/format.h @@ -23,8 +23,6 @@ #include "port/malloc.h" #include "port/port.h" // noexcept #include "table/persistent_cache_options.h" -#include "util/crc32c.h" -#include "util/xxhash.h" namespace rocksdb { diff --git 
a/trace_replay/block_cache_tracer.cc b/trace_replay/block_cache_tracer.cc index c70290b67..0cf394afa 100644 --- a/trace_replay/block_cache_tracer.cc +++ b/trace_replay/block_cache_tracer.cc @@ -28,8 +28,8 @@ bool ShouldTrace(const Slice& block_key, const TraceOptions& trace_options) { } // We use spatial downsampling so that we have a complete access history for a // block. - const uint64_t hash = GetSliceNPHash64(block_key); - return hash % trace_options.sampling_frequency == 0; + return 0 == fastrange64(GetSliceNPHash64(block_key), + trace_options.sampling_frequency); } } // namespace diff --git a/util/hash.h b/util/hash.h index 836f325ef..d55e74fec 100644 --- a/util/hash.h +++ b/util/hash.h @@ -7,20 +7,25 @@ // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. See the AUTHORS file for names of contributors. // -// Simple hash function used for internal data structures +// Common hash functions with convenient interfaces. #pragma once #include #include "rocksdb/slice.h" -#include "util/murmurhash.h" +#include "util/xxhash.h" namespace rocksdb { -// Non-persistent hash. Only used for in-memory data structure -// The hash results are applicable to change. -extern uint64_t NPHash64(const char* data, size_t n, uint32_t seed); +// Non-persistent hash. Must only be used for in-memory data structures. +// The hash results are thus subject to change. (Thus, it rarely makes +// sense to specify a seed for this function.) +inline uint64_t NPHash64(const char* data, size_t n, uint32_t seed = 0) { + // XXH3 is currently experimental, but generally faster than other quality + // 64-bit hash functions. + return XXH3p_64bits_withSeed(data, n, seed); +} extern uint32_t Hash(const char* data, size_t n, uint32_t seed); @@ -29,32 +34,52 @@ inline uint32_t BloomHash(const Slice& key) { } inline uint64_t GetSliceNPHash64(const Slice& s) { - return NPHash64(s.data(), s.size(), 0); + return NPHash64(s.data(), s.size()); } inline uint32_t GetSliceHash(const Slice& s) { return Hash(s.data(), s.size(), 397); } -inline uint64_t NPHash64(const char* data, size_t n, uint32_t seed) { - // Right now murmurhash2B is used. It should able to be freely - // changed to a better hash, without worrying about backward - // compatibility issue. - return MURMUR_HASH(data, static_cast(n), - static_cast(seed)); -} - // std::hash compatible interface. struct SliceHasher { uint32_t operator()(const Slice& s) const { return GetSliceHash(s); } }; // An alternative to % for mapping a hash value to an arbitrary range. See -// https://github.com/lemire/fastrange and -// https://github.com/pdillinger/wormhashing/blob/2c4035a4462194bf15f3e9fc180c27c513335225/bloom_simulation_tests/foo.cc#L57 -inline uint32_t fastrange32(uint32_t a, uint32_t h) { - uint64_t product = static_cast(a) * h; +// https://github.com/lemire/fastrange +inline uint32_t fastrange32(uint32_t hash, uint32_t range) { + uint64_t product = uint64_t{range} * hash; return static_cast<uint32_t>(product >> 32); } +// An alternative to % for mapping a 64-bit hash value to an arbitrary range +// that fits in size_t. See https://github.com/lemire/fastrange +// We find size_t more convenient than uint64_t for the range, with the side +// benefit of better optimization on 32-bit platforms. +inline size_t fastrange64(uint64_t hash, size_t range) { +#if defined(HAVE_UINT128_EXTENSION) + // Can use compiler's 128-bit type. Trust it to do the right thing. 
+ __uint128_t wide = __uint128_t{range} * hash; + return static_cast<size_t>(wide >> 64); +#else + // Fall back: full decomposition. + // NOTE: GCC seems to fully understand this code as 64-bit x {32 or 64}-bit + // -> {96 or 128}-bit multiplication and optimize it down to a single + // wide-result multiplication (64-bit platform) or two wide-result + // multiplications (32-bit platforms, where range64 >> 32 is zero). + uint64_t range64 = range; // ok to shift by 32, even if size_t is 32-bit + uint64_t tmp = uint64_t{range64 & 0xffffFFFF} * uint64_t{hash & 0xffffFFFF}; + tmp >>= 32; + tmp += uint64_t{range64 & 0xffffFFFF} * uint64_t{hash >> 32}; + // Avoid overflow: first add lower 32 of tmp2, and later upper 32 + uint64_t tmp2 = uint64_t{range64 >> 32} * uint64_t{hash & 0xffffFFFF}; + tmp += static_cast<uint32_t>(tmp2); + tmp >>= 32; + tmp += (tmp2 >> 32); + tmp += uint64_t{range64 >> 32} * uint64_t{hash >> 32}; + return static_cast<size_t>(tmp); +#endif +} + } // namespace rocksdb diff --git a/util/hash_test.cc b/util/hash_test.cc index dcfe39fbc..27eddfbe5 100644 --- a/util/hash_test.cc +++ b/util/hash_test.cc @@ -70,6 +70,116 @@ TEST(HashTest, Values) { 3382479516u); } +TEST(Fastrange32Test, Values) { + using rocksdb::fastrange32; + // Zero range + EXPECT_EQ(fastrange32(0, 0), 0U); + EXPECT_EQ(fastrange32(123, 0), 0U); + EXPECT_EQ(fastrange32(0xffffffff, 0), 0U); + + // One range + EXPECT_EQ(fastrange32(0, 1), 0U); + EXPECT_EQ(fastrange32(123, 1), 0U); + EXPECT_EQ(fastrange32(0xffffffff, 1), 0U); + + // Two range + EXPECT_EQ(fastrange32(0, 2), 0U); + EXPECT_EQ(fastrange32(123, 2), 0U); + EXPECT_EQ(fastrange32(0x7fffffff, 2), 0U); + EXPECT_EQ(fastrange32(0x80000000, 2), 1U); + EXPECT_EQ(fastrange32(0xffffffff, 2), 1U); + + // Seven range + EXPECT_EQ(fastrange32(0, 7), 0U); + EXPECT_EQ(fastrange32(123, 7), 0U); + EXPECT_EQ(fastrange32(613566756, 7), 0U); + EXPECT_EQ(fastrange32(613566757, 7), 1U); + EXPECT_EQ(fastrange32(1227133513, 7), 1U); + EXPECT_EQ(fastrange32(1227133514, 7), 2U); + // etc. 
+ EXPECT_EQ(fastrange32(0xffffffff, 7), 6U); + + // Big + EXPECT_EQ(fastrange32(1, 0x80000000), 0U); + EXPECT_EQ(fastrange32(2, 0x80000000), 1U); + EXPECT_EQ(fastrange32(4, 0x7fffffff), 1U); + EXPECT_EQ(fastrange32(4, 0x80000000), 2U); + EXPECT_EQ(fastrange32(0xffffffff, 0x7fffffff), 0x7ffffffeU); + EXPECT_EQ(fastrange32(0xffffffff, 0x80000000), 0x7fffffffU); +} + +TEST(Fastrange64Test, Values) { + using rocksdb::fastrange64; + // Zero range + EXPECT_EQ(fastrange64(0, 0), 0U); + EXPECT_EQ(fastrange64(123, 0), 0U); + EXPECT_EQ(fastrange64(0xffffFFFF, 0), 0U); + EXPECT_EQ(fastrange64(0xffffFFFFffffFFFF, 0), 0U); + + // One range + EXPECT_EQ(fastrange64(0, 1), 0U); + EXPECT_EQ(fastrange64(123, 1), 0U); + EXPECT_EQ(fastrange64(0xffffFFFF, 1), 0U); + EXPECT_EQ(fastrange64(0xffffFFFFffffFFFF, 1), 0U); + + // Two range + EXPECT_EQ(fastrange64(0, 2), 0U); + EXPECT_EQ(fastrange64(123, 2), 0U); + EXPECT_EQ(fastrange64(0xffffFFFF, 2), 0U); + EXPECT_EQ(fastrange64(0x7fffFFFFffffFFFF, 2), 0U); + EXPECT_EQ(fastrange64(0x8000000000000000, 2), 1U); + EXPECT_EQ(fastrange64(0xffffFFFFffffFFFF, 2), 1U); + + // Seven range + EXPECT_EQ(fastrange64(0, 7), 0U); + EXPECT_EQ(fastrange64(123, 7), 0U); + EXPECT_EQ(fastrange64(0xffffFFFF, 7), 0U); + EXPECT_EQ(fastrange64(2635249153387078802, 7), 0U); + EXPECT_EQ(fastrange64(2635249153387078803, 7), 1U); + EXPECT_EQ(fastrange64(5270498306774157604, 7), 1U); + EXPECT_EQ(fastrange64(5270498306774157605, 7), 2U); + EXPECT_EQ(fastrange64(0x7fffFFFFffffFFFF, 7), 3U); + EXPECT_EQ(fastrange64(0x8000000000000000, 7), 3U); + EXPECT_EQ(fastrange64(0xffffFFFFffffFFFF, 7), 6U); + + // Big but 32-bit range + EXPECT_EQ(fastrange64(0x100000000, 0x80000000), 0U); + EXPECT_EQ(fastrange64(0x200000000, 0x80000000), 1U); + EXPECT_EQ(fastrange64(0x400000000, 0x7fffFFFF), 1U); + EXPECT_EQ(fastrange64(0x400000000, 0x80000000), 2U); + EXPECT_EQ(fastrange64(0xffffFFFFffffFFFF, 0x7fffFFFF), 0x7fffFFFEU); + EXPECT_EQ(fastrange64(0xffffFFFFffffFFFF, 0x80000000), 0x7fffFFFFU); + + // Big, > 32-bit range +#if SIZE_MAX == UINT64_MAX + EXPECT_EQ(fastrange64(0x7fffFFFFffffFFFF, 0x4200000002), 0x2100000000U); + EXPECT_EQ(fastrange64(0x8000000000000000, 0x4200000002), 0x2100000001U); + + EXPECT_EQ(fastrange64(0x0000000000000000, 420000000002), 0U); + EXPECT_EQ(fastrange64(0x7fffFFFFffffFFFF, 420000000002), 210000000000U); + EXPECT_EQ(fastrange64(0x8000000000000000, 420000000002), 210000000001U); + EXPECT_EQ(fastrange64(0xffffFFFFffffFFFF, 420000000002), 420000000001U); + + EXPECT_EQ(fastrange64(0xffffFFFFffffFFFF, 0xffffFFFFffffFFFF), + 0xffffFFFFffffFFFEU); +#endif +} + +// for inspection of disassembly +uint32_t fastrange32(uint32_t hash, uint32_t range) { + return rocksdb::fastrange32(hash, range); +} + +// for inspection of disassembly +size_t fastrange64(uint64_t hash, size_t range) { + return rocksdb::fastrange64(hash, range); +} + +// for inspection of disassembly +uint64_t NPHash64(const char* data, size_t n) { + return rocksdb::NPHash64(data, n); +} + int main(int argc, char** argv) { ::testing::InitGoogleTest(&argc, argv); diff --git a/util/xxh3p.h b/util/xxh3p.h new file mode 100644 index 000000000..7d28ad3f2 --- /dev/null +++ b/util/xxh3p.h @@ -0,0 +1,1643 @@ +// Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). 
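Note on how the new hash utilities are meant to be used at call sites: the pattern this patch applies (for example in MemTable::GetLock, HashLinkListRep::GetHash, and ShouldTrace above) is to hash with NPHash64 or GetSliceNPHash64 and then map the result into a bucket count with fastrange64 instead of %. A usage-style sketch for illustration only (PickStripe is a hypothetical helper, not part of the patch):

    #include <cstddef>

    #include "rocksdb/slice.h"
    #include "util/hash.h"  // GetSliceNPHash64, fastrange64

    // Pick an in-memory stripe/bucket for a key without an integer division.
    // NPHash64 results may change between releases, so an index computed this
    // way must never be persisted.
    size_t PickStripe(const rocksdb::Slice& key, size_t num_stripes) {
      return rocksdb::fastrange64(rocksdb::GetSliceNPHash64(key), num_stripes);
    }

The same property is why a default seed makes sense for NPHash64: callers like this only need a value that is stable within a process, not across versions.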
+/* + xxHash - Extremely Fast Hash algorithm + Development source file for `xxh3` + Copyright (C) 2019-present, Yann Collet. + + BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following disclaimer + in the documentation and/or other materials provided with the + distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + You can contact the author at : + - xxHash source repository : https://github.com/Cyan4973/xxHash +*/ + +/* RocksDB Note: This file contains a preview release (xxhash repository + version 0.7.2) of XXH3 that is unlikely to be compatible with the final + version of XXH3. We have therefore renamed this XXH3p ("preview"), for + clarity so that we can continue to use this version even after + integrating a newer incompatible version. +*/ + +/* Note : + This file is separated for development purposes. + It will be integrated into `xxhash.c` when development phase is complete. +*/ + +#ifndef XXH3p_H +#define XXH3p_H + + +/* === Dependencies === */ + +#undef XXH_INLINE_ALL /* in case it's already defined */ +#define XXH_INLINE_ALL +#include "xxhash.h" + + +/* === Compiler specifics === */ + +#if defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L /* >= C99 */ +# define XXH_RESTRICT restrict +#else +/* note : it might be useful to define __restrict or __restrict__ for some C++ compilers */ +# define XXH_RESTRICT /* disable */ +#endif + +#if defined(__GNUC__) +# if defined(__AVX2__) +# include +# elif defined(__SSE2__) +# include +# elif defined(__ARM_NEON__) || defined(__ARM_NEON) +# define inline __inline__ /* clang bug */ +# include +# undef inline +# endif +#elif defined(_MSC_VER) +# include +#endif + +/* + * Sanity check. + * + * XXH3 only requires these features to be efficient: + * + * - Usable unaligned access + * - A 32-bit or 64-bit ALU + * - If 32-bit, a decent ADC instruction + * - A 32 or 64-bit multiply with a 64-bit result + * + * Almost all 32-bit and 64-bit targets meet this, except for Thumb-1, the + * classic 16-bit only subset of ARM's instruction set. + * + * First of all, Thumb-1 lacks support for the UMULL instruction which + * performs the important long multiply. This means numerous __aeabi_lmul + * calls. + * + * Second of all, the 8 functional registers are just not enough. 
+ * Setup for __aeabi_lmul, byteshift loads, pointers, and all arithmetic need + * Lo registers, and this shuffling results in thousands more MOVs than A32. + * + * A32 and T32 don't have this limitation. They can access all 14 registers, + * do a 32->64 multiply with UMULL, and the flexible operand is helpful too. + * + * If compiling Thumb-1 for a target which supports ARM instructions, we + * will give a warning. + * + * Usually, if this happens, it is because of an accident and you probably + * need to specify -march, as you probably meant to compileh for a newer + * architecture. + */ +#if defined(__thumb__) && !defined(__thumb2__) && defined(__ARM_ARCH_ISA_ARM) +# warning "XXH3 is highly inefficient without ARM or Thumb-2." +#endif + +/* ========================================== + * Vectorization detection + * ========================================== */ +#define XXH_SCALAR 0 +#define XXH_SSE2 1 +#define XXH_AVX2 2 +#define XXH_NEON 3 +#define XXH_VSX 4 + +#ifndef XXH_VECTOR /* can be defined on command line */ +# if defined(__AVX2__) +# define XXH_VECTOR XXH_AVX2 +# elif defined(__SSE2__) || defined(_M_AMD64) || defined(_M_X64) || (defined(_M_IX86_FP) && (_M_IX86_FP == 2)) +# define XXH_VECTOR XXH_SSE2 +# elif defined(__GNUC__) /* msvc support maybe later */ \ + && (defined(__ARM_NEON__) || defined(__ARM_NEON)) \ + && (defined(__LITTLE_ENDIAN__) /* We only support little endian NEON */ \ + || (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__)) +# define XXH_VECTOR XXH_NEON +# elif defined(__PPC64__) && defined(__POWER8_VECTOR__) && defined(__GNUC__) +# define XXH_VECTOR XXH_VSX +# else +# define XXH_VECTOR XXH_SCALAR +# endif +#endif + +/* control alignment of accumulator, + * for compatibility with fast vector loads */ +#ifndef XXH_ACC_ALIGN +# if XXH_VECTOR == 0 /* scalar */ +# define XXH_ACC_ALIGN 8 +# elif XXH_VECTOR == 1 /* sse2 */ +# define XXH_ACC_ALIGN 16 +# elif XXH_VECTOR == 2 /* avx2 */ +# define XXH_ACC_ALIGN 32 +# elif XXH_VECTOR == 3 /* neon */ +# define XXH_ACC_ALIGN 16 +# elif XXH_VECTOR == 4 /* vsx */ +# define XXH_ACC_ALIGN 16 +# endif +#endif + +/* xxh_u64 XXH_mult32to64(xxh_u32 a, xxh_u64 b) { return (xxh_u64)a * (xxh_u64)b; } */ +#if defined(_MSC_VER) && defined(_M_IX86) +# include +# define XXH_mult32to64(x, y) __emulu(x, y) +#else +# define XXH_mult32to64(x, y) ((xxh_u64)((x) & 0xFFFFFFFF) * (xxh_u64)((y) & 0xFFFFFFFF)) +#endif + +/* VSX stuff. It's a lot because VSX support is mediocre across compilers and + * there is a lot of mischief with endianness. */ +#if XXH_VECTOR == XXH_VSX +# include +# undef vector +typedef __vector unsigned long long U64x2; +typedef __vector unsigned char U8x16; +typedef __vector unsigned U32x4; + +#ifndef XXH_VSX_BE +# if defined(__BIG_ENDIAN__) \ + || (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__) +# define XXH_VSX_BE 1 +# elif defined(__VEC_ELEMENT_REG_ORDER__) && __VEC_ELEMENT_REG_ORDER__ == __ORDER_BIG_ENDIAN__ +# warning "-maltivec=be is not recommended. Please use native endianness." +# define XXH_VSX_BE 1 +# else +# define XXH_VSX_BE 0 +# endif +#endif + +/* We need some helpers for big endian mode. */ +#if XXH_VSX_BE +/* A wrapper for POWER9's vec_revb. 
*/ +# ifdef __POWER9_VECTOR__ +# define XXH_vec_revb vec_revb +# else +XXH_FORCE_INLINE U64x2 XXH_vec_revb(U64x2 val) +{ + U8x16 const vByteSwap = { 0x07, 0x06, 0x05, 0x04, 0x03, 0x02, 0x01, 0x00, + 0x0F, 0x0E, 0x0D, 0x0C, 0x0B, 0x0A, 0x09, 0x08 }; + return vec_perm(val, val, vByteSwap); +} +# endif + +/* Power8 Crypto gives us vpermxor which is very handy for + * PPC64EB. + * + * U8x16 vpermxor(U8x16 a, U8x16 b, U8x16 mask) + * { + * U8x16 ret; + * for (int i = 0; i < 16; i++) { + * ret[i] = a[mask[i] & 0xF] ^ b[mask[i] >> 4]; + * } + * return ret; + * } + * + * Because both of the main loops load the key, swap, and xor it with input, + * we can combine the key swap into this instruction. + */ +# ifdef vec_permxor +# define XXH_vec_permxor vec_permxor +# else +# define XXH_vec_permxor __builtin_crypto_vpermxor +# endif +#endif /* XXH_VSX_BE */ +/* + * Because we reinterpret the multiply, there are endian memes: vec_mulo actually becomes + * vec_mule. + * + * Additionally, the intrinsic wasn't added until GCC 8, despite existing for a while. + * Clang has an easy way to control this, we can just use the builtin which doesn't swap. + * GCC needs inline assembly. */ +#if __has_builtin(__builtin_altivec_vmuleuw) +# define XXH_vec_mulo __builtin_altivec_vmulouw +# define XXH_vec_mule __builtin_altivec_vmuleuw +#else +/* Adapted from https://github.com/google/highwayhash/blob/master/highwayhash/hh_vsx.h. */ +XXH_FORCE_INLINE U64x2 XXH_vec_mulo(U32x4 a, U32x4 b) { + U64x2 result; + __asm__("vmulouw %0, %1, %2" : "=v" (result) : "v" (a), "v" (b)); + return result; +} +XXH_FORCE_INLINE U64x2 XXH_vec_mule(U32x4 a, U32x4 b) { + U64x2 result; + __asm__("vmuleuw %0, %1, %2" : "=v" (result) : "v" (a), "v" (b)); + return result; +} +#endif /* __has_builtin(__builtin_altivec_vmuleuw) */ +#endif /* XXH_VECTOR == XXH_VSX */ + +/* prefetch + * can be disabled, by declaring XXH_NO_PREFETCH build macro */ +#if defined(XXH_NO_PREFETCH) +# define XXH_PREFETCH(ptr) (void)(ptr) /* disabled */ +#else +# if defined(_MSC_VER) && (defined(_M_X64) || defined(_M_I86)) /* _mm_prefetch() is not defined outside of x86/x64 */ +# include /* https://msdn.microsoft.com/fr-fr/library/84szxsww(v=vs.90).aspx */ +# define XXH_PREFETCH(ptr) _mm_prefetch((const char*)(ptr), _MM_HINT_T0) +# elif defined(__GNUC__) && ( (__GNUC__ >= 4) || ( (__GNUC__ == 3) && (__GNUC_MINOR__ >= 1) ) ) +# define XXH_PREFETCH(ptr) __builtin_prefetch((ptr), 0 /* rw==read */, 3 /* locality */) +# else +# define XXH_PREFETCH(ptr) (void)(ptr) /* disabled */ +# endif +#endif /* XXH_NO_PREFETCH */ + + +/* ========================================== + * XXH3 default settings + * ========================================== */ + +#define XXH_SECRET_DEFAULT_SIZE 192 /* minimum XXH3p_SECRET_SIZE_MIN */ + +#if (XXH_SECRET_DEFAULT_SIZE < XXH3p_SECRET_SIZE_MIN) +# error "default keyset is not large enough" +#endif + +XXH_ALIGN(64) static const xxh_u8 kSecret[XXH_SECRET_DEFAULT_SIZE] = { + 0xb8, 0xfe, 0x6c, 0x39, 0x23, 0xa4, 0x4b, 0xbe, 0x7c, 0x01, 0x81, 0x2c, 0xf7, 0x21, 0xad, 0x1c, + 0xde, 0xd4, 0x6d, 0xe9, 0x83, 0x90, 0x97, 0xdb, 0x72, 0x40, 0xa4, 0xa4, 0xb7, 0xb3, 0x67, 0x1f, + 0xcb, 0x79, 0xe6, 0x4e, 0xcc, 0xc0, 0xe5, 0x78, 0x82, 0x5a, 0xd0, 0x7d, 0xcc, 0xff, 0x72, 0x21, + 0xb8, 0x08, 0x46, 0x74, 0xf7, 0x43, 0x24, 0x8e, 0xe0, 0x35, 0x90, 0xe6, 0x81, 0x3a, 0x26, 0x4c, + 0x3c, 0x28, 0x52, 0xbb, 0x91, 0xc3, 0x00, 0xcb, 0x88, 0xd0, 0x65, 0x8b, 0x1b, 0x53, 0x2e, 0xa3, + 0x71, 0x64, 0x48, 0x97, 0xa2, 0x0d, 0xf9, 0x4e, 0x38, 0x19, 0xef, 0x46, 0xa9, 0xde, 0xac, 0xd8, + 
0xa8, 0xfa, 0x76, 0x3f, 0xe3, 0x9c, 0x34, 0x3f, 0xf9, 0xdc, 0xbb, 0xc7, 0xc7, 0x0b, 0x4f, 0x1d, + 0x8a, 0x51, 0xe0, 0x4b, 0xcd, 0xb4, 0x59, 0x31, 0xc8, 0x9f, 0x7e, 0xc9, 0xd9, 0x78, 0x73, 0x64, + + 0xea, 0xc5, 0xac, 0x83, 0x34, 0xd3, 0xeb, 0xc3, 0xc5, 0x81, 0xa0, 0xff, 0xfa, 0x13, 0x63, 0xeb, + 0x17, 0x0d, 0xdd, 0x51, 0xb7, 0xf0, 0xda, 0x49, 0xd3, 0x16, 0x55, 0x26, 0x29, 0xd4, 0x68, 0x9e, + 0x2b, 0x16, 0xbe, 0x58, 0x7d, 0x47, 0xa1, 0xfc, 0x8f, 0xf8, 0xb8, 0xd1, 0x7a, 0xd0, 0x31, 0xce, + 0x45, 0xcb, 0x3a, 0x8f, 0x95, 0x16, 0x04, 0x28, 0xaf, 0xd7, 0xfb, 0xca, 0xbb, 0x4b, 0x40, 0x7e, +}; + +/* + * GCC for x86 has a tendency to use SSE in this loop. While it + * successfully avoids swapping (as MUL overwrites EAX and EDX), it + * slows it down because instead of free register swap shifts, it + * must use pshufd and punpckl/hd. + * + * To prevent this, we use this attribute to shut off SSE. + */ +#if defined(__GNUC__) && !defined(__clang__) && defined(__i386__) +__attribute__((__target__("no-sse"))) +#endif +static XXH128_hash_t +XXH_mult64to128(xxh_u64 lhs, xxh_u64 rhs) +{ + /* + * GCC/Clang __uint128_t method. + * + * On most 64-bit targets, GCC and Clang define a __uint128_t type. + * This is usually the best way as it usually uses a native long 64-bit + * multiply, such as MULQ on x86_64 or MUL + UMULH on aarch64. + * + * Usually. + * + * Despite being a 32-bit platform, Clang (and emscripten) define this + * type despite not having the arithmetic for it. This results in a + * laggy compiler builtin call which calculates a full 128-bit multiply. + * In that case it is best to use the portable one. + * https://github.com/Cyan4973/xxHash/issues/211#issuecomment-515575677 + */ +#if defined(__GNUC__) && !defined(__wasm__) \ + && defined(__SIZEOF_INT128__) \ + || (defined(_INTEGRAL_MAX_BITS) && _INTEGRAL_MAX_BITS >= 128) + + __uint128_t product = (__uint128_t)lhs * (__uint128_t)rhs; + XXH128_hash_t const r128 = { (xxh_u64)(product), (xxh_u64)(product >> 64) }; + return r128; + + /* + * MSVC for x64's _umul128 method. + * + * xxh_u64 _umul128(xxh_u64 Multiplier, xxh_u64 Multiplicand, xxh_u64 *HighProduct); + * + * This compiles to single operand MUL on x64. + */ +#elif defined(_M_X64) || defined(_M_IA64) + +#ifndef _MSC_VER +# pragma intrinsic(_umul128) +#endif + xxh_u64 product_high; + xxh_u64 const product_low = _umul128(lhs, rhs, &product_high); + XXH128_hash_t const r128 = { product_low, product_high }; + return r128; + +#else + /* + * Portable scalar method. Optimized for 32-bit and 64-bit ALUs. + * + * This is a fast and simple grade school multiply, which is shown + * below with base 10 arithmetic instead of base 0x100000000. + * + * 9 3 // D2 lhs = 93 + * x 7 5 // D2 rhs = 75 + * ---------- + * 1 5 // D2 lo_lo = (93 % 10) * (75 % 10) + * 4 5 | // D2 hi_lo = (93 / 10) * (75 % 10) + * 2 1 | // D2 lo_hi = (93 % 10) * (75 / 10) + * + 6 3 | | // D2 hi_hi = (93 / 10) * (75 / 10) + * --------- + * 2 7 | // D2 cross = (15 / 10) + (45 % 10) + 21 + * + 6 7 | | // D2 upper = (27 / 10) + (45 / 10) + 63 + * --------- + * 6 9 7 5 + * + * The reasons for adding the products like this are: + * 1. It avoids manual carry tracking. Just like how + * (9 * 9) + 9 + 9 = 99, the same applies with this for + * UINT64_MAX. This avoids a lot of complexity. + * + * 2. 
It hints for, and on Clang, compiles to, the powerful UMAAL + * instruction available in ARMv6+ A32/T32, which is shown below: + * + * void UMAAL(xxh_u32 *RdLo, xxh_u32 *RdHi, xxh_u32 Rn, xxh_u32 Rm) + * { + * xxh_u64 product = (xxh_u64)*RdLo * (xxh_u64)*RdHi + Rn + Rm; + * *RdLo = (xxh_u32)(product & 0xFFFFFFFF); + * *RdHi = (xxh_u32)(product >> 32); + * } + * + * This instruction was designed for efficient long multiplication, + * and allows this to be calculated in only 4 instructions which + * is comparable to some 64-bit ALUs. + * + * 3. It isn't terrible on other platforms. Usually this will be + * a couple of 32-bit ADD/ADCs. + */ + + /* First calculate all of the cross products. */ + xxh_u64 const lo_lo = XXH_mult32to64(lhs & 0xFFFFFFFF, rhs & 0xFFFFFFFF); + xxh_u64 const hi_lo = XXH_mult32to64(lhs >> 32, rhs & 0xFFFFFFFF); + xxh_u64 const lo_hi = XXH_mult32to64(lhs & 0xFFFFFFFF, rhs >> 32); + xxh_u64 const hi_hi = XXH_mult32to64(lhs >> 32, rhs >> 32); + + /* Now add the products together. These will never overflow. */ + xxh_u64 const cross = (lo_lo >> 32) + (hi_lo & 0xFFFFFFFF) + lo_hi; + xxh_u64 const upper = (hi_lo >> 32) + (cross >> 32) + hi_hi; + xxh_u64 const lower = (cross << 32) | (lo_lo & 0xFFFFFFFF); + + XXH128_hash_t r128 = { lower, upper }; + return r128; +#endif +} + +/* + * We want to keep the attribute here because a target switch + * disables inlining. + * + * Does a 64-bit to 128-bit multiply, then XOR folds it. + * The reason for the separate function is to prevent passing + * too many structs around by value. This will hopefully inline + * the multiply, but we don't force it. + */ +#if defined(__GNUC__) && !defined(__clang__) && defined(__i386__) +__attribute__((__target__("no-sse"))) +#endif +static xxh_u64 +XXH3p_mul128_fold64(xxh_u64 lhs, xxh_u64 rhs) +{ + XXH128_hash_t product = XXH_mult64to128(lhs, rhs); + return product.low64 ^ product.high64; +} + + +static XXH64_hash_t XXH3p_avalanche(xxh_u64 h64) +{ + h64 ^= h64 >> 37; + h64 *= PRIME64_3; + h64 ^= h64 >> 32; + return h64; +} + + +/* ========================================== + * Short keys + * ========================================== */ + +XXH_FORCE_INLINE XXH64_hash_t +XXH3p_len_1to3_64b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed) +{ + XXH_ASSERT(input != NULL); + XXH_ASSERT(1 <= len && len <= 3); + XXH_ASSERT(secret != NULL); + { xxh_u8 const c1 = input[0]; + xxh_u8 const c2 = input[len >> 1]; + xxh_u8 const c3 = input[len - 1]; + xxh_u32 const combined = ((xxh_u32)c1) | (((xxh_u32)c2) << 8) | (((xxh_u32)c3) << 16) | (((xxh_u32)len) << 24); + xxh_u64 const keyed = (xxh_u64)combined ^ (XXH_readLE32(secret) + seed); + xxh_u64 const mixed = keyed * PRIME64_1; + return XXH3p_avalanche(mixed); + } +} + +XXH_FORCE_INLINE XXH64_hash_t +XXH3p_len_4to8_64b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed) +{ + XXH_ASSERT(input != NULL); + XXH_ASSERT(secret != NULL); + XXH_ASSERT(4 <= len && len <= 8); + { xxh_u32 const input_lo = XXH_readLE32(input); + xxh_u32 const input_hi = XXH_readLE32(input + len - 4); + xxh_u64 const input_64 = input_lo | ((xxh_u64)input_hi << 32); + xxh_u64 const keyed = input_64 ^ (XXH_readLE64(secret) + seed); + xxh_u64 const mix64 = len + ((keyed ^ (keyed >> 51)) * PRIME32_1); + return XXH3p_avalanche((mix64 ^ (mix64 >> 47)) * PRIME64_2); + } +} + +XXH_FORCE_INLINE XXH64_hash_t +XXH3p_len_9to16_64b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed) +{ + XXH_ASSERT(input != NULL); + XXH_ASSERT(secret != 
NULL); + XXH_ASSERT(9 <= len && len <= 16); + { xxh_u64 const input_lo = XXH_readLE64(input) ^ (XXH_readLE64(secret) + seed); + xxh_u64 const input_hi = XXH_readLE64(input + len - 8) ^ (XXH_readLE64(secret + 8) - seed); + xxh_u64 const acc = len + (input_lo + input_hi) + XXH3p_mul128_fold64(input_lo, input_hi); + return XXH3p_avalanche(acc); + } +} + +XXH_FORCE_INLINE XXH64_hash_t +XXH3p_len_0to16_64b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed) +{ + XXH_ASSERT(len <= 16); + { if (len > 8) return XXH3p_len_9to16_64b(input, len, secret, seed); + if (len >= 4) return XXH3p_len_4to8_64b(input, len, secret, seed); + if (len) return XXH3p_len_1to3_64b(input, len, secret, seed); + return 0; + } +} + + +/* === Long Keys === */ + +#define STRIPE_LEN 64 +#define XXH_SECRET_CONSUME_RATE 8 /* nb of secret bytes consumed at each accumulation */ +#define ACC_NB (STRIPE_LEN / sizeof(xxh_u64)) + +typedef enum { XXH3p_acc_64bits, XXH3p_acc_128bits } XXH3p_accWidth_e; + +XXH_FORCE_INLINE void +XXH3p_accumulate_512( void* XXH_RESTRICT acc, + const void* XXH_RESTRICT input, + const void* XXH_RESTRICT secret, + XXH3p_accWidth_e accWidth) +{ +#if (XXH_VECTOR == XXH_AVX2) + + XXH_ASSERT((((size_t)acc) & 31) == 0); + { XXH_ALIGN(32) __m256i* const xacc = (__m256i *) acc; + const __m256i* const xinput = (const __m256i *) input; /* not really aligned, just for ptr arithmetic, and because _mm256_loadu_si256() requires this type */ + const __m256i* const xsecret = (const __m256i *) secret; /* not really aligned, just for ptr arithmetic, and because _mm256_loadu_si256() requires this type */ + + size_t i; + for (i=0; i < STRIPE_LEN/sizeof(__m256i); i++) { + __m256i const data_vec = _mm256_loadu_si256 (xinput+i); + __m256i const key_vec = _mm256_loadu_si256 (xsecret+i); + __m256i const data_key = _mm256_xor_si256 (data_vec, key_vec); /* uint32 dk[8] = {d0+k0, d1+k1, d2+k2, d3+k3, ...} */ + __m256i const product = _mm256_mul_epu32 (data_key, _mm256_shuffle_epi32 (data_key, 0x31)); /* uint64 mul[4] = {dk0*dk1, dk2*dk3, ...} */ + if (accWidth == XXH3p_acc_128bits) { + __m256i const data_swap = _mm256_shuffle_epi32(data_vec, _MM_SHUFFLE(1,0,3,2)); + __m256i const sum = _mm256_add_epi64(xacc[i], data_swap); + xacc[i] = _mm256_add_epi64(product, sum); + } else { /* XXH3p_acc_64bits */ + __m256i const sum = _mm256_add_epi64(xacc[i], data_vec); + xacc[i] = _mm256_add_epi64(product, sum); + } + } } + +#elif (XXH_VECTOR == XXH_SSE2) + + XXH_ASSERT((((size_t)acc) & 15) == 0); + { XXH_ALIGN(16) __m128i* const xacc = (__m128i *) acc; + const __m128i* const xinput = (const __m128i *) input; /* not really aligned, just for ptr arithmetic, and because _mm_loadu_si128() requires this type */ + const __m128i* const xsecret = (const __m128i *) secret; /* not really aligned, just for ptr arithmetic, and because _mm_loadu_si128() requires this type */ + + size_t i; + for (i=0; i < STRIPE_LEN/sizeof(__m128i); i++) { + __m128i const data_vec = _mm_loadu_si128 (xinput+i); + __m128i const key_vec = _mm_loadu_si128 (xsecret+i); + __m128i const data_key = _mm_xor_si128 (data_vec, key_vec); /* uint32 dk[8] = {d0+k0, d1+k1, d2+k2, d3+k3, ...} */ + __m128i const product = _mm_mul_epu32 (data_key, _mm_shuffle_epi32 (data_key, 0x31)); /* uint64 mul[4] = {dk0*dk1, dk2*dk3, ...} */ + if (accWidth == XXH3p_acc_128bits) { + __m128i const data_swap = _mm_shuffle_epi32(data_vec, _MM_SHUFFLE(1,0,3,2)); + __m128i const sum = _mm_add_epi64(xacc[i], data_swap); + xacc[i] = _mm_add_epi64(product, sum); + } else { /* 
XXH3p_acc_64bits */ + __m128i const sum = _mm_add_epi64(xacc[i], data_vec); + xacc[i] = _mm_add_epi64(product, sum); + } + } } + +#elif (XXH_VECTOR == XXH_NEON) + + XXH_ASSERT((((size_t)acc) & 15) == 0); + { + XXH_ALIGN(16) uint64x2_t* const xacc = (uint64x2_t *) acc; + /* We don't use a uint32x4_t pointer because it causes bus errors on ARMv7. */ + uint8_t const* const xinput = (const uint8_t *) input; + uint8_t const* const xsecret = (const uint8_t *) secret; + + size_t i; + for (i=0; i < STRIPE_LEN / sizeof(uint64x2_t); i++) { +#if !defined(__aarch64__) && !defined(__arm64__) && defined(__GNUC__) /* ARM32-specific hack */ + /* vzip on ARMv7 Clang generates a lot of vmovs (technically vorrs) without this. + * vzip on 32-bit ARM NEON will overwrite the original register, and I think that Clang + * assumes I don't want to destroy it and tries to make a copy. This slows down the code + * a lot. + * aarch64 not only uses an entirely different syntax, but it requires three + * instructions... + * ext v1.16B, v0.16B, #8 // select high bits because aarch64 can't address them directly + * zip1 v3.2s, v0.2s, v1.2s // first zip + * zip2 v2.2s, v0.2s, v1.2s // second zip + * ...to do what ARM does in one: + * vzip.32 d0, d1 // Interleave high and low bits and overwrite. */ + + /* data_vec = xsecret[i]; */ + uint8x16_t const data_vec = vld1q_u8(xinput + (i * 16)); + /* key_vec = xsecret[i]; */ + uint8x16_t const key_vec = vld1q_u8(xsecret + (i * 16)); + /* data_key = data_vec ^ key_vec; */ + uint32x4_t data_key; + + if (accWidth == XXH3p_acc_64bits) { + /* Add first to prevent register swaps */ + /* xacc[i] += data_vec; */ + xacc[i] = vaddq_u64 (xacc[i], vreinterpretq_u64_u8(data_vec)); + } else { /* XXH3p_acc_128bits */ + /* xacc[i] += swap(data_vec); */ + /* can probably be optimized better */ + uint64x2_t const data64 = vreinterpretq_u64_u8(data_vec); + uint64x2_t const swapped= vextq_u64(data64, data64, 1); + xacc[i] = vaddq_u64 (xacc[i], swapped); + } + + data_key = vreinterpretq_u32_u8(veorq_u8(data_vec, key_vec)); + + /* Here's the magic. We use the quirkiness of vzip to shuffle data_key in place. + * shuffle: data_key[0, 1, 2, 3] = data_key[0, 2, 1, 3] */ + __asm__("vzip.32 %e0, %f0" : "+w" (data_key)); + /* xacc[i] += (uint64x2_t) data_key[0, 1] * (uint64x2_t) data_key[2, 3]; */ + xacc[i] = vmlal_u32(xacc[i], vget_low_u32(data_key), vget_high_u32(data_key)); + +#else + /* On aarch64, vshrn/vmovn seems to be equivalent to, if not faster than, the vzip method. 
*/ + + /* data_vec = xsecret[i]; */ + uint8x16_t const data_vec = vld1q_u8(xinput + (i * 16)); + /* key_vec = xsecret[i]; */ + uint8x16_t const key_vec = vld1q_u8(xsecret + (i * 16)); + /* data_key = data_vec ^ key_vec; */ + uint64x2_t const data_key = vreinterpretq_u64_u8(veorq_u8(data_vec, key_vec)); + /* data_key_lo = (uint32x2_t) (data_key & 0xFFFFFFFF); */ + uint32x2_t const data_key_lo = vmovn_u64 (data_key); + /* data_key_hi = (uint32x2_t) (data_key >> 32); */ + uint32x2_t const data_key_hi = vshrn_n_u64 (data_key, 32); + if (accWidth == XXH3p_acc_64bits) { + /* xacc[i] += data_vec; */ + xacc[i] = vaddq_u64 (xacc[i], vreinterpretq_u64_u8(data_vec)); + } else { /* XXH3p_acc_128bits */ + /* xacc[i] += swap(data_vec); */ + uint64x2_t const data64 = vreinterpretq_u64_u8(data_vec); + uint64x2_t const swapped= vextq_u64(data64, data64, 1); + xacc[i] = vaddq_u64 (xacc[i], swapped); + } + /* xacc[i] += (uint64x2_t) data_key_lo * (uint64x2_t) data_key_hi; */ + xacc[i] = vmlal_u32 (xacc[i], data_key_lo, data_key_hi); + +#endif + } + } + +#elif (XXH_VECTOR == XXH_VSX) + U64x2* const xacc = (U64x2*) acc; /* presumed aligned */ + U64x2 const* const xinput = (U64x2 const*) input; /* no alignment restriction */ + U64x2 const* const xsecret = (U64x2 const*) secret; /* no alignment restriction */ + U64x2 const v32 = { 32, 32 }; +#if XXH_VSX_BE + U8x16 const vXorSwap = { 0x07, 0x16, 0x25, 0x34, 0x43, 0x52, 0x61, 0x70, + 0x8F, 0x9E, 0xAD, 0xBC, 0xCB, 0xDA, 0xE9, 0xF8 }; +#endif + size_t i; + for (i = 0; i < STRIPE_LEN / sizeof(U64x2); i++) { + /* data_vec = xinput[i]; */ + /* key_vec = xsecret[i]; */ +#if XXH_VSX_BE + /* byteswap */ + U64x2 const data_vec = XXH_vec_revb(vec_vsx_ld(0, xinput + i)); + U64x2 const key_raw = vec_vsx_ld(0, xsecret + i); + /* See comment above. 
data_key = data_vec ^ swap(xsecret[i]); */ + U64x2 const data_key = (U64x2)XXH_vec_permxor((U8x16)data_vec, (U8x16)key_raw, vXorSwap); +#else + U64x2 const data_vec = vec_vsx_ld(0, xinput + i); + U64x2 const key_vec = vec_vsx_ld(0, xsecret + i); + U64x2 const data_key = data_vec ^ key_vec; +#endif + /* shuffled = (data_key << 32) | (data_key >> 32); */ + U32x4 const shuffled = (U32x4)vec_rl(data_key, v32); + /* product = ((U64x2)data_key & 0xFFFFFFFF) * ((U64x2)shuffled & 0xFFFFFFFF); */ + U64x2 const product = XXH_vec_mulo((U32x4)data_key, shuffled); + xacc[i] += product; + + if (accWidth == XXH3p_acc_64bits) { + xacc[i] += data_vec; + } else { /* XXH3p_acc_128bits */ + /* swap high and low halves */ + U64x2 const data_swapped = vec_xxpermdi(data_vec, data_vec, 2); + xacc[i] += data_swapped; + } + } + +#else /* scalar variant of Accumulator - universal */ + + XXH_ALIGN(XXH_ACC_ALIGN) xxh_u64* const xacc = (xxh_u64*) acc; /* presumed aligned on 32-bytes boundaries, little hint for the auto-vectorizer */ + const xxh_u8* const xinput = (const xxh_u8*) input; /* no alignment restriction */ + const xxh_u8* const xsecret = (const xxh_u8*) secret; /* no alignment restriction */ + size_t i; + XXH_ASSERT(((size_t)acc & (XXH_ACC_ALIGN-1)) == 0); + for (i=0; i < ACC_NB; i++) { + xxh_u64 const data_val = XXH_readLE64(xinput + 8*i); + xxh_u64 const data_key = data_val ^ XXH_readLE64(xsecret + i*8); + + if (accWidth == XXH3p_acc_64bits) { + xacc[i] += data_val; + } else { + xacc[i ^ 1] += data_val; /* swap adjacent lanes */ + } + xacc[i] += XXH_mult32to64(data_key & 0xFFFFFFFF, data_key >> 32); + } +#endif +} + +XXH_FORCE_INLINE void +XXH3p_scrambleAcc(void* XXH_RESTRICT acc, const void* XXH_RESTRICT secret) +{ +#if (XXH_VECTOR == XXH_AVX2) + + XXH_ASSERT((((size_t)acc) & 31) == 0); + { XXH_ALIGN(32) __m256i* const xacc = (__m256i*) acc; + const __m256i* const xsecret = (const __m256i *) secret; /* not really aligned, just for ptr arithmetic, and because _mm256_loadu_si256() requires this argument type */ + const __m256i prime32 = _mm256_set1_epi32((int)PRIME32_1); + + size_t i; + for (i=0; i < STRIPE_LEN/sizeof(__m256i); i++) { + /* xacc[i] ^= (xacc[i] >> 47) */ + __m256i const acc_vec = xacc[i]; + __m256i const shifted = _mm256_srli_epi64 (acc_vec, 47); + __m256i const data_vec = _mm256_xor_si256 (acc_vec, shifted); + /* xacc[i] ^= xsecret; */ + __m256i const key_vec = _mm256_loadu_si256 (xsecret+i); + __m256i const data_key = _mm256_xor_si256 (data_vec, key_vec); + + /* xacc[i] *= PRIME32_1; */ + __m256i const data_key_hi = _mm256_shuffle_epi32 (data_key, 0x31); + __m256i const prod_lo = _mm256_mul_epu32 (data_key, prime32); + __m256i const prod_hi = _mm256_mul_epu32 (data_key_hi, prime32); + xacc[i] = _mm256_add_epi64(prod_lo, _mm256_slli_epi64(prod_hi, 32)); + } + } + +#elif (XXH_VECTOR == XXH_SSE2) + + XXH_ASSERT((((size_t)acc) & 15) == 0); + { XXH_ALIGN(16) __m128i* const xacc = (__m128i*) acc; + const __m128i* const xsecret = (const __m128i *) secret; /* not really aligned, just for ptr arithmetic, and because _mm_loadu_si128() requires this argument type */ + const __m128i prime32 = _mm_set1_epi32((int)PRIME32_1); + + size_t i; + for (i=0; i < STRIPE_LEN/sizeof(__m128i); i++) { + /* xacc[i] ^= (xacc[i] >> 47) */ + __m128i const acc_vec = xacc[i]; + __m128i const shifted = _mm_srli_epi64 (acc_vec, 47); + __m128i const data_vec = _mm_xor_si128 (acc_vec, shifted); + /* xacc[i] ^= xsecret; */ + __m128i const key_vec = _mm_loadu_si128 (xsecret+i); + __m128i const data_key = _mm_xor_si128 (data_vec, 
key_vec); + + /* xacc[i] *= PRIME32_1; */ + __m128i const data_key_hi = _mm_shuffle_epi32 (data_key, 0x31); + __m128i const prod_lo = _mm_mul_epu32 (data_key, prime32); + __m128i const prod_hi = _mm_mul_epu32 (data_key_hi, prime32); + xacc[i] = _mm_add_epi64(prod_lo, _mm_slli_epi64(prod_hi, 32)); + } + } + +#elif (XXH_VECTOR == XXH_NEON) + + XXH_ASSERT((((size_t)acc) & 15) == 0); + + { uint64x2_t* const xacc = (uint64x2_t*) acc; + uint8_t const* const xsecret = (uint8_t const*) secret; + uint32x2_t const prime = vdup_n_u32 (PRIME32_1); + + size_t i; + for (i=0; i < STRIPE_LEN/sizeof(uint64x2_t); i++) { + /* data_vec = xacc[i] ^ (xacc[i] >> 47); */ + uint64x2_t const acc_vec = xacc[i]; + uint64x2_t const shifted = vshrq_n_u64 (acc_vec, 47); + uint64x2_t const data_vec = veorq_u64 (acc_vec, shifted); + + /* key_vec = xsecret[i]; */ + uint32x4_t const key_vec = vreinterpretq_u32_u8(vld1q_u8(xsecret + (i * 16))); + /* data_key = data_vec ^ key_vec; */ + uint32x4_t const data_key = veorq_u32 (vreinterpretq_u32_u64(data_vec), key_vec); + /* shuffled = { data_key[0, 2], data_key[1, 3] }; */ + uint32x2x2_t const shuffled = vzip_u32 (vget_low_u32(data_key), vget_high_u32(data_key)); + + /* data_key *= PRIME32_1 */ + + /* prod_hi = (data_key >> 32) * PRIME32_1; */ + uint64x2_t const prod_hi = vmull_u32 (shuffled.val[1], prime); + /* xacc[i] = prod_hi << 32; */ + xacc[i] = vshlq_n_u64(prod_hi, 32); + /* xacc[i] += (prod_hi & 0xFFFFFFFF) * PRIME32_1; */ + xacc[i] = vmlal_u32(xacc[i], shuffled.val[0], prime); + } } + +#elif (XXH_VECTOR == XXH_VSX) + + U64x2* const xacc = (U64x2*) acc; + const U64x2* const xsecret = (const U64x2*) secret; + /* constants */ + U64x2 const v32 = { 32, 32 }; + U64x2 const v47 = { 47, 47 }; + U32x4 const prime = { PRIME32_1, PRIME32_1, PRIME32_1, PRIME32_1 }; + size_t i; +#if XXH_VSX_BE + /* endian swap */ + U8x16 const vXorSwap = { 0x07, 0x16, 0x25, 0x34, 0x43, 0x52, 0x61, 0x70, + 0x8F, 0x9E, 0xAD, 0xBC, 0xCB, 0xDA, 0xE9, 0xF8 }; +#endif + for (i = 0; i < STRIPE_LEN / sizeof(U64x2); i++) { + U64x2 const acc_vec = xacc[i]; + U64x2 const data_vec = acc_vec ^ (acc_vec >> v47); + /* key_vec = xsecret[i]; */ +#if XXH_VSX_BE + /* swap bytes words */ + U64x2 const key_raw = vec_vsx_ld(0, xsecret + i); + U64x2 const data_key = (U64x2)XXH_vec_permxor((U8x16)data_vec, (U8x16)key_raw, vXorSwap); +#else + U64x2 const key_vec = vec_vsx_ld(0, xsecret + i); + U64x2 const data_key = data_vec ^ key_vec; +#endif + + /* data_key *= PRIME32_1 */ + + /* prod_lo = ((U64x2)data_key & 0xFFFFFFFF) * ((U64x2)prime & 0xFFFFFFFF); */ + U64x2 const prod_even = XXH_vec_mule((U32x4)data_key, prime); + /* prod_hi = ((U64x2)data_key >> 32) * ((U64x2)prime >> 32); */ + U64x2 const prod_odd = XXH_vec_mulo((U32x4)data_key, prime); + xacc[i] = prod_odd + (prod_even << v32); + } + +#else /* scalar variant of Scrambler - universal */ + + XXH_ALIGN(XXH_ACC_ALIGN) xxh_u64* const xacc = (xxh_u64*) acc; /* presumed aligned on 32-bytes boundaries, little hint for the auto-vectorizer */ + const xxh_u8* const xsecret = (const xxh_u8*) secret; /* no alignment restriction */ + size_t i; + XXH_ASSERT((((size_t)acc) & (XXH_ACC_ALIGN-1)) == 0); + for (i=0; i < ACC_NB; i++) { + xxh_u64 const key64 = XXH_readLE64(xsecret + 8*i); + xxh_u64 acc64 = xacc[i]; + acc64 ^= acc64 >> 47; + acc64 ^= key64; + acc64 *= PRIME32_1; + xacc[i] = acc64; + } + +#endif +} + +#define XXH_PREFETCH_DIST 384 + +/* assumption : nbStripes will not overflow secret size */ +XXH_FORCE_INLINE void +XXH3p_accumulate( xxh_u64* XXH_RESTRICT acc, + const 
xxh_u8* XXH_RESTRICT input, + const xxh_u8* XXH_RESTRICT secret, + size_t nbStripes, + XXH3p_accWidth_e accWidth) +{ + size_t n; + for (n = 0; n < nbStripes; n++ ) { + const xxh_u8* const in = input + n*STRIPE_LEN; + XXH_PREFETCH(in + XXH_PREFETCH_DIST); + XXH3p_accumulate_512(acc, + in, + secret + n*XXH_SECRET_CONSUME_RATE, + accWidth); + } +} + +/* note : clang auto-vectorizes well in SS2 mode _if_ this function is `static`, + * and doesn't auto-vectorize it at all if it is `FORCE_INLINE`. + * However, it auto-vectorizes better AVX2 if it is `FORCE_INLINE` + * Pretty much every other modes and compilers prefer `FORCE_INLINE`. + */ + +#if defined(__clang__) && (XXH_VECTOR==0) && !defined(__AVX2__) && !defined(__arm__) && !defined(__thumb__) +static void +#else +XXH_FORCE_INLINE void +#endif +XXH3p_hashLong_internal_loop( xxh_u64* XXH_RESTRICT acc, + const xxh_u8* XXH_RESTRICT input, size_t len, + const xxh_u8* XXH_RESTRICT secret, size_t secretSize, + XXH3p_accWidth_e accWidth) +{ + size_t const nb_rounds = (secretSize - STRIPE_LEN) / XXH_SECRET_CONSUME_RATE; + size_t const block_len = STRIPE_LEN * nb_rounds; + size_t const nb_blocks = len / block_len; + + size_t n; + + XXH_ASSERT(secretSize >= XXH3p_SECRET_SIZE_MIN); + + for (n = 0; n < nb_blocks; n++) { + XXH3p_accumulate(acc, input + n*block_len, secret, nb_rounds, accWidth); + XXH3p_scrambleAcc(acc, secret + secretSize - STRIPE_LEN); + } + + /* last partial block */ + XXH_ASSERT(len > STRIPE_LEN); + { size_t const nbStripes = (len - (block_len * nb_blocks)) / STRIPE_LEN; + XXH_ASSERT(nbStripes <= (secretSize / XXH_SECRET_CONSUME_RATE)); + XXH3p_accumulate(acc, input + nb_blocks*block_len, secret, nbStripes, accWidth); + + /* last stripe */ + if (len & (STRIPE_LEN - 1)) { + const xxh_u8* const p = input + len - STRIPE_LEN; +#define XXH_SECRET_LASTACC_START 7 /* do not align on 8, so that secret is different from scrambler */ + XXH3p_accumulate_512(acc, p, secret + secretSize - STRIPE_LEN - XXH_SECRET_LASTACC_START, accWidth); + } } +} + +XXH_FORCE_INLINE xxh_u64 +XXH3p_mix2Accs(const xxh_u64* XXH_RESTRICT acc, const xxh_u8* XXH_RESTRICT secret) +{ + return XXH3p_mul128_fold64( + acc[0] ^ XXH_readLE64(secret), + acc[1] ^ XXH_readLE64(secret+8) ); +} + +static XXH64_hash_t +XXH3p_mergeAccs(const xxh_u64* XXH_RESTRICT acc, const xxh_u8* XXH_RESTRICT secret, xxh_u64 start) +{ + xxh_u64 result64 = start; + + result64 += XXH3p_mix2Accs(acc+0, secret + 0); + result64 += XXH3p_mix2Accs(acc+2, secret + 16); + result64 += XXH3p_mix2Accs(acc+4, secret + 32); + result64 += XXH3p_mix2Accs(acc+6, secret + 48); + + return XXH3p_avalanche(result64); +} + +#define XXH3p_INIT_ACC { PRIME32_3, PRIME64_1, PRIME64_2, PRIME64_3, \ + PRIME64_4, PRIME32_2, PRIME64_5, PRIME32_1 }; + +XXH_FORCE_INLINE XXH64_hash_t +XXH3p_hashLong_internal(const xxh_u8* XXH_RESTRICT input, size_t len, + const xxh_u8* XXH_RESTRICT secret, size_t secretSize) +{ + XXH_ALIGN(XXH_ACC_ALIGN) xxh_u64 acc[ACC_NB] = XXH3p_INIT_ACC; + + XXH3p_hashLong_internal_loop(acc, input, len, secret, secretSize, XXH3p_acc_64bits); + + /* converge into final hash */ + XXH_STATIC_ASSERT(sizeof(acc) == 64); +#define XXH_SECRET_MERGEACCS_START 11 /* do not align on 8, so that secret is different from accumulator */ + XXH_ASSERT(secretSize >= sizeof(acc) + XXH_SECRET_MERGEACCS_START); + return XXH3p_mergeAccs(acc, secret + XXH_SECRET_MERGEACCS_START, (xxh_u64)len * PRIME64_1); +} + + +XXH_NO_INLINE XXH64_hash_t /* It's important for performance that XXH3p_hashLong is not inlined. 
Not sure why (uop cache maybe ?), but difference is large and easily measurable */ +XXH3p_hashLong_64b_defaultSecret(const xxh_u8* XXH_RESTRICT input, size_t len) +{ + return XXH3p_hashLong_internal(input, len, kSecret, sizeof(kSecret)); +} + +XXH_NO_INLINE XXH64_hash_t /* It's important for performance that XXH3p_hashLong is not inlined. Not sure why (uop cache maybe ?), but difference is large and easily measurable */ +XXH3p_hashLong_64b_withSecret(const xxh_u8* XXH_RESTRICT input, size_t len, + const xxh_u8* XXH_RESTRICT secret, size_t secretSize) +{ + return XXH3p_hashLong_internal(input, len, secret, secretSize); +} + + +XXH_FORCE_INLINE void XXH_writeLE64(void* dst, xxh_u64 v64) +{ + if (!XXH_CPU_LITTLE_ENDIAN) v64 = XXH_swap64(v64); + memcpy(dst, &v64, sizeof(v64)); +} + +/* XXH3p_initCustomSecret() : + * destination `customSecret` is presumed allocated and same size as `kSecret`. + */ +XXH_FORCE_INLINE void XXH3p_initCustomSecret(xxh_u8* customSecret, xxh_u64 seed64) +{ + int const nbRounds = XXH_SECRET_DEFAULT_SIZE / 16; + int i; + + XXH_STATIC_ASSERT((XXH_SECRET_DEFAULT_SIZE & 15) == 0); + + for (i=0; i < nbRounds; i++) { + XXH_writeLE64(customSecret + 16*i, XXH_readLE64(kSecret + 16*i) + seed64); + XXH_writeLE64(customSecret + 16*i + 8, XXH_readLE64(kSecret + 16*i + 8) - seed64); + } +} + + +/* XXH3p_hashLong_64b_withSeed() : + * Generate a custom key, + * based on alteration of default kSecret with the seed, + * and then use this key for long mode hashing. + * This operation is decently fast but nonetheless costs a little bit of time. + * Try to avoid it whenever possible (typically when seed==0). + */ +XXH_NO_INLINE XXH64_hash_t /* It's important for performance that XXH3p_hashLong is not inlined. Not sure why (uop cache maybe ?), but difference is large and easily measurable */ +XXH3p_hashLong_64b_withSeed(const xxh_u8* input, size_t len, XXH64_hash_t seed) +{ + XXH_ALIGN(8) xxh_u8 secret[XXH_SECRET_DEFAULT_SIZE]; + if (seed==0) return XXH3p_hashLong_64b_defaultSecret(input, len); + XXH3p_initCustomSecret(secret, seed); + return XXH3p_hashLong_internal(input, len, secret, sizeof(secret)); +} + + +XXH_FORCE_INLINE xxh_u64 XXH3p_mix16B(const xxh_u8* XXH_RESTRICT input, + const xxh_u8* XXH_RESTRICT secret, xxh_u64 seed64) +{ + xxh_u64 const input_lo = XXH_readLE64(input); + xxh_u64 const input_hi = XXH_readLE64(input+8); + return XXH3p_mul128_fold64( + input_lo ^ (XXH_readLE64(secret) + seed64), + input_hi ^ (XXH_readLE64(secret+8) - seed64) ); +} + + +XXH_FORCE_INLINE XXH64_hash_t +XXH3p_len_17to128_64b(const xxh_u8* XXH_RESTRICT input, size_t len, + const xxh_u8* XXH_RESTRICT secret, size_t secretSize, + XXH64_hash_t seed) +{ + XXH_ASSERT(secretSize >= XXH3p_SECRET_SIZE_MIN); (void)secretSize; + XXH_ASSERT(16 < len && len <= 128); + + { xxh_u64 acc = len * PRIME64_1; + if (len > 32) { + if (len > 64) { + if (len > 96) { + acc += XXH3p_mix16B(input+48, secret+96, seed); + acc += XXH3p_mix16B(input+len-64, secret+112, seed); + } + acc += XXH3p_mix16B(input+32, secret+64, seed); + acc += XXH3p_mix16B(input+len-48, secret+80, seed); + } + acc += XXH3p_mix16B(input+16, secret+32, seed); + acc += XXH3p_mix16B(input+len-32, secret+48, seed); + } + acc += XXH3p_mix16B(input+0, secret+0, seed); + acc += XXH3p_mix16B(input+len-16, secret+16, seed); + + return XXH3p_avalanche(acc); + } +} + +#define XXH3p_MIDSIZE_MAX 240 + +XXH_NO_INLINE XXH64_hash_t +XXH3p_len_129to240_64b(const xxh_u8* XXH_RESTRICT input, size_t len, + const xxh_u8* XXH_RESTRICT secret, size_t secretSize, + 
XXH64_hash_t seed) +{ + XXH_ASSERT(secretSize >= XXH3p_SECRET_SIZE_MIN); (void)secretSize; + XXH_ASSERT(128 < len && len <= XXH3p_MIDSIZE_MAX); + + #define XXH3p_MIDSIZE_STARTOFFSET 3 + #define XXH3p_MIDSIZE_LASTOFFSET 17 + + { xxh_u64 acc = len * PRIME64_1; + int const nbRounds = (int)len / 16; + int i; + for (i=0; i<8; i++) { + acc += XXH3p_mix16B(input+(16*i), secret+(16*i), seed); + } + acc = XXH3p_avalanche(acc); + XXH_ASSERT(nbRounds >= 8); + for (i=8 ; i < nbRounds; i++) { + acc += XXH3p_mix16B(input+(16*i), secret+(16*(i-8)) + XXH3p_MIDSIZE_STARTOFFSET, seed); + } + /* last bytes */ + acc += XXH3p_mix16B(input + len - 16, secret + XXH3p_SECRET_SIZE_MIN - XXH3p_MIDSIZE_LASTOFFSET, seed); + return XXH3p_avalanche(acc); + } +} + +/* === Public entry point === */ + +XXH_PUBLIC_API XXH64_hash_t XXH3p_64bits(const void* input, size_t len) +{ + if (len <= 16) return XXH3p_len_0to16_64b((const xxh_u8*)input, len, kSecret, 0); + if (len <= 128) return XXH3p_len_17to128_64b((const xxh_u8*)input, len, kSecret, sizeof(kSecret), 0); + if (len <= XXH3p_MIDSIZE_MAX) return XXH3p_len_129to240_64b((const xxh_u8*)input, len, kSecret, sizeof(kSecret), 0); + return XXH3p_hashLong_64b_defaultSecret((const xxh_u8*)input, len); +} + +XXH_PUBLIC_API XXH64_hash_t +XXH3p_64bits_withSecret(const void* input, size_t len, const void* secret, size_t secretSize) +{ + XXH_ASSERT(secretSize >= XXH3p_SECRET_SIZE_MIN); + /* if an action must be taken should `secret` conditions not be respected, + * it should be done here. + * For now, it's a contract pre-condition. + * Adding a check and a branch here would cost performance at every hash */ + if (len <= 16) return XXH3p_len_0to16_64b((const xxh_u8*)input, len, (const xxh_u8*)secret, 0); + if (len <= 128) return XXH3p_len_17to128_64b((const xxh_u8*)input, len, (const xxh_u8*)secret, secretSize, 0); + if (len <= XXH3p_MIDSIZE_MAX) return XXH3p_len_129to240_64b((const xxh_u8*)input, len, (const xxh_u8*)secret, secretSize, 0); + return XXH3p_hashLong_64b_withSecret((const xxh_u8*)input, len, (const xxh_u8*)secret, secretSize); +} + +XXH_PUBLIC_API XXH64_hash_t +XXH3p_64bits_withSeed(const void* input, size_t len, XXH64_hash_t seed) +{ + if (len <= 16) return XXH3p_len_0to16_64b((const xxh_u8*)input, len, kSecret, seed); + if (len <= 128) return XXH3p_len_17to128_64b((const xxh_u8*)input, len, kSecret, sizeof(kSecret), seed); + if (len <= XXH3p_MIDSIZE_MAX) return XXH3p_len_129to240_64b((const xxh_u8*)input, len, kSecret, sizeof(kSecret), seed); + return XXH3p_hashLong_64b_withSeed((const xxh_u8*)input, len, seed); +} + +/* === XXH3 streaming === */ + +XXH_PUBLIC_API XXH3p_state_t* XXH3p_createState(void) +{ + return (XXH3p_state_t*)XXH_malloc(sizeof(XXH3p_state_t)); +} + +XXH_PUBLIC_API XXH_errorcode XXH3p_freeState(XXH3p_state_t* statePtr) +{ + XXH_free(statePtr); + return XXH_OK; +} + +XXH_PUBLIC_API void +XXH3p_copyState(XXH3p_state_t* dst_state, const XXH3p_state_t* src_state) +{ + memcpy(dst_state, src_state, sizeof(*dst_state)); +} + +static void +XXH3p_64bits_reset_internal(XXH3p_state_t* statePtr, + XXH64_hash_t seed, + const xxh_u8* secret, size_t secretSize) +{ + XXH_ASSERT(statePtr != NULL); + memset(statePtr, 0, sizeof(*statePtr)); + statePtr->acc[0] = PRIME32_3; + statePtr->acc[1] = PRIME64_1; + statePtr->acc[2] = PRIME64_2; + statePtr->acc[3] = PRIME64_3; + statePtr->acc[4] = PRIME64_4; + statePtr->acc[5] = PRIME32_2; + statePtr->acc[6] = PRIME64_5; + statePtr->acc[7] = PRIME32_1; + statePtr->seed = seed; + XXH_ASSERT(secret != NULL); + statePtr->secret = 
secret; + XXH_ASSERT(secretSize >= XXH3p_SECRET_SIZE_MIN); + statePtr->secretLimit = (XXH32_hash_t)(secretSize - STRIPE_LEN); + statePtr->nbStripesPerBlock = statePtr->secretLimit / XXH_SECRET_CONSUME_RATE; +} + +XXH_PUBLIC_API XXH_errorcode +XXH3p_64bits_reset(XXH3p_state_t* statePtr) +{ + if (statePtr == NULL) return XXH_ERROR; + XXH3p_64bits_reset_internal(statePtr, 0, kSecret, XXH_SECRET_DEFAULT_SIZE); + return XXH_OK; +} + +XXH_PUBLIC_API XXH_errorcode +XXH3p_64bits_reset_withSecret(XXH3p_state_t* statePtr, const void* secret, size_t secretSize) +{ + if (statePtr == NULL) return XXH_ERROR; + XXH3p_64bits_reset_internal(statePtr, 0, (const xxh_u8*)secret, secretSize); + if (secret == NULL) return XXH_ERROR; + if (secretSize < XXH3p_SECRET_SIZE_MIN) return XXH_ERROR; + return XXH_OK; +} + +XXH_PUBLIC_API XXH_errorcode +XXH3p_64bits_reset_withSeed(XXH3p_state_t* statePtr, XXH64_hash_t seed) +{ + if (statePtr == NULL) return XXH_ERROR; + XXH3p_64bits_reset_internal(statePtr, seed, kSecret, XXH_SECRET_DEFAULT_SIZE); + XXH3p_initCustomSecret(statePtr->customSecret, seed); + statePtr->secret = statePtr->customSecret; + return XXH_OK; +} + +XXH_FORCE_INLINE void +XXH3p_consumeStripes( xxh_u64* acc, + XXH32_hash_t* nbStripesSoFarPtr, XXH32_hash_t nbStripesPerBlock, + const xxh_u8* input, size_t totalStripes, + const xxh_u8* secret, size_t secretLimit, + XXH3p_accWidth_e accWidth) +{ + XXH_ASSERT(*nbStripesSoFarPtr < nbStripesPerBlock); + if (nbStripesPerBlock - *nbStripesSoFarPtr <= totalStripes) { + /* need a scrambling operation */ + size_t const nbStripes = nbStripesPerBlock - *nbStripesSoFarPtr; + XXH3p_accumulate(acc, input, secret + nbStripesSoFarPtr[0] * XXH_SECRET_CONSUME_RATE, nbStripes, accWidth); + XXH3p_scrambleAcc(acc, secret + secretLimit); + XXH3p_accumulate(acc, input + nbStripes * STRIPE_LEN, secret, totalStripes - nbStripes, accWidth); + *nbStripesSoFarPtr = (XXH32_hash_t)(totalStripes - nbStripes); + } else { + XXH3p_accumulate(acc, input, secret + nbStripesSoFarPtr[0] * XXH_SECRET_CONSUME_RATE, totalStripes, accWidth); + *nbStripesSoFarPtr += (XXH32_hash_t)totalStripes; + } +} + +XXH_FORCE_INLINE XXH_errorcode +XXH3p_update(XXH3p_state_t* state, const xxh_u8* input, size_t len, XXH3p_accWidth_e accWidth) +{ + if (input==NULL) +#if defined(XXH_ACCEPT_NULL_INPUT_POINTER) && (XXH_ACCEPT_NULL_INPUT_POINTER>=1) + return XXH_OK; +#else + return XXH_ERROR; +#endif + + { const xxh_u8* const bEnd = input + len; + + state->totalLen += len; + + if (state->bufferedSize + len <= XXH3p_INTERNALBUFFER_SIZE) { /* fill in tmp buffer */ + XXH_memcpy(state->buffer + state->bufferedSize, input, len); + state->bufferedSize += (XXH32_hash_t)len; + return XXH_OK; + } + /* input now > XXH3p_INTERNALBUFFER_SIZE */ + + #define XXH3p_INTERNALBUFFER_STRIPES (XXH3p_INTERNALBUFFER_SIZE / STRIPE_LEN) + XXH_STATIC_ASSERT(XXH3p_INTERNALBUFFER_SIZE % STRIPE_LEN == 0); /* clean multiple */ + + if (state->bufferedSize) { /* some input within internal buffer: fill then consume it */ + size_t const loadSize = XXH3p_INTERNALBUFFER_SIZE - state->bufferedSize; + XXH_memcpy(state->buffer + state->bufferedSize, input, loadSize); + input += loadSize; + XXH3p_consumeStripes(state->acc, + &state->nbStripesSoFar, state->nbStripesPerBlock, + state->buffer, XXH3p_INTERNALBUFFER_STRIPES, + state->secret, state->secretLimit, + accWidth); + state->bufferedSize = 0; + } + + /* consume input by full buffer quantities */ + if (input+XXH3p_INTERNALBUFFER_SIZE <= bEnd) { + const xxh_u8* const limit = bEnd - 
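/* Editorial note (descriptive only, not part of the upstream patch): the streaming update path
   stages input in the fixed-size internal buffer and hands it to XXH3p_consumeStripes in
   whole-buffer units; consumeStripes tracks how many stripes of the current block have been
   processed and scrambles the accumulators whenever a block boundary is crossed. The loop just
   below consumes any further input directly, one full buffer at a time, and the tail is
   re-buffered for the next update or the final digest. */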
XXH3p_INTERNALBUFFER_SIZE; + do { + XXH3p_consumeStripes(state->acc, + &state->nbStripesSoFar, state->nbStripesPerBlock, + input, XXH3p_INTERNALBUFFER_STRIPES, + state->secret, state->secretLimit, + accWidth); + input += XXH3p_INTERNALBUFFER_SIZE; + } while (input<=limit); + } + + if (input < bEnd) { /* some remaining input input : buffer it */ + XXH_memcpy(state->buffer, input, (size_t)(bEnd-input)); + state->bufferedSize = (XXH32_hash_t)(bEnd-input); + } + } + + return XXH_OK; +} + +XXH_PUBLIC_API XXH_errorcode +XXH3p_64bits_update(XXH3p_state_t* state, const void* input, size_t len) +{ + return XXH3p_update(state, (const xxh_u8*)input, len, XXH3p_acc_64bits); +} + + +XXH_FORCE_INLINE void +XXH3p_digest_long (XXH64_hash_t* acc, const XXH3p_state_t* state, XXH3p_accWidth_e accWidth) +{ + memcpy(acc, state->acc, sizeof(state->acc)); /* digest locally, state remains unaltered, and can continue ingesting more input afterwards */ + if (state->bufferedSize >= STRIPE_LEN) { + size_t const totalNbStripes = state->bufferedSize / STRIPE_LEN; + XXH32_hash_t nbStripesSoFar = state->nbStripesSoFar; + XXH3p_consumeStripes(acc, + &nbStripesSoFar, state->nbStripesPerBlock, + state->buffer, totalNbStripes, + state->secret, state->secretLimit, + accWidth); + if (state->bufferedSize % STRIPE_LEN) { /* one last partial stripe */ + XXH3p_accumulate_512(acc, + state->buffer + state->bufferedSize - STRIPE_LEN, + state->secret + state->secretLimit - XXH_SECRET_LASTACC_START, + accWidth); + } + } else { /* bufferedSize < STRIPE_LEN */ + if (state->bufferedSize) { /* one last stripe */ + xxh_u8 lastStripe[STRIPE_LEN]; + size_t const catchupSize = STRIPE_LEN - state->bufferedSize; + memcpy(lastStripe, state->buffer + sizeof(state->buffer) - catchupSize, catchupSize); + memcpy(lastStripe + catchupSize, state->buffer, state->bufferedSize); + XXH3p_accumulate_512(acc, + lastStripe, + state->secret + state->secretLimit - XXH_SECRET_LASTACC_START, + accWidth); + } } +} + +XXH_PUBLIC_API XXH64_hash_t XXH3p_64bits_digest (const XXH3p_state_t* state) +{ + if (state->totalLen > XXH3p_MIDSIZE_MAX) { + XXH_ALIGN(XXH_ACC_ALIGN) XXH64_hash_t acc[ACC_NB]; + XXH3p_digest_long(acc, state, XXH3p_acc_64bits); + return XXH3p_mergeAccs(acc, state->secret + XXH_SECRET_MERGEACCS_START, (xxh_u64)state->totalLen * PRIME64_1); + } + /* len <= XXH3p_MIDSIZE_MAX : short code */ + if (state->seed) + return XXH3p_64bits_withSeed(state->buffer, (size_t)state->totalLen, state->seed); + return XXH3p_64bits_withSecret(state->buffer, (size_t)(state->totalLen), state->secret, state->secretLimit + STRIPE_LEN); +} + +/* ========================================== + * XXH3 128 bits (=> XXH128) + * ========================================== */ + +XXH_FORCE_INLINE XXH128_hash_t +XXH3p_len_1to3_128b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed) +{ + XXH_ASSERT(input != NULL); + XXH_ASSERT(1 <= len && len <= 3); + XXH_ASSERT(secret != NULL); + { xxh_u8 const c1 = input[0]; + xxh_u8 const c2 = input[len >> 1]; + xxh_u8 const c3 = input[len - 1]; + xxh_u32 const combinedl = ((xxh_u32)c1) + (((xxh_u32)c2) << 8) + (((xxh_u32)c3) << 16) + (((xxh_u32)len) << 24); + xxh_u32 const combinedh = XXH_swap32(combinedl); + xxh_u64 const keyed_lo = (xxh_u64)combinedl ^ (XXH_readLE32(secret) + seed); + xxh_u64 const keyed_hi = (xxh_u64)combinedh ^ (XXH_readLE32(secret+4) - seed); + xxh_u64 const mixedl = keyed_lo * PRIME64_1; + xxh_u64 const mixedh = keyed_hi * PRIME64_5; + XXH128_hash_t const h128 = { XXH3p_avalanche(mixedl) /*low64*/, 
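/* Editorial note (illustrative sketch, not part of the upstream patch; chunk1, chunk2 and their
   lengths are hypothetical): the streaming API defined above follows the usual
   create/reset/update/digest lifecycle, and the digest matches the one-shot hash of the
   concatenated input:

     XXH3p_state_t* st = XXH3p_createState();
     if (st != NULL && XXH3p_64bits_reset_withSeed(st, 42) == XXH_OK) {
       XXH3p_64bits_update(st, chunk1, len1);
       XXH3p_64bits_update(st, chunk2, len2);
       XXH64_hash_t h = XXH3p_64bits_digest(st);   // state stays valid; more updates allowed
       (void)h;
     }
     XXH3p_freeState(st);
*/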
XXH3p_avalanche(mixedh) /*high64*/ }; + return h128; + } +} + + +XXH_FORCE_INLINE XXH128_hash_t +XXH3p_len_4to8_128b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed) +{ + XXH_ASSERT(input != NULL); + XXH_ASSERT(secret != NULL); + XXH_ASSERT(4 <= len && len <= 8); + { xxh_u32 const input_lo = XXH_readLE32(input); + xxh_u32 const input_hi = XXH_readLE32(input + len - 4); + xxh_u64 const input_64_lo = input_lo + ((xxh_u64)input_hi << 32); + xxh_u64 const input_64_hi = XXH_swap64(input_64_lo); + xxh_u64 const keyed_lo = input_64_lo ^ (XXH_readLE64(secret) + seed); + xxh_u64 const keyed_hi = input_64_hi ^ (XXH_readLE64(secret + 8) - seed); + xxh_u64 const mix64l1 = len + ((keyed_lo ^ (keyed_lo >> 51)) * PRIME32_1); + xxh_u64 const mix64l2 = (mix64l1 ^ (mix64l1 >> 47)) * PRIME64_2; + xxh_u64 const mix64h1 = ((keyed_hi ^ (keyed_hi >> 47)) * PRIME64_1) - len; + xxh_u64 const mix64h2 = (mix64h1 ^ (mix64h1 >> 43)) * PRIME64_4; + { XXH128_hash_t const h128 = { XXH3p_avalanche(mix64l2) /*low64*/, XXH3p_avalanche(mix64h2) /*high64*/ }; + return h128; + } } +} + +XXH_FORCE_INLINE XXH128_hash_t +XXH3p_len_9to16_128b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed) +{ + XXH_ASSERT(input != NULL); + XXH_ASSERT(secret != NULL); + XXH_ASSERT(9 <= len && len <= 16); + { xxh_u64 const input_lo = XXH_readLE64(input) ^ (XXH_readLE64(secret) + seed); + xxh_u64 const input_hi = XXH_readLE64(input + len - 8) ^ (XXH_readLE64(secret+8) - seed); + XXH128_hash_t m128 = XXH_mult64to128(input_lo ^ input_hi, PRIME64_1); + xxh_u64 const lenContrib = XXH_mult32to64(len, PRIME32_5); + m128.low64 += lenContrib; + m128.high64 += input_hi * PRIME64_1; + m128.low64 ^= (m128.high64 >> 32); + { XXH128_hash_t h128 = XXH_mult64to128(m128.low64, PRIME64_2); + h128.high64 += m128.high64 * PRIME64_2; + h128.low64 = XXH3p_avalanche(h128.low64); + h128.high64 = XXH3p_avalanche(h128.high64); + return h128; + } } +} + +/* Assumption : `secret` size is >= 16 + * Note : it should be >= XXH3p_SECRET_SIZE_MIN anyway */ +XXH_FORCE_INLINE XXH128_hash_t +XXH3p_len_0to16_128b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed) +{ + XXH_ASSERT(len <= 16); + { if (len > 8) return XXH3p_len_9to16_128b(input, len, secret, seed); + if (len >= 4) return XXH3p_len_4to8_128b(input, len, secret, seed); + if (len) return XXH3p_len_1to3_128b(input, len, secret, seed); + { XXH128_hash_t const h128 = { 0, 0 }; + return h128; + } } +} + +XXH_FORCE_INLINE XXH128_hash_t +XXH3p_hashLong_128b_internal(const xxh_u8* XXH_RESTRICT input, size_t len, + const xxh_u8* XXH_RESTRICT secret, size_t secretSize) +{ + XXH_ALIGN(XXH_ACC_ALIGN) xxh_u64 acc[ACC_NB] = XXH3p_INIT_ACC; + + XXH3p_hashLong_internal_loop(acc, input, len, secret, secretSize, XXH3p_acc_128bits); + + /* converge into final hash */ + XXH_STATIC_ASSERT(sizeof(acc) == 64); + XXH_ASSERT(secretSize >= sizeof(acc) + XXH_SECRET_MERGEACCS_START); + { xxh_u64 const low64 = XXH3p_mergeAccs(acc, secret + XXH_SECRET_MERGEACCS_START, (xxh_u64)len * PRIME64_1); + xxh_u64 const high64 = XXH3p_mergeAccs(acc, secret + secretSize - sizeof(acc) - XXH_SECRET_MERGEACCS_START, ~((xxh_u64)len * PRIME64_2)); + XXH128_hash_t const h128 = { low64, high64 }; + return h128; + } +} + +XXH_NO_INLINE XXH128_hash_t /* It's important for performance that XXH3p_hashLong is not inlined. 
Not sure why (uop cache maybe ?), but difference is large and easily measurable */ +XXH3p_hashLong_128b_defaultSecret(const xxh_u8* input, size_t len) +{ + return XXH3p_hashLong_128b_internal(input, len, kSecret, sizeof(kSecret)); +} + +XXH_NO_INLINE XXH128_hash_t /* It's important for performance that XXH3p_hashLong is not inlined. Not sure why (uop cache maybe ?), but difference is large and easily measurable */ +XXH3p_hashLong_128b_withSecret(const xxh_u8* input, size_t len, + const xxh_u8* secret, size_t secretSize) +{ + return XXH3p_hashLong_128b_internal(input, len, secret, secretSize); +} + +XXH_NO_INLINE XXH128_hash_t /* It's important for performance that XXH3p_hashLong is not inlined. Not sure why (uop cache maybe ?), but difference is large and easily measurable */ +XXH3p_hashLong_128b_withSeed(const xxh_u8* input, size_t len, XXH64_hash_t seed) +{ + XXH_ALIGN(8) xxh_u8 secret[XXH_SECRET_DEFAULT_SIZE]; + if (seed == 0) return XXH3p_hashLong_128b_defaultSecret(input, len); + XXH3p_initCustomSecret(secret, seed); + return XXH3p_hashLong_128b_internal(input, len, secret, sizeof(secret)); +} + + +XXH_FORCE_INLINE XXH128_hash_t +XXH128_mix32B(XXH128_hash_t acc, const xxh_u8* input_1, const xxh_u8* input_2, const xxh_u8* secret, XXH64_hash_t seed) +{ + acc.low64 += XXH3p_mix16B (input_1, secret+0, seed); + acc.low64 ^= XXH_readLE64(input_2) + XXH_readLE64(input_2 + 8); + acc.high64 += XXH3p_mix16B (input_2, secret+16, seed); + acc.high64 ^= XXH_readLE64(input_1) + XXH_readLE64(input_1 + 8); + return acc; +} + +XXH_NO_INLINE XXH128_hash_t +XXH3p_len_129to240_128b(const xxh_u8* XXH_RESTRICT input, size_t len, + const xxh_u8* XXH_RESTRICT secret, size_t secretSize, + XXH64_hash_t seed) +{ + XXH_ASSERT(secretSize >= XXH3p_SECRET_SIZE_MIN); (void)secretSize; + XXH_ASSERT(128 < len && len <= XXH3p_MIDSIZE_MAX); + + { XXH128_hash_t acc; + int const nbRounds = (int)len / 32; + int i; + acc.low64 = len * PRIME64_1; + acc.high64 = 0; + for (i=0; i<4; i++) { + acc = XXH128_mix32B(acc, input+(32*i), input+(32*i)+16, secret+(32*i), seed); + } + acc.low64 = XXH3p_avalanche(acc.low64); + acc.high64 = XXH3p_avalanche(acc.high64); + XXH_ASSERT(nbRounds >= 4); + for (i=4 ; i < nbRounds; i++) { + acc = XXH128_mix32B(acc, input+(32*i), input+(32*i)+16, secret+XXH3p_MIDSIZE_STARTOFFSET+(32*(i-4)), seed); + } + /* last bytes */ + acc = XXH128_mix32B(acc, input + len - 16, input + len - 32, secret + XXH3p_SECRET_SIZE_MIN - XXH3p_MIDSIZE_LASTOFFSET - 16, 0ULL - seed); + + { xxh_u64 const low64 = acc.low64 + acc.high64; + xxh_u64 const high64 = (acc.low64 * PRIME64_1) + (acc.high64 * PRIME64_4) + ((len - seed) * PRIME64_2); + XXH128_hash_t const h128 = { XXH3p_avalanche(low64), (XXH64_hash_t)0 - XXH3p_avalanche(high64) }; + return h128; + } + } +} + + +XXH_FORCE_INLINE XXH128_hash_t +XXH3p_len_17to128_128b(const xxh_u8* XXH_RESTRICT input, size_t len, + const xxh_u8* XXH_RESTRICT secret, size_t secretSize, + XXH64_hash_t seed) +{ + XXH_ASSERT(secretSize >= XXH3p_SECRET_SIZE_MIN); (void)secretSize; + XXH_ASSERT(16 < len && len <= 128); + + { XXH128_hash_t acc; + acc.low64 = len * PRIME64_1; + acc.high64 = 0; + if (len > 32) { + if (len > 64) { + if (len > 96) { + acc = XXH128_mix32B(acc, input+48, input+len-64, secret+96, seed); + } + acc = XXH128_mix32B(acc, input+32, input+len-48, secret+64, seed); + } + acc = XXH128_mix32B(acc, input+16, input+len-32, secret+32, seed); + } + acc = XXH128_mix32B(acc, input, input+len-16, secret, seed); + { xxh_u64 const low64 = acc.low64 + acc.high64; + xxh_u64 const high64 
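/* Editorial note (descriptive, editor's reading of the code): the closing fold just below
   combines both accumulator halves with different prime multipliers, presumably so that the
   low64 and high64 words of the result remain largely independent; the high half is
   additionally negated after avalanching. */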
= (acc.low64 * PRIME64_1) + (acc.high64 * PRIME64_4) + ((len - seed) * PRIME64_2); + XXH128_hash_t const h128 = { XXH3p_avalanche(low64), (XXH64_hash_t)0 - XXH3p_avalanche(high64) }; + return h128; + } + } +} + +XXH_PUBLIC_API XXH128_hash_t XXH3p_128bits(const void* input, size_t len) +{ + if (len <= 16) return XXH3p_len_0to16_128b((const xxh_u8*)input, len, kSecret, 0); + if (len <= 128) return XXH3p_len_17to128_128b((const xxh_u8*)input, len, kSecret, sizeof(kSecret), 0); + if (len <= XXH3p_MIDSIZE_MAX) return XXH3p_len_129to240_128b((const xxh_u8*)input, len, kSecret, sizeof(kSecret), 0); + return XXH3p_hashLong_128b_defaultSecret((const xxh_u8*)input, len); +} + +XXH_PUBLIC_API XXH128_hash_t +XXH3p_128bits_withSecret(const void* input, size_t len, const void* secret, size_t secretSize) +{ + XXH_ASSERT(secretSize >= XXH3p_SECRET_SIZE_MIN); + /* if an action must be taken should `secret` conditions not be respected, + * it should be done here. + * For now, it's a contract pre-condition. + * Adding a check and a branch here would cost performance at every hash */ + if (len <= 16) return XXH3p_len_0to16_128b((const xxh_u8*)input, len, (const xxh_u8*)secret, 0); + if (len <= 128) return XXH3p_len_17to128_128b((const xxh_u8*)input, len, (const xxh_u8*)secret, secretSize, 0); + if (len <= XXH3p_MIDSIZE_MAX) return XXH3p_len_129to240_128b((const xxh_u8*)input, len, (const xxh_u8*)secret, secretSize, 0); + return XXH3p_hashLong_128b_withSecret((const xxh_u8*)input, len, (const xxh_u8*)secret, secretSize); +} + +XXH_PUBLIC_API XXH128_hash_t +XXH3p_128bits_withSeed(const void* input, size_t len, XXH64_hash_t seed) +{ + if (len <= 16) return XXH3p_len_0to16_128b((const xxh_u8*)input, len, kSecret, seed); + if (len <= 128) return XXH3p_len_17to128_128b((const xxh_u8*)input, len, kSecret, sizeof(kSecret), seed); + if (len <= XXH3p_MIDSIZE_MAX) return XXH3p_len_129to240_128b((const xxh_u8*)input, len, kSecret, sizeof(kSecret), seed); + return XXH3p_hashLong_128b_withSeed((const xxh_u8*)input, len, seed); +} + +XXH_PUBLIC_API XXH128_hash_t +XXH128(const void* input, size_t len, XXH64_hash_t seed) +{ + return XXH3p_128bits_withSeed(input, len, seed); +} + + +/* === XXH3 128-bit streaming === */ + +/* all the functions are actually the same as for 64-bit streaming variant, + just the reset one is different (different initial acc values for 0,5,6,7), + and near the end of the digest function */ + +static void +XXH3p_128bits_reset_internal(XXH3p_state_t* statePtr, + XXH64_hash_t seed, + const xxh_u8* secret, size_t secretSize) +{ + XXH3p_64bits_reset_internal(statePtr, seed, secret, secretSize); +} + +XXH_PUBLIC_API XXH_errorcode +XXH3p_128bits_reset(XXH3p_state_t* statePtr) +{ + if (statePtr == NULL) return XXH_ERROR; + XXH3p_128bits_reset_internal(statePtr, 0, kSecret, XXH_SECRET_DEFAULT_SIZE); + return XXH_OK; +} + +XXH_PUBLIC_API XXH_errorcode +XXH3p_128bits_reset_withSecret(XXH3p_state_t* statePtr, const void* secret, size_t secretSize) +{ + if (statePtr == NULL) return XXH_ERROR; + XXH3p_128bits_reset_internal(statePtr, 0, (const xxh_u8*)secret, secretSize); + if (secret == NULL) return XXH_ERROR; + if (secretSize < XXH3p_SECRET_SIZE_MIN) return XXH_ERROR; + return XXH_OK; +} + +XXH_PUBLIC_API XXH_errorcode +XXH3p_128bits_reset_withSeed(XXH3p_state_t* statePtr, XXH64_hash_t seed) +{ + if (statePtr == NULL) return XXH_ERROR; + XXH3p_128bits_reset_internal(statePtr, seed, kSecret, XXH_SECRET_DEFAULT_SIZE); + XXH3p_initCustomSecret(statePtr->customSecret, seed); + statePtr->secret = 
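/* Editorial note (illustrative sketch, not part of the upstream patch; buf, len and seed are
   hypothetical): the 128-bit one-shot entry points above mirror the 64-bit ones, and XXH128()
   is simply the seeded variant:

     XXH128_hash_t h = XXH128(buf, len, seed);   // same as XXH3p_128bits_withSeed(buf, len, seed)
     // h.low64 / h.high64 hold the two halves; XXH128_isEqual() and XXH128_cmp(),
     // defined further below, compare complete 128-bit values.
*/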
statePtr->customSecret; + return XXH_OK; +} + +XXH_PUBLIC_API XXH_errorcode +XXH3p_128bits_update(XXH3p_state_t* state, const void* input, size_t len) +{ + return XXH3p_update(state, (const xxh_u8*)input, len, XXH3p_acc_128bits); +} + +XXH_PUBLIC_API XXH128_hash_t XXH3p_128bits_digest (const XXH3p_state_t* state) +{ + if (state->totalLen > XXH3p_MIDSIZE_MAX) { + XXH_ALIGN(XXH_ACC_ALIGN) XXH64_hash_t acc[ACC_NB]; + XXH3p_digest_long(acc, state, XXH3p_acc_128bits); + XXH_ASSERT(state->secretLimit + STRIPE_LEN >= sizeof(acc) + XXH_SECRET_MERGEACCS_START); + { xxh_u64 const low64 = XXH3p_mergeAccs(acc, state->secret + XXH_SECRET_MERGEACCS_START, (xxh_u64)state->totalLen * PRIME64_1); + xxh_u64 const high64 = XXH3p_mergeAccs(acc, state->secret + state->secretLimit + STRIPE_LEN - sizeof(acc) - XXH_SECRET_MERGEACCS_START, ~((xxh_u64)state->totalLen * PRIME64_2)); + XXH128_hash_t const h128 = { low64, high64 }; + return h128; + } + } + /* len <= XXH3p_MIDSIZE_MAX : short code */ + if (state->seed) + return XXH3p_128bits_withSeed(state->buffer, (size_t)state->totalLen, state->seed); + return XXH3p_128bits_withSecret(state->buffer, (size_t)(state->totalLen), state->secret, state->secretLimit + STRIPE_LEN); +} + +/* 128-bit utility functions */ + +#include /* memcmp */ + +/* return : 1 is equal, 0 if different */ +XXH_PUBLIC_API int XXH128_isEqual(XXH128_hash_t h1, XXH128_hash_t h2) +{ + /* note : XXH128_hash_t is compact, it has no padding byte */ + return !(memcmp(&h1, &h2, sizeof(h1))); +} + +/* This prototype is compatible with stdlib's qsort(). + * return : >0 if *h128_1 > *h128_2 + * <0 if *h128_1 < *h128_2 + * =0 if *h128_1 == *h128_2 */ +XXH_PUBLIC_API int XXH128_cmp(const void* h128_1, const void* h128_2) +{ + XXH128_hash_t const h1 = *(const XXH128_hash_t*)h128_1; + XXH128_hash_t const h2 = *(const XXH128_hash_t*)h128_2; + int const hcmp = (h1.high64 > h2.high64) - (h2.high64 > h1.high64); + /* note : bets that, in most cases, hash values are different */ + if (hcmp) return hcmp; + return (h1.low64 > h2.low64) - (h2.low64 > h1.low64); +} + + +/*====== Canonical representation ======*/ +XXH_PUBLIC_API void +XXH128_canonicalFromHash(XXH128_canonical_t* dst, XXH128_hash_t hash) +{ + XXH_STATIC_ASSERT(sizeof(XXH128_canonical_t) == sizeof(XXH128_hash_t)); + if (XXH_CPU_LITTLE_ENDIAN) { + hash.high64 = XXH_swap64(hash.high64); + hash.low64 = XXH_swap64(hash.low64); + } + memcpy(dst, &hash.high64, sizeof(hash.high64)); + memcpy((char*)dst + sizeof(hash.high64), &hash.low64, sizeof(hash.low64)); +} + +XXH_PUBLIC_API XXH128_hash_t +XXH128_hashFromCanonical(const XXH128_canonical_t* src) +{ + XXH128_hash_t h; + h.high64 = XXH_readBE64(src); + h.low64 = XXH_readBE64(src->digest + 8); + return h; +} + + + +#endif /* XXH3p_H */ diff --git a/util/xxhash.cc b/util/xxhash.cc index 91343f674..b35473e1a 100644 --- a/util/xxhash.cc +++ b/util/xxhash.cc @@ -1,97 +1,82 @@ -// Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. +// Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). /* -xxHash - Fast Hash algorithm -Copyright (C) 2012-2014, Yann Collet. 
-BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - -* Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. -* Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -You can contact the author at : -- xxHash source repository : http://code.google.com/p/xxhash/ +* xxHash - Fast Hash algorithm +* Copyright (C) 2012-2016, Yann Collet +* +* BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) +* +* Redistribution and use in source and binary forms, with or without +* modification, are permitted provided that the following conditions are +* met: +* +* * Redistributions of source code must retain the above copyright +* notice, this list of conditions and the following disclaimer. +* * Redistributions in binary form must reproduce the above +* copyright notice, this list of conditions and the following disclaimer +* in the documentation and/or other materials provided with the +* distribution. +* +* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+* +* You can contact the author at : +* - xxHash homepage: http://www.xxhash.com +* - xxHash source repository : https://github.com/Cyan4973/xxHash */ -//************************************** -// Tuning parameters -//************************************** +/* since xxhash.c can be included (via XXH_INLINE_ALL), + * it's good practice to protect it with guard + * in case of multiples inclusions */ +#ifndef XXHASH_C_01393879 +#define XXHASH_C_01393879 + +/* ************************************* +* Tuning parameters +***************************************/ /*!XXH_FORCE_MEMORY_ACCESS : - * By default, access to unaligned memory is controlled by `memcpy()`, which is - * safe and portable. Unfortunately, on some target/compiler combinations, the - * generated assembly is sub-optimal. The below switch allow to select different - * access method for improved performance. Method 0 (default) : use `memcpy()`. - * Safe and portable. Method 1 : `__packed` statement. It depends on compiler - * extension (ie, not portable). This method is safe if your compiler supports - * it, and *generally* as fast or faster than `memcpy`. Method 2 : direct - * access. This method doesn't depend on compiler but violate C standard. It can - * generate buggy code on targets which do not support unaligned memory - * accesses. But in some circumstances, it's the only known way to get the most - * performance (ie GCC + ARMv6) See http://stackoverflow.com/a/32095106/646947 - * for details. Prefer these methods in priority order (0 > 1 > 2) + * By default, access to unaligned memory is controlled by `memcpy()`, which is safe and portable. + * Unfortunately, on some target/compiler combinations, the generated assembly is sub-optimal. + * The below switch allow to select different access method for improved performance. + * Method 0 (default) : use `memcpy()`. Safe and portable. + * Method 1 : `__packed` statement. It depends on compiler extension (ie, not portable). + * This method is safe if your compiler supports it, and *generally* as fast or faster than `memcpy`. + * Method 2 : direct access. This method doesn't depend on compiler but violate C standard. + * It can generate buggy code on targets which do not support unaligned memory accesses. + * But in some circumstances, it's the only known way to get the most performance (ie GCC + ARMv6) + * See http://stackoverflow.com/a/32095106/646947 for details. 
+ * Prefer these methods in priority order (0 > 1 > 2) */ - -#include "util/util.h" - -#ifndef XXH_FORCE_MEMORY_ACCESS /* can be defined externally, on command line \ - for example */ -#if defined(__GNUC__) && \ - (defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || \ - defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) || \ - defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__)) -#define XXH_FORCE_MEMORY_ACCESS 2 -#elif (defined(__INTEL_COMPILER) && !defined(_WIN32)) || \ - (defined(__GNUC__) && \ - (defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) || \ - defined(__ARM_ARCH_7R__) || defined(__ARM_ARCH_7M__) || \ - defined(__ARM_ARCH_7S__))) -#define XXH_FORCE_MEMORY_ACCESS 1 -#endif +#ifndef XXH_FORCE_MEMORY_ACCESS /* can be defined externally, on command line for example */ +# if !defined(__clang__) && defined(__GNUC__) && defined(__ARM_FEATURE_UNALIGNED) && defined(__ARM_ARCH) && (__ARM_ARCH == 6) +# define XXH_FORCE_MEMORY_ACCESS 2 +# elif !defined(__clang__) && ((defined(__INTEL_COMPILER) && !defined(_WIN32)) || \ + (defined(__GNUC__) && (defined(__ARM_ARCH) && __ARM_ARCH >= 7))) +# define XXH_FORCE_MEMORY_ACCESS 1 +# endif #endif -// Unaligned memory access is automatically enabled for "common" CPU, such as x86. -// For others CPU, the compiler will be more cautious, and insert extra code to ensure aligned access is respected. -// If you know your target CPU supports unaligned memory access, you want to force this option manually to improve performance. -// You can also enable this parameter if you know your input data will always be aligned (boundaries of 4, for U32). -#if defined(__ARM_FEATURE_UNALIGNED) || defined(__i386) || defined(_M_IX86) || defined(__x86_64__) || defined(_M_X64) -# define XXH_USE_UNALIGNED_ACCESS 1 +/*!XXH_ACCEPT_NULL_INPUT_POINTER : + * If input pointer is NULL, xxHash default behavior is to dereference it, triggering a segfault. + * When this macro is enabled, xxHash actively checks input for null pointer. + * It it is, result for null input pointers is the same as a null-length input. + */ +#ifndef XXH_ACCEPT_NULL_INPUT_POINTER /* can be defined externally */ +# define XXH_ACCEPT_NULL_INPUT_POINTER 0 #endif -// XXH_ACCEPT_NULL_INPUT_POINTER : -// If the input pointer is a null pointer, xxHash default behavior is to trigger a memory access error, since it is a bad pointer. -// When this option is enabled, xxHash output for null input pointers will be the same as a null-length input. -// This option has a very small performance cost (only measurable on small inputs). -// By default, this option is disabled. To enable it, uncomment below define : -//#define XXH_ACCEPT_NULL_INPUT_POINTER 1 - -// XXH_FORCE_NATIVE_FORMAT : -// By default, xxHash library provides endian-independent Hash values, based on little-endian convention. -// Results are therefore identical for little-endian and big-endian CPU. -// This comes at a performance cost for big-endian CPU, since some swapping is required to emulate little-endian format. -// Should endian-independence be of no importance for your application, you may set the #define below to 1. -// It will improve speed for Big-endian CPU. -// This option has no impact on Little_Endian CPU. -#define XXH_FORCE_NATIVE_FORMAT 0 - /*!XXH_FORCE_ALIGN_CHECK : * This is a minor performance trick, only useful with lots of very small keys. * It means : check for aligned/unaligned input. @@ -100,976 +85,1069 @@ You can contact the author at : * or when alignment doesn't matter for performance. 
*/ #ifndef XXH_FORCE_ALIGN_CHECK /* can be defined externally */ -#if defined(__i386) || defined(_M_IX86) || defined(__x86_64__) || \ - defined(_M_X64) -#define XXH_FORCE_ALIGN_CHECK 0 -#else -#define XXH_FORCE_ALIGN_CHECK 1 -#endif -#endif - -//************************************** -// Compiler Specific Options -//************************************** -// Disable some Visual warning messages -#ifdef _MSC_VER // Visual Studio -# pragma warning(disable : 4127) // disable: C4127: conditional expression is constant -# pragma warning(disable : 4804) // disable: C4804: 'operation' : unsafe use of type 'bool' in operation (static assert line 313) +# if defined(__i386) || defined(_M_IX86) || defined(__x86_64__) || defined(_M_X64) +# define XXH_FORCE_ALIGN_CHECK 0 +# else +# define XXH_FORCE_ALIGN_CHECK 1 +# endif #endif -#ifdef _MSC_VER // Visual Studio -# define FORCE_INLINE static __forceinline -#else -# ifdef __GNUC__ -# define FORCE_INLINE static inline __attribute__((always_inline)) +/*!XXH_REROLL: + * Whether to reroll XXH32_finalize, and XXH64_finalize, + * instead of using an unrolled jump table/if statement loop. + * + * This is automatically defined on -Os/-Oz on GCC and Clang. */ +#ifndef XXH_REROLL +# if defined(__OPTIMIZE_SIZE__) +# define XXH_REROLL 1 # else -# define FORCE_INLINE static inline +# define XXH_REROLL 0 # endif #endif - -//************************************** -// Includes & Memory related functions -//************************************** -#include "xxhash.h" -// Modify the local functions below should you wish to use some other memory related routines -// for malloc(), free() +/* ************************************* +* Includes & Memory related functions +***************************************/ +/*! Modify the local functions below should you wish to use some other memory routines +* for malloc(), free() */ #include -FORCE_INLINE void* XXH_malloc(size_t s) { return malloc(s); } -FORCE_INLINE void XXH_free (void* p) { free(p); } -// for memcpy() +static void* XXH_malloc(size_t s) { return malloc(s); } +static void XXH_free (void* p) { free(p); } +/*! 
and for memcpy() */ #include -FORCE_INLINE void* XXH_memcpy(void* dest, const void* src, size_t size) { return memcpy(dest,src,size); } -#include /* assert */ - -namespace rocksdb { -//************************************** -// Basic Types -//************************************** -#if defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L // C99 -# include - typedef uint8_t BYTE; - typedef uint16_t U16; - typedef uint32_t U32; - typedef int32_t S32; - typedef uint64_t U64; +static void* XXH_memcpy(void* dest, const void* src, size_t size) { return memcpy(dest,src,size); } + +#include /* ULLONG_MAX */ + +#define XXH_STATIC_LINKING_ONLY +#include "xxhash.h" + +/* BEGIN RocksDB customizations */ +#include "util/util.h" /* for FALLTHROUGH_INTENDED, inserted as appropriate */ +/* END RocksDB customizations */ + +/* ************************************* +* Compiler Specific Options +***************************************/ +#ifdef _MSC_VER /* Visual Studio */ +# pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */ +# define XXH_FORCE_INLINE static __forceinline +# define XXH_NO_INLINE static __declspec(noinline) #else - typedef unsigned char BYTE; - typedef unsigned short U16; - typedef unsigned int U32; - typedef signed int S32; - typedef unsigned long long U64; +# if defined (__cplusplus) || defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L /* C99 */ +# ifdef __GNUC__ +# define XXH_FORCE_INLINE static inline __attribute__((always_inline)) +# define XXH_NO_INLINE static __attribute__((noinline)) +# else +# define XXH_FORCE_INLINE static inline +# define XXH_NO_INLINE static +# endif +# else +# define XXH_FORCE_INLINE static +# define XXH_NO_INLINE static +# endif /* __STDC_VERSION__ */ #endif -#if defined(__GNUC__) && !defined(XXH_USE_UNALIGNED_ACCESS) -# define _PACKED __attribute__ ((packed)) -#else -# define _PACKED + + +/* ************************************* +* Debug +***************************************/ +/* DEBUGLEVEL is expected to be defined externally, + * typically through compiler command line. + * Value must be a number. */ +#ifndef DEBUGLEVEL +# define DEBUGLEVEL 0 #endif -#if !defined(XXH_USE_UNALIGNED_ACCESS) && !defined(__GNUC__) -# ifdef __IBMC__ -# pragma pack(1) -# else -# pragma pack(push, 1) -# endif +#if (DEBUGLEVEL>=1) +# include /* note : can still be disabled with NDEBUG */ +# define XXH_ASSERT(c) assert(c) +#else +# define XXH_ASSERT(c) ((void)0) #endif -typedef struct _U32_S { U32 v; } _PACKED U32_S; +/* note : use after variable declarations */ +#define XXH_STATIC_ASSERT(c) { enum { XXH_sa = 1/(int)(!!(c)) }; } -#if !defined(XXH_USE_UNALIGNED_ACCESS) && !defined(__GNUC__) -# pragma pack(pop) + +/* ************************************* +* Basic Types +***************************************/ +#if !defined (__VMS) \ + && (defined (__cplusplus) \ + || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) ) +# include + typedef uint8_t xxh_u8; +#else + typedef unsigned char xxh_u8; #endif +typedef XXH32_hash_t xxh_u32; + -#define A32(x) (((U32_S *)(x))->v) +/* === Memory access === */ -#if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS == 2)) +#if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==2)) -/* Force direct memory access. Only works on CPU which support unaligned memory - * access in hardware */ -static U32 XXH_read32(const void* memPtr) { return *(const U32*)memPtr; } +/* Force direct memory access. 
Only works on CPU which support unaligned memory access in hardware */ +static xxh_u32 XXH_read32(const void* memPtr) { return *(const xxh_u32*) memPtr; } -#elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS == 1)) +#elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==1)) -/* __pack instructions are safer, but compiler specific, hence potentially - * problematic for some compilers */ +/* __pack instructions are safer, but compiler specific, hence potentially problematic for some compilers */ /* currently only defined for gcc and icc */ -typedef union { - U32 u32; -} __attribute__((packed)) unalign; -static U32 XXH_read32(const void* ptr) { return ((const unalign*)ptr)->u32; } +typedef union { xxh_u32 u32; } __attribute__((packed)) unalign; +static xxh_u32 XXH_read32(const void* ptr) { return ((const unalign*)ptr)->u32; } #else /* portable and safe solution. Generally efficient. * see : http://stackoverflow.com/a/32095106/646947 */ -static U32 XXH_read32(const void* memPtr) { - U32 val; - memcpy(&val, memPtr, sizeof(val)); - return val; +static xxh_u32 XXH_read32(const void* memPtr) +{ + xxh_u32 val; + memcpy(&val, memPtr, sizeof(val)); + return val; } -#endif /* XXH_FORCE_DIRECT_MEMORY_ACCESS */ +#endif /* XXH_FORCE_DIRECT_MEMORY_ACCESS */ + -//*************************************** -// Compiler-specific Functions and Macros -//*************************************** -#define GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__) +/* === Endianess === */ +typedef enum { XXH_bigEndian=0, XXH_littleEndian=1 } XXH_endianess; + +/* XXH_CPU_LITTLE_ENDIAN can be defined externally, for example on the compiler command line */ +#ifndef XXH_CPU_LITTLE_ENDIAN +# if defined(_WIN32) /* Windows is always little endian */ \ + || defined(__LITTLE_ENDIAN__) \ + || (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__) +# define XXH_CPU_LITTLE_ENDIAN 1 +# elif defined(__BIG_ENDIAN__) \ + || (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__) +# define XXH_CPU_LITTLE_ENDIAN 0 +# else +static int XXH_isLittleEndian(void) +{ + const union { xxh_u32 u; xxh_u8 c[4]; } one = { 1 }; /* don't use static : performance detrimental */ + return one.c[0]; +} +# define XXH_CPU_LITTLE_ENDIAN XXH_isLittleEndian() +# endif +#endif -// Note : although _rotl exists for minGW (GCC under windows), performance seems poor -#if defined(_MSC_VER) + + + +/* **************************************** +* Compiler-specific Functions and Macros +******************************************/ +#define XXH_GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__) + +#ifndef __has_builtin +# define __has_builtin(x) 0 +#endif + +#if !defined(NO_CLANG_BUILTIN) && __has_builtin(__builtin_rotateleft32) && __has_builtin(__builtin_rotateleft64) +# define XXH_rotl32 __builtin_rotateleft32 +# define XXH_rotl64 __builtin_rotateleft64 +/* Note : although _rotl exists for minGW (GCC under windows), performance seems poor */ +#elif defined(_MSC_VER) # define XXH_rotl32(x,r) _rotl(x,r) -#define XXH_rotl64(x, r) _rotl64(x, r) +# define XXH_rotl64(x,r) _rotl64(x,r) #else -# define XXH_rotl32(x,r) ((x << r) | (x >> (32 - r))) -#define XXH_rotl64(x, r) ((x << r) | (x >> (64 - r))) +# define XXH_rotl32(x,r) (((x) << (r)) | ((x) >> (32 - (r)))) +# define XXH_rotl64(x,r) (((x) << (r)) | ((x) >> (64 - (r)))) #endif -#if defined(_MSC_VER) // Visual Studio +#if defined(_MSC_VER) /* Visual Studio */ # define XXH_swap32 _byteswap_ulong -#elif GCC_VERSION >= 403 +#elif XXH_GCC_VERSION >= 403 # define XXH_swap32 
__builtin_bswap32 #else -static inline U32 XXH_swap32 (U32 x) { +static xxh_u32 XXH_swap32 (xxh_u32 x) +{ return ((x << 24) & 0xff000000 ) | - ((x << 8) & 0x00ff0000 ) | - ((x >> 8) & 0x0000ff00 ) | - ((x >> 24) & 0x000000ff );} + ((x << 8) & 0x00ff0000 ) | + ((x >> 8) & 0x0000ff00 ) | + ((x >> 24) & 0x000000ff ); +} #endif -//************************************** -// Constants -//************************************** -#define PRIME32_1 2654435761U -#define PRIME32_2 2246822519U -#define PRIME32_3 3266489917U -#define PRIME32_4 668265263U -#define PRIME32_5 374761393U +/* *************************** +* Memory reads +*****************************/ +typedef enum { XXH_aligned, XXH_unaligned } XXH_alignment; +XXH_FORCE_INLINE xxh_u32 XXH_readLE32(const void* ptr) +{ + return XXH_CPU_LITTLE_ENDIAN ? XXH_read32(ptr) : XXH_swap32(XXH_read32(ptr)); +} -//************************************** -// Architecture Macros -//************************************** -typedef enum { XXH_bigEndian=0, XXH_littleEndian=1 } XXH_endianess; -#ifndef XXH_CPU_LITTLE_ENDIAN // It is possible to define XXH_CPU_LITTLE_ENDIAN externally, for example using a compiler switch - static const int one = 1; -# define XXH_CPU_LITTLE_ENDIAN (*(char*)(&one)) -#endif +static xxh_u32 XXH_readBE32(const void* ptr) +{ + return XXH_CPU_LITTLE_ENDIAN ? XXH_swap32(XXH_read32(ptr)) : XXH_read32(ptr); +} +XXH_FORCE_INLINE xxh_u32 +XXH_readLE32_align(const void* ptr, XXH_alignment align) +{ + if (align==XXH_unaligned) { + return XXH_readLE32(ptr); + } else { + return XXH_CPU_LITTLE_ENDIAN ? *(const xxh_u32*)ptr : XXH_swap32(*(const xxh_u32*)ptr); + } +} -//************************************** -// Macros -//************************************** -#define XXH_STATIC_ASSERT(c) { enum { XXH_static_assert = 1/(!!(c)) }; } // use only *after* variable declarations +/* ************************************* +* Misc +***************************************/ +XXH_PUBLIC_API unsigned XXH_versionNumber (void) { return XXH_VERSION_NUMBER; } -//**************************** -// Memory reads -//**************************** -typedef enum { XXH_aligned, XXH_unaligned } XXH_alignment; -FORCE_INLINE U32 XXH_readLE32_align(const U32* ptr, XXH_endianess endian, XXH_alignment align) +/* ******************************************************************* +* 32-bit hash functions +*********************************************************************/ +static const xxh_u32 PRIME32_1 = 0x9E3779B1U; /* 0b10011110001101110111100110110001 */ +static const xxh_u32 PRIME32_2 = 0x85EBCA77U; /* 0b10000101111010111100101001110111 */ +static const xxh_u32 PRIME32_3 = 0xC2B2AE3DU; /* 0b11000010101100101010111000111101 */ +static const xxh_u32 PRIME32_4 = 0x27D4EB2FU; /* 0b00100111110101001110101100101111 */ +static const xxh_u32 PRIME32_5 = 0x165667B1U; /* 0b00010110010101100110011110110001 */ + +static xxh_u32 XXH32_round(xxh_u32 acc, xxh_u32 input) { - if (align==XXH_unaligned) - return endian==XXH_littleEndian ? A32(ptr) : XXH_swap32(A32(ptr)); - else - return endian==XXH_littleEndian ? *ptr : XXH_swap32(*ptr); -} - -FORCE_INLINE U32 XXH_readLE32_align(const void* ptr, XXH_endianess endian, - XXH_alignment align) { - if (align == XXH_unaligned) - return endian == XXH_littleEndian ? XXH_read32(ptr) - : XXH_swap32(XXH_read32(ptr)); - else - return endian == XXH_littleEndian ? 
*(const U32*)ptr - : XXH_swap32(*(const U32*)ptr); + acc += input * PRIME32_2; + acc = XXH_rotl32(acc, 13); + acc *= PRIME32_1; +#if defined(__GNUC__) && defined(__SSE4_1__) && !defined(XXH_ENABLE_AUTOVECTORIZE) + /* UGLY HACK: + * This inline assembly hack forces acc into a normal register. This is the + * only thing that prevents GCC and Clang from autovectorizing the XXH32 loop + * (pragmas and attributes don't work for some resason) without globally + * disabling SSE4.1. + * + * The reason we want to avoid vectorization is because despite working on + * 4 integers at a time, there are multiple factors slowing XXH32 down on + * SSE4: + * - There's a ridiculous amount of lag from pmulld (10 cycles of latency on newer chips!) + * making it slightly slower to multiply four integers at once compared to four + * integers independently. Even when pmulld was fastest, Sandy/Ivy Bridge, it is + * still not worth it to go into SSE just to multiply unless doing a long operation. + * + * - Four instructions are required to rotate, + * movqda tmp, v // not required with VEX encoding + * pslld tmp, 13 // tmp <<= 13 + * psrld v, 19 // x >>= 19 + * por v, tmp // x |= tmp + * compared to one for scalar: + * roll v, 13 // reliably fast across the board + * shldl v, v, 13 // Sandy Bridge and later prefer this for some reason + * + * - Instruction level parallelism is actually more beneficial here because the + * SIMD actually serializes this operation: While v1 is rotating, v2 can load data, + * while v3 can multiply. SSE forces them to operate together. + * + * How this hack works: + * __asm__("" // Declare an assembly block but don't declare any instructions + * : // However, as an Input/Output Operand, + * "+r" // constrain a read/write operand (+) as a general purpose register (r). + * (acc) // and set acc as the operand + * ); + * + * Because of the 'r', the compiler has promised that seed will be in a + * general purpose register and the '+' says that it will be 'read/write', + * so it has to assume it has changed. It is like volatile without all the + * loads and stores. + * + * Since the argument has to be in a normal register (not an SSE register), + * each time XXH32_round is called, it is impossible to vectorize. 
*/ + __asm__("" : "+r" (acc)); +#endif + return acc; } -FORCE_INLINE U32 XXH_readLE32(const U32* ptr, XXH_endianess endian) { - return XXH_readLE32_align(ptr, endian, XXH_unaligned); +/* mix all bits */ +static xxh_u32 XXH32_avalanche(xxh_u32 h32) +{ + h32 ^= h32 >> 15; + h32 *= PRIME32_2; + h32 ^= h32 >> 13; + h32 *= PRIME32_3; + h32 ^= h32 >> 16; + return(h32); } -//**************************** -// Simple Hash Functions -//**************************** -#define XXH_get32bits(p) XXH_readLE32_align(p, endian, align) +#define XXH_get32bits(p) XXH_readLE32_align(p, align) -FORCE_INLINE U32 XXH32_endian_align(const void* input, int len, U32 seed, XXH_endianess endian, XXH_alignment align) +static xxh_u32 +XXH32_finalize(xxh_u32 h32, const xxh_u8* ptr, size_t len, XXH_alignment align) { - const BYTE* p = (const BYTE*)input; - const BYTE* const bEnd = p + len; - U32 h32; - -#ifdef XXH_ACCEPT_NULL_INPUT_POINTER - if (p==NULL) { len=0; p=(const BYTE*)(size_t)16; } -#endif - - if (len>=16) - { - const BYTE* const limit = bEnd - 16; - U32 v1 = seed + PRIME32_1 + PRIME32_2; - U32 v2 = seed + PRIME32_2; - U32 v3 = seed + 0; - U32 v4 = seed - PRIME32_1; - - do - { - v1 += XXH_readLE32_align((const U32*)p, endian, align) * PRIME32_2; v1 = XXH_rotl32(v1, 13); v1 *= PRIME32_1; p+=4; - v2 += XXH_readLE32_align((const U32*)p, endian, align) * PRIME32_2; v2 = XXH_rotl32(v2, 13); v2 *= PRIME32_1; p+=4; - v3 += XXH_readLE32_align((const U32*)p, endian, align) * PRIME32_2; v3 = XXH_rotl32(v3, 13); v3 *= PRIME32_1; p+=4; - v4 += XXH_readLE32_align((const U32*)p, endian, align) * PRIME32_2; v4 = XXH_rotl32(v4, 13); v4 *= PRIME32_1; p+=4; - } while (p<=limit); - - h32 = XXH_rotl32(v1, 1) + XXH_rotl32(v2, 7) + XXH_rotl32(v3, 12) + XXH_rotl32(v4, 18); - } - else - { - h32 = seed + PRIME32_5; +#define PROCESS1 \ + h32 += (*ptr++) * PRIME32_5; \ + h32 = XXH_rotl32(h32, 11) * PRIME32_1 ; + +#define PROCESS4 \ + h32 += XXH_get32bits(ptr) * PRIME32_3; \ + ptr+=4; \ + h32 = XXH_rotl32(h32, 17) * PRIME32_4 ; + + /* Compact rerolled version */ + if (XXH_REROLL) { + len &= 15; + while (len >= 4) { + PROCESS4; + len -= 4; + } + while (len > 0) { + PROCESS1; + --len; + } + return XXH32_avalanche(h32); + } else { + switch(len&15) /* or switch(bEnd - p) */ { + case 12: PROCESS4; + FALLTHROUGH_INTENDED; + /* fallthrough */ + case 8: PROCESS4; + FALLTHROUGH_INTENDED; + /* fallthrough */ + case 4: PROCESS4; + return XXH32_avalanche(h32); + + case 13: PROCESS4; + FALLTHROUGH_INTENDED; + /* fallthrough */ + case 9: PROCESS4; + FALLTHROUGH_INTENDED; + /* fallthrough */ + case 5: PROCESS4; + PROCESS1; + return XXH32_avalanche(h32); + + case 14: PROCESS4; + FALLTHROUGH_INTENDED; + /* fallthrough */ + case 10: PROCESS4; + FALLTHROUGH_INTENDED; + /* fallthrough */ + case 6: PROCESS4; + PROCESS1; + PROCESS1; + return XXH32_avalanche(h32); + + case 15: PROCESS4; + FALLTHROUGH_INTENDED; + /* fallthrough */ + case 11: PROCESS4; + FALLTHROUGH_INTENDED; + /* fallthrough */ + case 7: PROCESS4; + FALLTHROUGH_INTENDED; + /* fallthrough */ + case 3: PROCESS1; + FALLTHROUGH_INTENDED; + /* fallthrough */ + case 2: PROCESS1; + FALLTHROUGH_INTENDED; + /* fallthrough */ + case 1: PROCESS1; + FALLTHROUGH_INTENDED; + /* fallthrough */ + case 0: return XXH32_avalanche(h32); + } + XXH_ASSERT(0); + return h32; /* reaching this point is deemed impossible */ } +} - h32 += (U32) len; +XXH_FORCE_INLINE xxh_u32 +XXH32_endian_align(const xxh_u8* input, size_t len, xxh_u32 seed, XXH_alignment align) +{ + const xxh_u8* bEnd = input + len; + xxh_u32 h32; - while 
(p<=bEnd-4) - { - h32 += XXH_readLE32_align((const U32*)p, endian, align) * PRIME32_3; - h32 = XXH_rotl32(h32, 17) * PRIME32_4 ; - p+=4; +#if defined(XXH_ACCEPT_NULL_INPUT_POINTER) && (XXH_ACCEPT_NULL_INPUT_POINTER>=1) + if (input==NULL) { + len=0; + bEnd=input=(const xxh_u8*)(size_t)16; } +#endif - while (p=16) { + const xxh_u8* const limit = bEnd - 15; + xxh_u32 v1 = seed + PRIME32_1 + PRIME32_2; + xxh_u32 v2 = seed + PRIME32_2; + xxh_u32 v3 = seed + 0; + xxh_u32 v4 = seed - PRIME32_1; + + do { + v1 = XXH32_round(v1, XXH_get32bits(input)); input += 4; + v2 = XXH32_round(v2, XXH_get32bits(input)); input += 4; + v3 = XXH32_round(v3, XXH_get32bits(input)); input += 4; + v4 = XXH32_round(v4, XXH_get32bits(input)); input += 4; + } while (input < limit); + + h32 = XXH_rotl32(v1, 1) + XXH_rotl32(v2, 7) + + XXH_rotl32(v3, 12) + XXH_rotl32(v4, 18); + } else { + h32 = seed + PRIME32_5; } - h32 ^= h32 >> 15; - h32 *= PRIME32_2; - h32 ^= h32 >> 13; - h32 *= PRIME32_3; - h32 ^= h32 >> 16; + h32 += (xxh_u32)len; - return h32; + return XXH32_finalize(h32, input, len&15, align); } -U32 XXH32(const void* input, int len, U32 seed) +XXH_PUBLIC_API XXH32_hash_t XXH32 (const void* input, size_t len, XXH32_hash_t seed) { #if 0 - // Simple version, good for code maintenance, but unfortunately slow for small inputs - void* state = XXH32_init(seed); - XXH32_update(state, input, len); - return XXH32_digest(state); + /* Simple version, good for code maintenance, but unfortunately slow for small inputs */ + XXH32_state_t state; + XXH32_reset(&state, seed); + XXH32_update(&state, (const xxh_u8*)input, len); + return XXH32_digest(&state); + #else - XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN; - -# if !defined(XXH_USE_UNALIGNED_ACCESS) - if ((((size_t)input) & 3)) // Input is aligned, let's leverage the speed advantage - { - if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT) - return XXH32_endian_align(input, len, seed, XXH_littleEndian, XXH_aligned); - else - return XXH32_endian_align(input, len, seed, XXH_bigEndian, XXH_aligned); - } -# endif - if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT) - return XXH32_endian_align(input, len, seed, XXH_littleEndian, XXH_unaligned); - else - return XXH32_endian_align(input, len, seed, XXH_bigEndian, XXH_unaligned); + if (XXH_FORCE_ALIGN_CHECK) { + if ((((size_t)input) & 3) == 0) { /* Input is 4-bytes aligned, leverage the speed benefit */ + return XXH32_endian_align((const xxh_u8*)input, len, seed, XXH_aligned); + } } + + return XXH32_endian_align((const xxh_u8*)input, len, seed, XXH_unaligned); #endif } -//**************************** -// Advanced Hash Functions -//**************************** -struct XXH_state32_t -{ - U64 total_len; - U32 seed; - U32 v1; - U32 v2; - U32 v3; - U32 v4; - int memsize; - char memory[16]; -}; - - -int XXH32_sizeofState() +/*====== Hash streaming ======*/ + +XXH_PUBLIC_API XXH32_state_t* XXH32_createState(void) { - XXH_STATIC_ASSERT(XXH32_SIZEOFSTATE >= sizeof(struct XXH_state32_t)); // A compilation error here means XXH32_SIZEOFSTATE is not large enough - return sizeof(struct XXH_state32_t); + return (XXH32_state_t*)XXH_malloc(sizeof(XXH32_state_t)); } - - -XXH_errorcode XXH32_resetState(void* state_in, U32 seed) +XXH_PUBLIC_API XXH_errorcode XXH32_freeState(XXH32_state_t* statePtr) { - struct XXH_state32_t * state = (struct XXH_state32_t *) state_in; - state->seed = seed; - state->v1 = seed + PRIME32_1 + PRIME32_2; - state->v2 = seed + PRIME32_2; - state->v3 = seed + 0; - state->v4 = 
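/* Editorial note (illustrative sketch, not part of the upstream patch; data and size are
   hypothetical): this hunk replaces the old malloc-per-hash streaming interface
   (XXH32_init / XXH32_update / XXH32_digest on a void* state, where digest also freed the
   state) with an explicit-state API:

     XXH32_state_t* st = XXH32_createState();
     XXH32_reset(st, 0);                  // seed 0
     XXH32_update(st, data, size);
     XXH32_hash_t h = XXH32_digest(st);   // no longer frees the state
     (void)h;
     XXH32_freeState(st);
*/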
seed - PRIME32_1; - state->total_len = 0; - state->memsize = 0; + XXH_free(statePtr); return XXH_OK; } +XXH_PUBLIC_API void XXH32_copyState(XXH32_state_t* dstState, const XXH32_state_t* srcState) +{ + memcpy(dstState, srcState, sizeof(*dstState)); +} -void* XXH32_init (U32 seed) +XXH_PUBLIC_API XXH_errorcode XXH32_reset(XXH32_state_t* statePtr, XXH32_hash_t seed) { - void* state = XXH_malloc (sizeof(struct XXH_state32_t)); - XXH32_resetState(state, seed); - return state; + XXH32_state_t state; /* using a local state to memcpy() in order to avoid strict-aliasing warnings */ + memset(&state, 0, sizeof(state)); + state.v1 = seed + PRIME32_1 + PRIME32_2; + state.v2 = seed + PRIME32_2; + state.v3 = seed + 0; + state.v4 = seed - PRIME32_1; + /* do not write into reserved, planned to be removed in a future version */ + memcpy(statePtr, &state, sizeof(state) - sizeof(state.reserved)); + return XXH_OK; } -FORCE_INLINE XXH_errorcode XXH32_update_endian (void* state_in, const void* input, int len, XXH_endianess endian) +XXH_PUBLIC_API XXH_errorcode +XXH32_update(XXH32_state_t* state, const void* input, size_t len) { - struct XXH_state32_t * state = (struct XXH_state32_t *) state_in; - const BYTE* p = (const BYTE*)input; - const BYTE* const bEnd = p + len; - -#ifdef XXH_ACCEPT_NULL_INPUT_POINTER - if (input==NULL) return XXH_ERROR; + if (input==NULL) +#if defined(XXH_ACCEPT_NULL_INPUT_POINTER) && (XXH_ACCEPT_NULL_INPUT_POINTER>=1) + return XXH_OK; +#else + return XXH_ERROR; #endif - state->total_len += len; + { const xxh_u8* p = (const xxh_u8*)input; + const xxh_u8* const bEnd = p + len; - if (state->memsize + len < 16) // fill in tmp buffer - { - XXH_memcpy(state->memory + state->memsize, input, len); - state->memsize += len; - return XXH_OK; - } + state->total_len_32 += (XXH32_hash_t)len; + state->large_len |= (XXH32_hash_t)((len>=16) | (state->total_len_32>=16)); - if (state->memsize) // some data left from previous update - { - XXH_memcpy(state->memory + state->memsize, input, 16-state->memsize); - { - const U32* p32 = (const U32*)state->memory; - state->v1 += XXH_readLE32(p32, endian) * PRIME32_2; state->v1 = XXH_rotl32(state->v1, 13); state->v1 *= PRIME32_1; p32++; - state->v2 += XXH_readLE32(p32, endian) * PRIME32_2; state->v2 = XXH_rotl32(state->v2, 13); state->v2 *= PRIME32_1; p32++; - state->v3 += XXH_readLE32(p32, endian) * PRIME32_2; state->v3 = XXH_rotl32(state->v3, 13); state->v3 *= PRIME32_1; p32++; - state->v4 += XXH_readLE32(p32, endian) * PRIME32_2; state->v4 = XXH_rotl32(state->v4, 13); state->v4 *= PRIME32_1; p32++; + if (state->memsize + len < 16) { /* fill in tmp buffer */ + XXH_memcpy((xxh_u8*)(state->mem32) + state->memsize, input, len); + state->memsize += (XXH32_hash_t)len; + return XXH_OK; } - p += 16-state->memsize; - state->memsize = 0; - } - if (p <= bEnd-16) - { - const BYTE* const limit = bEnd - 16; - U32 v1 = state->v1; - U32 v2 = state->v2; - U32 v3 = state->v3; - U32 v4 = state->v4; - - do - { - v1 += XXH_readLE32((const U32*)p, endian) * PRIME32_2; v1 = XXH_rotl32(v1, 13); v1 *= PRIME32_1; p+=4; - v2 += XXH_readLE32((const U32*)p, endian) * PRIME32_2; v2 = XXH_rotl32(v2, 13); v2 *= PRIME32_1; p+=4; - v3 += XXH_readLE32((const U32*)p, endian) * PRIME32_2; v3 = XXH_rotl32(v3, 13); v3 *= PRIME32_1; p+=4; - v4 += XXH_readLE32((const U32*)p, endian) * PRIME32_2; v4 = XXH_rotl32(v4, 13); v4 *= PRIME32_1; p+=4; - } while (p<=limit); - - state->v1 = v1; - state->v2 = v2; - state->v3 = v3; - state->v4 = v4; - } + if (state->memsize) { /* some data left from previous update 
*/ + XXH_memcpy((xxh_u8*)(state->mem32) + state->memsize, input, 16-state->memsize); + { const xxh_u32* p32 = state->mem32; + state->v1 = XXH32_round(state->v1, XXH_readLE32(p32)); p32++; + state->v2 = XXH32_round(state->v2, XXH_readLE32(p32)); p32++; + state->v3 = XXH32_round(state->v3, XXH_readLE32(p32)); p32++; + state->v4 = XXH32_round(state->v4, XXH_readLE32(p32)); + } + p += 16-state->memsize; + state->memsize = 0; + } + + if (p <= bEnd-16) { + const xxh_u8* const limit = bEnd - 16; + xxh_u32 v1 = state->v1; + xxh_u32 v2 = state->v2; + xxh_u32 v3 = state->v3; + xxh_u32 v4 = state->v4; + + do { + v1 = XXH32_round(v1, XXH_readLE32(p)); p+=4; + v2 = XXH32_round(v2, XXH_readLE32(p)); p+=4; + v3 = XXH32_round(v3, XXH_readLE32(p)); p+=4; + v4 = XXH32_round(v4, XXH_readLE32(p)); p+=4; + } while (p<=limit); + + state->v1 = v1; + state->v2 = v2; + state->v3 = v3; + state->v4 = v4; + } - if (p < bEnd) - { - XXH_memcpy(state->memory, p, bEnd-p); - state->memsize = (int)(bEnd-p); + if (p < bEnd) { + XXH_memcpy(state->mem32, p, (size_t)(bEnd-p)); + state->memsize = (unsigned)(bEnd-p); + } } return XXH_OK; } -XXH_errorcode XXH32_update (void* state_in, const void* input, int len) -{ - XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN; - - if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT) - return XXH32_update_endian(state_in, input, len, XXH_littleEndian); - else - return XXH32_update_endian(state_in, input, len, XXH_bigEndian); -} - - -FORCE_INLINE U32 XXH32_intermediateDigest_endian (void* state_in, XXH_endianess endian) +XXH_PUBLIC_API XXH32_hash_t XXH32_digest (const XXH32_state_t* state) { - struct XXH_state32_t * state = (struct XXH_state32_t *) state_in; - const BYTE * p = (const BYTE*)state->memory; - BYTE* bEnd = (BYTE*)state->memory + state->memsize; - U32 h32; - - if (state->total_len >= 16) - { - h32 = XXH_rotl32(state->v1, 1) + XXH_rotl32(state->v2, 7) + XXH_rotl32(state->v3, 12) + XXH_rotl32(state->v4, 18); - } - else - { - h32 = state->seed + PRIME32_5; + xxh_u32 h32; + + if (state->large_len) { + h32 = XXH_rotl32(state->v1, 1) + + XXH_rotl32(state->v2, 7) + + XXH_rotl32(state->v3, 12) + + XXH_rotl32(state->v4, 18); + } else { + h32 = state->v3 /* == seed */ + PRIME32_5; } - h32 += (U32) state->total_len; + h32 += state->total_len_32; - while (p<=bEnd-4) - { - h32 += XXH_readLE32((const U32*)p, endian) * PRIME32_3; - h32 = XXH_rotl32(h32, 17) * PRIME32_4; - p+=4; - } + return XXH32_finalize(h32, (const xxh_u8*)state->mem32, state->memsize, XXH_aligned); +} - while (p> 15; - h32 *= PRIME32_2; - h32 ^= h32 >> 13; - h32 *= PRIME32_3; - h32 ^= h32 >> 16; - - return h32; -} +/*====== Canonical representation ======*/ +/*! Default XXH result types are basic unsigned 32 and 64 bits. +* The canonical representation follows human-readable write convention, aka big-endian (large digits first). +* These functions allow transformation of hash result into and from its canonical format. +* This way, hash values can be written into a file or buffer, remaining comparable across different systems. 
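* (Editorial example, not in the upstream comment: the canonical form of the 32-bit value
*  0x0A0B0C0D is the byte sequence {0x0A, 0x0B, 0x0C, 0x0D} on both little- and big-endian
*  hosts, because XXH32_canonicalFromHash byte-swaps on little-endian machines before
*  copying into the canonical struct.)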
+*/ -U32 XXH32_intermediateDigest (void* state_in) +XXH_PUBLIC_API void XXH32_canonicalFromHash(XXH32_canonical_t* dst, XXH32_hash_t hash) { - XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN; - - if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT) - return XXH32_intermediateDigest_endian(state_in, XXH_littleEndian); - else - return XXH32_intermediateDigest_endian(state_in, XXH_bigEndian); + XXH_STATIC_ASSERT(sizeof(XXH32_canonical_t) == sizeof(XXH32_hash_t)); + if (XXH_CPU_LITTLE_ENDIAN) hash = XXH_swap32(hash); + memcpy(dst, &hash, sizeof(*dst)); } - -U32 XXH32_digest (void* state_in) +XXH_PUBLIC_API XXH32_hash_t XXH32_hashFromCanonical(const XXH32_canonical_t* src) { - U32 h32 = XXH32_intermediateDigest(state_in); + return XXH_readBE32(src); +} - XXH_free(state_in); - return h32; -} +#ifndef XXH_NO_LONG_LONG /* ******************************************************************* - * 64-bit hash functions - *********************************************************************/ +* 64-bit hash functions +*********************************************************************/ + +/*====== Memory access ======*/ + +typedef XXH64_hash_t xxh_u64; + + +/*! XXH_REROLL_XXH64: + * Whether to reroll the XXH64_finalize() loop. + * + * Just like XXH32, we can unroll the XXH64_finalize() loop. This can be a performance gain + * on 64-bit hosts, as only one jump is required. + * + * However, on 32-bit hosts, because arithmetic needs to be done with two 32-bit registers, + * and 64-bit arithmetic needs to be simulated, it isn't beneficial to unroll. The code becomes + * ridiculously large (the largest function in the binary on i386!), and rerolling it saves + * anywhere from 3kB to 20kB. It is also slightly faster because it fits into cache better + * and is more likely to be inlined by the compiler. + * + * If XXH_REROLL is defined, this is ignored and the loop is always rerolled. */ +#ifndef XXH_REROLL_XXH64 +# if (defined(__ILP32__) || defined(_ILP32)) /* ILP32 is often defined on 32-bit GCC family */ \ + || !(defined(__x86_64__) || defined(_M_X64) || defined(_M_AMD64) /* x86-64 */ \ + || defined(_M_ARM64) || defined(__aarch64__) || defined(__arm64__) /* aarch64 */ \ + || defined(__PPC64__) || defined(__PPC64LE__) || defined(__ppc64__) || defined(__powerpc64__) /* ppc64 */ \ + || defined(__mips64__) || defined(__mips64)) /* mips64 */ \ + || (!defined(SIZE_MAX) || SIZE_MAX < ULLONG_MAX) /* check limits */ +# define XXH_REROLL_XXH64 1 +# else +# define XXH_REROLL_XXH64 0 +# endif +#endif /* !defined(XXH_REROLL_XXH64) */ - #if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==2)) +#if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==2)) - /* Force direct memory access. Only works on CPU which support unaligned memory access in hardware */ - static U64 XXH_read64(const void* memPtr) { return *(const U64*) memPtr; } +/* Force direct memory access. 
Only works on CPU which support unaligned memory access in hardware */ +static xxh_u64 XXH_read64(const void* memPtr) { return *(const xxh_u64*) memPtr; } - #elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==1)) +#elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==1)) - /* __pack instructions are safer, but compiler specific, hence potentially problematic for some compilers */ - /* currently only defined for gcc and icc */ - typedef union { U32 u32; U64 u64; } __attribute__((packed)) unalign64; - static U64 XXH_read64(const void* ptr) { return ((const unalign64*)ptr)->u64; } +/* __pack instructions are safer, but compiler specific, hence potentially problematic for some compilers */ +/* currently only defined for gcc and icc */ +typedef union { xxh_u32 u32; xxh_u64 u64; } __attribute__((packed)) unalign64; +static xxh_u64 XXH_read64(const void* ptr) { return ((const unalign64*)ptr)->u64; } - #else +#else - /* portable and safe solution. Generally efficient. - * see : http://stackoverflow.com/a/32095106/646947 - */ +/* portable and safe solution. Generally efficient. + * see : http://stackoverflow.com/a/32095106/646947 + */ + +static xxh_u64 XXH_read64(const void* memPtr) +{ + xxh_u64 val; + memcpy(&val, memPtr, sizeof(val)); + return val; +} - static U64 XXH_read64(const void* memPtr) - { - U64 val; - memcpy(&val, memPtr, sizeof(val)); - return val; - } #endif /* XXH_FORCE_DIRECT_MEMORY_ACCESS */ -#if defined(_MSC_VER) /* Visual Studio */ -#define XXH_swap64 _byteswap_uint64 +#if defined(_MSC_VER) /* Visual Studio */ +# define XXH_swap64 _byteswap_uint64 #elif XXH_GCC_VERSION >= 403 -#define XXH_swap64 __builtin_bswap64 +# define XXH_swap64 __builtin_bswap64 #else -static U64 XXH_swap64(U64 x) { - return ((x << 56) & 0xff00000000000000ULL) | - ((x << 40) & 0x00ff000000000000ULL) | - ((x << 24) & 0x0000ff0000000000ULL) | - ((x << 8) & 0x000000ff00000000ULL) | - ((x >> 8) & 0x00000000ff000000ULL) | - ((x >> 24) & 0x0000000000ff0000ULL) | - ((x >> 40) & 0x000000000000ff00ULL) | - ((x >> 56) & 0x00000000000000ffULL); +static xxh_u64 XXH_swap64 (xxh_u64 x) +{ + return ((x << 56) & 0xff00000000000000ULL) | + ((x << 40) & 0x00ff000000000000ULL) | + ((x << 24) & 0x0000ff0000000000ULL) | + ((x << 8) & 0x000000ff00000000ULL) | + ((x >> 8) & 0x00000000ff000000ULL) | + ((x >> 24) & 0x0000000000ff0000ULL) | + ((x >> 40) & 0x000000000000ff00ULL) | + ((x >> 56) & 0x00000000000000ffULL); } #endif -FORCE_INLINE U64 XXH_readLE64_align(const void* ptr, XXH_endianess endian, - XXH_alignment align) { - if (align == XXH_unaligned) - return endian == XXH_littleEndian ? XXH_read64(ptr) - : XXH_swap64(XXH_read64(ptr)); - else - return endian == XXH_littleEndian ? *(const U64*)ptr - : XXH_swap64(*(const U64*)ptr); +XXH_FORCE_INLINE xxh_u64 XXH_readLE64(const void* ptr) +{ + return XXH_CPU_LITTLE_ENDIAN ? XXH_read64(ptr) : XXH_swap64(XXH_read64(ptr)); } -FORCE_INLINE U64 XXH_readLE64(const void* ptr, XXH_endianess endian) { - return XXH_readLE64_align(ptr, endian, XXH_unaligned); +static xxh_u64 XXH_readBE64(const void* ptr) +{ + return XXH_CPU_LITTLE_ENDIAN ? XXH_swap64(XXH_read64(ptr)) : XXH_read64(ptr); } -static U64 XXH_readBE64(const void* ptr) { - return XXH_CPU_LITTLE_ENDIAN ? XXH_swap64(XXH_read64(ptr)) : XXH_read64(ptr); +XXH_FORCE_INLINE xxh_u64 +XXH_readLE64_align(const void* ptr, XXH_alignment align) +{ + if (align==XXH_unaligned) + return XXH_readLE64(ptr); + else + return XXH_CPU_LITTLE_ENDIAN ? 
*(const xxh_u64*)ptr : XXH_swap64(*(const xxh_u64*)ptr); } + /*====== xxh64 ======*/ -static const U64 PRIME64_1 = - 11400714785074694791ULL; /* 0b1001111000110111011110011011000110000101111010111100101010000111 - */ -static const U64 PRIME64_2 = - 14029467366897019727ULL; /* 0b1100001010110010101011100011110100100111110101001110101101001111 - */ -static const U64 PRIME64_3 = - 1609587929392839161ULL; /* 0b0001011001010110011001111011000110011110001101110111100111111001 - */ -static const U64 PRIME64_4 = - 9650029242287828579ULL; /* 0b1000010111101011110010100111011111000010101100101010111001100011 - */ -static const U64 PRIME64_5 = - 2870177450012600261ULL; /* 0b0010011111010100111010110010111100010110010101100110011111000101 - */ - -static U64 XXH64_round(U64 acc, U64 input) { - acc += input * PRIME64_2; - acc = XXH_rotl64(acc, 31); - acc *= PRIME64_1; - return acc; +static const xxh_u64 PRIME64_1 = 0x9E3779B185EBCA87ULL; /* 0b1001111000110111011110011011000110000101111010111100101010000111 */ +static const xxh_u64 PRIME64_2 = 0xC2B2AE3D27D4EB4FULL; /* 0b1100001010110010101011100011110100100111110101001110101101001111 */ +static const xxh_u64 PRIME64_3 = 0x165667B19E3779F9ULL; /* 0b0001011001010110011001111011000110011110001101110111100111111001 */ +static const xxh_u64 PRIME64_4 = 0x85EBCA77C2B2AE63ULL; /* 0b1000010111101011110010100111011111000010101100101010111001100011 */ +static const xxh_u64 PRIME64_5 = 0x27D4EB2F165667C5ULL; /* 0b0010011111010100111010110010111100010110010101100110011111000101 */ + +static xxh_u64 XXH64_round(xxh_u64 acc, xxh_u64 input) +{ + acc += input * PRIME64_2; + acc = XXH_rotl64(acc, 31); + acc *= PRIME64_1; + return acc; } -static U64 XXH64_mergeRound(U64 acc, U64 val) { - val = XXH64_round(0, val); - acc ^= val; - acc = acc * PRIME64_1 + PRIME64_4; - return acc; +static xxh_u64 XXH64_mergeRound(xxh_u64 acc, xxh_u64 val) +{ + val = XXH64_round(0, val); + acc ^= val; + acc = acc * PRIME64_1 + PRIME64_4; + return acc; } -static U64 XXH64_avalanche(U64 h64) { - h64 ^= h64 >> 33; - h64 *= PRIME64_2; - h64 ^= h64 >> 29; - h64 *= PRIME64_3; - h64 ^= h64 >> 32; - return h64; +static xxh_u64 XXH64_avalanche(xxh_u64 h64) +{ + h64 ^= h64 >> 33; + h64 *= PRIME64_2; + h64 ^= h64 >> 29; + h64 *= PRIME64_3; + h64 ^= h64 >> 32; + return h64; } -#define XXH_get64bits(p) XXH_readLE64_align(p, endian, align) - -static U64 XXH64_finalize(U64 h64, const void* ptr, size_t len, - XXH_endianess endian, XXH_alignment align) { - const BYTE* p = (const BYTE*)ptr; - -#define PROCESS1_64 \ - h64 ^= (*p++) * PRIME64_5; \ - h64 = XXH_rotl64(h64, 11) * PRIME64_1; - -#define PROCESS4_64 \ - h64 ^= (U64)(XXH_get32bits(p)) * PRIME64_1; \ - p += 4; \ - h64 = XXH_rotl64(h64, 23) * PRIME64_2 + PRIME64_3; - -#define PROCESS8_64 \ - { \ - U64 const k1 = XXH64_round(0, XXH_get64bits(p)); \ - p += 8; \ - h64 ^= k1; \ - h64 = XXH_rotl64(h64, 27) * PRIME64_1 + PRIME64_4; \ - } - - switch (len & 31) { - case 24: - PROCESS8_64; - FALLTHROUGH_INTENDED; - /* fallthrough */ - case 16: - PROCESS8_64; - FALLTHROUGH_INTENDED; - /* fallthrough */ - case 8: - PROCESS8_64; - return XXH64_avalanche(h64); - - case 28: - PROCESS8_64; - FALLTHROUGH_INTENDED; - /* fallthrough */ - case 20: - PROCESS8_64; - FALLTHROUGH_INTENDED; - /* fallthrough */ - case 12: - PROCESS8_64; - FALLTHROUGH_INTENDED; - /* fallthrough */ - case 4: - PROCESS4_64; - return XXH64_avalanche(h64); - - case 25: - PROCESS8_64; - FALLTHROUGH_INTENDED; - /* fallthrough */ - case 17: - PROCESS8_64; - FALLTHROUGH_INTENDED; - /* fallthrough */ - 
case 9: - PROCESS8_64; - PROCESS1_64; - return XXH64_avalanche(h64); - - case 29: - PROCESS8_64; - FALLTHROUGH_INTENDED; - /* fallthrough */ - case 21: - PROCESS8_64; - FALLTHROUGH_INTENDED; - /* fallthrough */ - case 13: - PROCESS8_64; - FALLTHROUGH_INTENDED; - /* fallthrough */ - case 5: - PROCESS4_64; - PROCESS1_64; - return XXH64_avalanche(h64); - - case 26: - PROCESS8_64; - FALLTHROUGH_INTENDED; - /* fallthrough */ - case 18: - PROCESS8_64; - FALLTHROUGH_INTENDED; - /* fallthrough */ - case 10: - PROCESS8_64; - PROCESS1_64; - PROCESS1_64; - return XXH64_avalanche(h64); - - case 30: - PROCESS8_64; - FALLTHROUGH_INTENDED; - /* fallthrough */ - case 22: - PROCESS8_64; - FALLTHROUGH_INTENDED; - /* fallthrough */ - case 14: - PROCESS8_64; - FALLTHROUGH_INTENDED; - /* fallthrough */ - case 6: - PROCESS4_64; - PROCESS1_64; - PROCESS1_64; - return XXH64_avalanche(h64); - - case 27: - PROCESS8_64; - FALLTHROUGH_INTENDED; - /* fallthrough */ - case 19: - PROCESS8_64; - FALLTHROUGH_INTENDED; - /* fallthrough */ - case 11: - PROCESS8_64; - PROCESS1_64; - PROCESS1_64; - PROCESS1_64; - return XXH64_avalanche(h64); - - case 31: - PROCESS8_64; - FALLTHROUGH_INTENDED; - /* fallthrough */ - case 23: - PROCESS8_64; - FALLTHROUGH_INTENDED; - /* fallthrough */ - case 15: - PROCESS8_64; - FALLTHROUGH_INTENDED; - /* fallthrough */ - case 7: - PROCESS4_64; - FALLTHROUGH_INTENDED; - /* fallthrough */ - case 3: - PROCESS1_64; - FALLTHROUGH_INTENDED; - /* fallthrough */ - case 2: - PROCESS1_64; - FALLTHROUGH_INTENDED; - /* fallthrough */ - case 1: - PROCESS1_64; - FALLTHROUGH_INTENDED; - /* fallthrough */ - case 0: - return XXH64_avalanche(h64); - } - - /* impossible to reach */ - assert(0); - return 0; /* unreachable, but some compilers complain without it */ + +#define XXH_get64bits(p) XXH_readLE64_align(p, align) + +static xxh_u64 +XXH64_finalize(xxh_u64 h64, const xxh_u8* ptr, size_t len, XXH_alignment align) +{ +#define PROCESS1_64 \ + h64 ^= (*ptr++) * PRIME64_5; \ + h64 = XXH_rotl64(h64, 11) * PRIME64_1; + +#define PROCESS4_64 \ + h64 ^= (xxh_u64)(XXH_get32bits(ptr)) * PRIME64_1; \ + ptr+=4; \ + h64 = XXH_rotl64(h64, 23) * PRIME64_2 + PRIME64_3; + +#define PROCESS8_64 { \ + xxh_u64 const k1 = XXH64_round(0, XXH_get64bits(ptr)); \ + ptr+=8; \ + h64 ^= k1; \ + h64 = XXH_rotl64(h64,27) * PRIME64_1 + PRIME64_4; \ +} + + /* Rerolled version for 32-bit targets is faster and much smaller. 
*/ + if (XXH_REROLL || XXH_REROLL_XXH64) { + len &= 31; + while (len >= 8) { + PROCESS8_64; + len -= 8; + } + if (len >= 4) { + PROCESS4_64; + len -= 4; + } + while (len > 0) { + PROCESS1_64; + --len; + } + return XXH64_avalanche(h64); + } else { + switch(len & 31) { + case 24: PROCESS8_64; + FALLTHROUGH_INTENDED; + /* fallthrough */ + case 16: PROCESS8_64; + FALLTHROUGH_INTENDED; + /* fallthrough */ + case 8: PROCESS8_64; + return XXH64_avalanche(h64); + + case 28: PROCESS8_64; + FALLTHROUGH_INTENDED; + /* fallthrough */ + case 20: PROCESS8_64; + FALLTHROUGH_INTENDED; + /* fallthrough */ + case 12: PROCESS8_64; + FALLTHROUGH_INTENDED; + /* fallthrough */ + case 4: PROCESS4_64; + return XXH64_avalanche(h64); + + case 25: PROCESS8_64; + FALLTHROUGH_INTENDED; + /* fallthrough */ + case 17: PROCESS8_64; + FALLTHROUGH_INTENDED; + /* fallthrough */ + case 9: PROCESS8_64; + PROCESS1_64; + return XXH64_avalanche(h64); + + case 29: PROCESS8_64; + FALLTHROUGH_INTENDED; + /* fallthrough */ + case 21: PROCESS8_64; + FALLTHROUGH_INTENDED; + /* fallthrough */ + case 13: PROCESS8_64; + FALLTHROUGH_INTENDED; + /* fallthrough */ + case 5: PROCESS4_64; + PROCESS1_64; + return XXH64_avalanche(h64); + + case 26: PROCESS8_64; + FALLTHROUGH_INTENDED; + /* fallthrough */ + case 18: PROCESS8_64; + FALLTHROUGH_INTENDED; + /* fallthrough */ + case 10: PROCESS8_64; + PROCESS1_64; + PROCESS1_64; + return XXH64_avalanche(h64); + + case 30: PROCESS8_64; + FALLTHROUGH_INTENDED; + /* fallthrough */ + case 22: PROCESS8_64; + FALLTHROUGH_INTENDED; + /* fallthrough */ + case 14: PROCESS8_64; + FALLTHROUGH_INTENDED; + /* fallthrough */ + case 6: PROCESS4_64; + PROCESS1_64; + PROCESS1_64; + return XXH64_avalanche(h64); + + case 27: PROCESS8_64; + FALLTHROUGH_INTENDED; + /* fallthrough */ + case 19: PROCESS8_64; + FALLTHROUGH_INTENDED; + /* fallthrough */ + case 11: PROCESS8_64; + PROCESS1_64; + PROCESS1_64; + PROCESS1_64; + return XXH64_avalanche(h64); + + case 31: PROCESS8_64; + FALLTHROUGH_INTENDED; + /* fallthrough */ + case 23: PROCESS8_64; + FALLTHROUGH_INTENDED; + /* fallthrough */ + case 15: PROCESS8_64; + FALLTHROUGH_INTENDED; + /* fallthrough */ + case 7: PROCESS4_64; + FALLTHROUGH_INTENDED; + /* fallthrough */ + case 3: PROCESS1_64; + FALLTHROUGH_INTENDED; + /* fallthrough */ + case 2: PROCESS1_64; + FALLTHROUGH_INTENDED; + /* fallthrough */ + case 1: PROCESS1_64; + FALLTHROUGH_INTENDED; + /* fallthrough */ + case 0: return XXH64_avalanche(h64); + } + } + /* impossible to reach */ + XXH_ASSERT(0); + return 0; /* unreachable, but some compilers complain without it */ } -FORCE_INLINE U64 XXH64_endian_align(const void* input, size_t len, U64 seed, - XXH_endianess endian, XXH_alignment align) { - const BYTE* p = (const BYTE*)input; - const BYTE* bEnd = p + len; - U64 h64; - -#if defined(XXH_ACCEPT_NULL_INPUT_POINTER) && \ - (XXH_ACCEPT_NULL_INPUT_POINTER >= 1) - if (p == NULL) { - len = 0; - bEnd = p = (const BYTE*)(size_t)32; - } +XXH_FORCE_INLINE xxh_u64 +XXH64_endian_align(const xxh_u8* input, size_t len, xxh_u64 seed, XXH_alignment align) +{ + const xxh_u8* bEnd = input + len; + xxh_u64 h64; + +#if defined(XXH_ACCEPT_NULL_INPUT_POINTER) && (XXH_ACCEPT_NULL_INPUT_POINTER>=1) + if (input==NULL) { + len=0; + bEnd=input=(const xxh_u8*)(size_t)32; + } #endif - if (len >= 32) { - const BYTE* const limit = bEnd - 32; - U64 v1 = seed + PRIME64_1 + PRIME64_2; - U64 v2 = seed + PRIME64_2; - U64 v3 = seed + 0; - U64 v4 = seed - PRIME64_1; - - do { - v1 = XXH64_round(v1, XXH_get64bits(p)); - p += 8; - v2 = XXH64_round(v2, 
XXH_get64bits(p)); - p += 8; - v3 = XXH64_round(v3, XXH_get64bits(p)); - p += 8; - v4 = XXH64_round(v4, XXH_get64bits(p)); - p += 8; - } while (p <= limit); - - h64 = XXH_rotl64(v1, 1) + XXH_rotl64(v2, 7) + XXH_rotl64(v3, 12) + - XXH_rotl64(v4, 18); - h64 = XXH64_mergeRound(h64, v1); - h64 = XXH64_mergeRound(h64, v2); - h64 = XXH64_mergeRound(h64, v3); - h64 = XXH64_mergeRound(h64, v4); - - } else { - h64 = seed + PRIME64_5; - } - - h64 += (U64)len; - - return XXH64_finalize(h64, p, len, endian, align); + if (len>=32) { + const xxh_u8* const limit = bEnd - 32; + xxh_u64 v1 = seed + PRIME64_1 + PRIME64_2; + xxh_u64 v2 = seed + PRIME64_2; + xxh_u64 v3 = seed + 0; + xxh_u64 v4 = seed - PRIME64_1; + + do { + v1 = XXH64_round(v1, XXH_get64bits(input)); input+=8; + v2 = XXH64_round(v2, XXH_get64bits(input)); input+=8; + v3 = XXH64_round(v3, XXH_get64bits(input)); input+=8; + v4 = XXH64_round(v4, XXH_get64bits(input)); input+=8; + } while (input<=limit); + + h64 = XXH_rotl64(v1, 1) + XXH_rotl64(v2, 7) + XXH_rotl64(v3, 12) + XXH_rotl64(v4, 18); + h64 = XXH64_mergeRound(h64, v1); + h64 = XXH64_mergeRound(h64, v2); + h64 = XXH64_mergeRound(h64, v3); + h64 = XXH64_mergeRound(h64, v4); + + } else { + h64 = seed + PRIME64_5; + } + + h64 += (xxh_u64) len; + + return XXH64_finalize(h64, input, len, align); } -unsigned long long XXH64(const void* input, size_t len, - unsigned long long seed) { + +XXH_PUBLIC_API XXH64_hash_t XXH64 (const void* input, size_t len, XXH64_hash_t seed) +{ #if 0 /* Simple version, good for code maintenance, but unfortunately slow for small inputs */ XXH64_state_t state; XXH64_reset(&state, seed); - XXH64_update(&state, input, len); + XXH64_update(&state, (const xxh_u8*)input, len); return XXH64_digest(&state); + #else - XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN; - - if (XXH_FORCE_ALIGN_CHECK) { - if ((((size_t)input) & 7) == - 0) { /* Input is aligned, let's leverage the speed advantage */ - if ((endian_detected == XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT) - return XXH64_endian_align(input, len, seed, XXH_littleEndian, - XXH_aligned); - else - return XXH64_endian_align(input, len, seed, XXH_bigEndian, XXH_aligned); - } - } - if ((endian_detected == XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT) - return XXH64_endian_align(input, len, seed, XXH_littleEndian, - XXH_unaligned); - else - return XXH64_endian_align(input, len, seed, XXH_bigEndian, XXH_unaligned); + if (XXH_FORCE_ALIGN_CHECK) { + if ((((size_t)input) & 7)==0) { /* Input is aligned, let's leverage the speed advantage */ + return XXH64_endian_align((const xxh_u8*)input, len, seed, XXH_aligned); + } } + + return XXH64_endian_align((const xxh_u8*)input, len, seed, XXH_unaligned); + #endif } /*====== Hash Streaming ======*/ -XXH64_state_t* XXH64_createState(void) { - return (XXH64_state_t*)XXH_malloc(sizeof(XXH64_state_t)); +XXH_PUBLIC_API XXH64_state_t* XXH64_createState(void) +{ + return (XXH64_state_t*)XXH_malloc(sizeof(XXH64_state_t)); } -XXH_errorcode XXH64_freeState(XXH64_state_t* statePtr) { - XXH_free(statePtr); - return XXH_OK; +XXH_PUBLIC_API XXH_errorcode XXH64_freeState(XXH64_state_t* statePtr) +{ + XXH_free(statePtr); + return XXH_OK; } -void XXH64_copyState(XXH64_state_t* dstState, const XXH64_state_t* srcState) { - memcpy(dstState, srcState, sizeof(*dstState)); +XXH_PUBLIC_API void XXH64_copyState(XXH64_state_t* dstState, const XXH64_state_t* srcState) +{ + memcpy(dstState, srcState, sizeof(*dstState)); } -XXH_errorcode XXH64_reset(XXH64_state_t* statePtr, unsigned long long 
seed) { - XXH64_state_t state; /* using a local state to memcpy() in order to avoid - strict-aliasing warnings */ - memset(&state, 0, sizeof(state)); - state.v1 = seed + PRIME64_1 + PRIME64_2; - state.v2 = seed + PRIME64_2; - state.v3 = seed + 0; - state.v4 = seed - PRIME64_1; - /* do not write into reserved, planned to be removed in a future version */ - memcpy(statePtr, &state, sizeof(state) - sizeof(state.reserved)); - return XXH_OK; +XXH_PUBLIC_API XXH_errorcode XXH64_reset(XXH64_state_t* statePtr, XXH64_hash_t seed) +{ + XXH64_state_t state; /* using a local state to memcpy() in order to avoid strict-aliasing warnings */ + memset(&state, 0, sizeof(state)); + state.v1 = seed + PRIME64_1 + PRIME64_2; + state.v2 = seed + PRIME64_2; + state.v3 = seed + 0; + state.v4 = seed - PRIME64_1; + /* do not write into reserved64, might be removed in a future version */ + memcpy(statePtr, &state, sizeof(state) - sizeof(state.reserved64)); + return XXH_OK; } -FORCE_INLINE XXH_errorcode XXH64_update_endian(XXH64_state_t* state, - const void* input, size_t len, - XXH_endianess endian) { - if (input == NULL) -#if defined(XXH_ACCEPT_NULL_INPUT_POINTER) && \ - (XXH_ACCEPT_NULL_INPUT_POINTER >= 1) - return XXH_OK; +XXH_PUBLIC_API XXH_errorcode +XXH64_update (XXH64_state_t* state, const void* input, size_t len) +{ + if (input==NULL) +#if defined(XXH_ACCEPT_NULL_INPUT_POINTER) && (XXH_ACCEPT_NULL_INPUT_POINTER>=1) + return XXH_OK; #else - return XXH_ERROR; + return XXH_ERROR; #endif - { - const BYTE* p = (const BYTE*)input; - const BYTE* const bEnd = p + len; + { const xxh_u8* p = (const xxh_u8*)input; + const xxh_u8* const bEnd = p + len; - state->total_len += len; + state->total_len += len; - if (state->memsize + len < 32) { /* fill in tmp buffer */ - XXH_memcpy(((BYTE*)state->mem64) + state->memsize, input, len); - state->memsize += (U32)len; - return XXH_OK; - } + if (state->memsize + len < 32) { /* fill in tmp buffer */ + XXH_memcpy(((xxh_u8*)state->mem64) + state->memsize, input, len); + state->memsize += (xxh_u32)len; + return XXH_OK; + } - if (state->memsize) { /* tmp buffer is full */ - XXH_memcpy(((BYTE*)state->mem64) + state->memsize, input, - 32 - state->memsize); - state->v1 = - XXH64_round(state->v1, XXH_readLE64(state->mem64 + 0, endian)); - state->v2 = - XXH64_round(state->v2, XXH_readLE64(state->mem64 + 1, endian)); - state->v3 = - XXH64_round(state->v3, XXH_readLE64(state->mem64 + 2, endian)); - state->v4 = - XXH64_round(state->v4, XXH_readLE64(state->mem64 + 3, endian)); - p += 32 - state->memsize; - state->memsize = 0; - } + if (state->memsize) { /* tmp buffer is full */ + XXH_memcpy(((xxh_u8*)state->mem64) + state->memsize, input, 32-state->memsize); + state->v1 = XXH64_round(state->v1, XXH_readLE64(state->mem64+0)); + state->v2 = XXH64_round(state->v2, XXH_readLE64(state->mem64+1)); + state->v3 = XXH64_round(state->v3, XXH_readLE64(state->mem64+2)); + state->v4 = XXH64_round(state->v4, XXH_readLE64(state->mem64+3)); + p += 32-state->memsize; + state->memsize = 0; + } - if (p + 32 <= bEnd) { - const BYTE* const limit = bEnd - 32; - U64 v1 = state->v1; - U64 v2 = state->v2; - U64 v3 = state->v3; - U64 v4 = state->v4; - - do { - v1 = XXH64_round(v1, XXH_readLE64(p, endian)); - p += 8; - v2 = XXH64_round(v2, XXH_readLE64(p, endian)); - p += 8; - v3 = XXH64_round(v3, XXH_readLE64(p, endian)); - p += 8; - v4 = XXH64_round(v4, XXH_readLE64(p, endian)); - p += 8; - } while (p <= limit); - - state->v1 = v1; - state->v2 = v2; - state->v3 = v3; - state->v4 = v4; - } + if (p+32 <= bEnd) { + const 
xxh_u8* const limit = bEnd - 32; + xxh_u64 v1 = state->v1; + xxh_u64 v2 = state->v2; + xxh_u64 v3 = state->v3; + xxh_u64 v4 = state->v4; + + do { + v1 = XXH64_round(v1, XXH_readLE64(p)); p+=8; + v2 = XXH64_round(v2, XXH_readLE64(p)); p+=8; + v3 = XXH64_round(v3, XXH_readLE64(p)); p+=8; + v4 = XXH64_round(v4, XXH_readLE64(p)); p+=8; + } while (p<=limit); + + state->v1 = v1; + state->v2 = v2; + state->v3 = v3; + state->v4 = v4; + } - if (p < bEnd) { - XXH_memcpy(state->mem64, p, (size_t)(bEnd - p)); - state->memsize = (unsigned)(bEnd - p); + if (p < bEnd) { + XXH_memcpy(state->mem64, p, (size_t)(bEnd-p)); + state->memsize = (unsigned)(bEnd-p); + } } - } - return XXH_OK; + return XXH_OK; } -XXH_errorcode XXH64_update(XXH64_state_t* state_in, const void* input, - size_t len) { - XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN; - - if ((endian_detected == XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT) - return XXH64_update_endian(state_in, input, len, XXH_littleEndian); - else - return XXH64_update_endian(state_in, input, len, XXH_bigEndian); -} -FORCE_INLINE U64 XXH64_digest_endian(const XXH64_state_t* state, - XXH_endianess endian) { - U64 h64; - - if (state->total_len >= 32) { - U64 const v1 = state->v1; - U64 const v2 = state->v2; - U64 const v3 = state->v3; - U64 const v4 = state->v4; - - h64 = XXH_rotl64(v1, 1) + XXH_rotl64(v2, 7) + XXH_rotl64(v3, 12) + - XXH_rotl64(v4, 18); - h64 = XXH64_mergeRound(h64, v1); - h64 = XXH64_mergeRound(h64, v2); - h64 = XXH64_mergeRound(h64, v3); - h64 = XXH64_mergeRound(h64, v4); - } else { - h64 = state->v3 /*seed*/ + PRIME64_5; - } - - h64 += (U64)state->total_len; - - return XXH64_finalize(h64, state->mem64, (size_t)state->total_len, endian, - XXH_aligned); -} +XXH_PUBLIC_API XXH64_hash_t XXH64_digest (const XXH64_state_t* state) +{ + xxh_u64 h64; + + if (state->total_len >= 32) { + xxh_u64 const v1 = state->v1; + xxh_u64 const v2 = state->v2; + xxh_u64 const v3 = state->v3; + xxh_u64 const v4 = state->v4; + + h64 = XXH_rotl64(v1, 1) + XXH_rotl64(v2, 7) + XXH_rotl64(v3, 12) + XXH_rotl64(v4, 18); + h64 = XXH64_mergeRound(h64, v1); + h64 = XXH64_mergeRound(h64, v2); + h64 = XXH64_mergeRound(h64, v3); + h64 = XXH64_mergeRound(h64, v4); + } else { + h64 = state->v3 /*seed*/ + PRIME64_5; + } -unsigned long long XXH64_digest(const XXH64_state_t* state_in) { - XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN; + h64 += (xxh_u64) state->total_len; - if ((endian_detected == XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT) - return XXH64_digest_endian(state_in, XXH_littleEndian); - else - return XXH64_digest_endian(state_in, XXH_bigEndian); + return XXH64_finalize(h64, (const xxh_u8*)state->mem64, (size_t)state->total_len, XXH_aligned); } + /*====== Canonical representation ======*/ -void XXH64_canonicalFromHash(XXH64_canonical_t* dst, XXH64_hash_t hash) { - XXH_STATIC_ASSERT(sizeof(XXH64_canonical_t) == sizeof(XXH64_hash_t)); - if (XXH_CPU_LITTLE_ENDIAN) hash = XXH_swap64(hash); - memcpy(dst, &hash, sizeof(*dst)); +XXH_PUBLIC_API void XXH64_canonicalFromHash(XXH64_canonical_t* dst, XXH64_hash_t hash) +{ + XXH_STATIC_ASSERT(sizeof(XXH64_canonical_t) == sizeof(XXH64_hash_t)); + if (XXH_CPU_LITTLE_ENDIAN) hash = XXH_swap64(hash); + memcpy(dst, &hash, sizeof(*dst)); } -XXH64_hash_t XXH64_hashFromCanonical(const XXH64_canonical_t* src) { - return XXH_readBE64(src); +XXH_PUBLIC_API XXH64_hash_t XXH64_hashFromCanonical(const XXH64_canonical_t* src) +{ + return XXH_readBE64(src); } -} // namespace rocksdb + + + +/* 
********************************************************************* +* XXH3 +* New generation hash designed for speed on small keys and vectorization +************************************************************************ */ + +#include "xxh3p.h" /* XXH3 preview for RocksDB */ + + +#endif /* XXH_NO_LONG_LONG */ + +#endif /* XXHASH_C_01393879 */ diff --git a/util/xxhash.h b/util/xxhash.h index 515375506..59d9b97d6 100644 --- a/util/xxhash.h +++ b/util/xxhash.h @@ -1,8 +1,12 @@ -// Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. +// Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). /* - xxHash - Fast Hash algorithm + xxHash - Extremely Fast Hash algorithm Header File - Copyright (C) 2012-2014, Yann Collet. + Copyright (C) 2012-2016, Yann Collet. + BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without @@ -29,7 +33,7 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. You can contact the author at : - - xxHash source repository : http://code.google.com/p/xxhash/ + - xxHash source repository : https://github.com/Cyan4973/xxHash */ /* Notice extracted from xxHash homepage : @@ -49,193 +53,546 @@ Lookup3 1.2 GB/s 9 Bob Jenkins SuperFastHash 1.2 GB/s 1 Paul Hsieh CityHash64 1.05 GB/s 10 Pike & Alakuijala FNV 0.55 GB/s 5 Fowler, Noll, Vo -CRC32 0.43 GB/s 9 +CRC32 0.43 GB/s # 9 MD5-32 0.33 GB/s 10 Ronald L. Rivest SHA1-32 0.28 GB/s 10 +Note #: other CRC32 implementations can be over 40x faster than SMHasher's: +http://fastcompression.blogspot.com/2019/03/presenting-xxh3.html?showComment=1552696407071#c3490092340461170735 + Q.Score is a measure of quality of the hash function. It depends on successfully passing SMHasher test set. 10 is a perfect score. -*/ -#pragma once +A 64-bit version, named XXH64, is available since r35. +It offers much better speed, but for 64-bit applications only. +Name Speed on 64 bits Speed on 32 bits +XXH64 13.8 GB/s 1.9 GB/s +XXH32 6.8 GB/s 6.0 GB/s +*/ -#include +#ifndef XXHASH_H_5627135585666179 +#define XXHASH_H_5627135585666179 1 -#if !defined(__VMS) && \ - (defined(__cplusplus) || \ - (defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)) -#include +/* BEGIN RocksDB customizations */ +#ifndef XXH_STATIC_LINKING_ONLY +#define XXH_STATIC_LINKING_ONLY 1 /* access experimental APIs like XXH3 */ #endif +#define XXH_NAMESPACE ROCKSDB_ +/* END RocksDB customizations */ #if defined (__cplusplus) -namespace rocksdb { +extern "C" { #endif -//**************************** -// Type -//**************************** -/* size_t */ +/* **************************** +* Definitions +******************************/ +#include /* size_t */ typedef enum { XXH_OK=0, XXH_ERROR } XXH_errorcode; +/* **************************** + * API modifier + ******************************/ +/** XXH_INLINE_ALL (and XXH_PRIVATE_API) + * This build macro includes xxhash functions in `static` mode + * in order to inline them, and remove their symbol from the public list. + * Inlining offers great performance improvement on small keys, + * and dramatic ones when length is expressed as a compile-time constant. + * See https://fastcompression.blogspot.com/2018/03/xxhash-for-small-keys-impressive-power.html . 
+ * Methodology : + * #define XXH_INLINE_ALL + * #include "xxhash.h" + * `xxhash.c` is automatically included. + * It's not useful to compile and link it as a separate object. + */ +#if defined(XXH_INLINE_ALL) || defined(XXH_PRIVATE_API) +# ifndef XXH_STATIC_LINKING_ONLY +# define XXH_STATIC_LINKING_ONLY +# endif +# if defined(__GNUC__) +# define XXH_PUBLIC_API static __inline __attribute__((unused)) +# elif defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) +# define XXH_PUBLIC_API static inline +# elif defined(_MSC_VER) +# define XXH_PUBLIC_API static __inline +# else + /* this version may generate warnings for unused static functions */ +# define XXH_PUBLIC_API static +# endif +#else +# if defined(WIN32) && defined(_MSC_VER) && (defined(XXH_IMPORT) || defined(XXH_EXPORT)) +# ifdef XXH_EXPORT +# define XXH_PUBLIC_API __declspec(dllexport) +# elif XXH_IMPORT +# define XXH_PUBLIC_API __declspec(dllimport) +# endif +# else +# define XXH_PUBLIC_API /* do nothing */ +# endif +#endif /* XXH_INLINE_ALL || XXH_PRIVATE_API */ + +/*! XXH_NAMESPACE, aka Namespace Emulation : + * + * If you want to include _and expose_ xxHash functions from within your own library, + * but also want to avoid symbol collisions with other libraries which may also include xxHash, + * + * you can use XXH_NAMESPACE, to automatically prefix any public symbol from xxhash library + * with the value of XXH_NAMESPACE (therefore, avoid NULL and numeric values). + * + * Note that no change is required within the calling program as long as it includes `xxhash.h` : + * regular symbol name will be automatically translated by this header. + */ +#ifdef XXH_NAMESPACE +# define XXH_CAT(A,B) A##B +# define XXH_NAME2(A,B) XXH_CAT(A,B) +# define XXH_versionNumber XXH_NAME2(XXH_NAMESPACE, XXH_versionNumber) +# define XXH32 XXH_NAME2(XXH_NAMESPACE, XXH32) +# define XXH32_createState XXH_NAME2(XXH_NAMESPACE, XXH32_createState) +# define XXH32_freeState XXH_NAME2(XXH_NAMESPACE, XXH32_freeState) +# define XXH32_reset XXH_NAME2(XXH_NAMESPACE, XXH32_reset) +# define XXH32_update XXH_NAME2(XXH_NAMESPACE, XXH32_update) +# define XXH32_digest XXH_NAME2(XXH_NAMESPACE, XXH32_digest) +# define XXH32_copyState XXH_NAME2(XXH_NAMESPACE, XXH32_copyState) +# define XXH32_canonicalFromHash XXH_NAME2(XXH_NAMESPACE, XXH32_canonicalFromHash) +# define XXH32_hashFromCanonical XXH_NAME2(XXH_NAMESPACE, XXH32_hashFromCanonical) +# define XXH64 XXH_NAME2(XXH_NAMESPACE, XXH64) +# define XXH64_createState XXH_NAME2(XXH_NAMESPACE, XXH64_createState) +# define XXH64_freeState XXH_NAME2(XXH_NAMESPACE, XXH64_freeState) +# define XXH64_reset XXH_NAME2(XXH_NAMESPACE, XXH64_reset) +# define XXH64_update XXH_NAME2(XXH_NAMESPACE, XXH64_update) +# define XXH64_digest XXH_NAME2(XXH_NAMESPACE, XXH64_digest) +# define XXH64_copyState XXH_NAME2(XXH_NAMESPACE, XXH64_copyState) +# define XXH64_canonicalFromHash XXH_NAME2(XXH_NAMESPACE, XXH64_canonicalFromHash) +# define XXH64_hashFromCanonical XXH_NAME2(XXH_NAMESPACE, XXH64_hashFromCanonical) +#endif -//**************************** -// Simple Hash Functions -//**************************** - -unsigned int XXH32 (const void* input, int len, unsigned int seed); -/* -XXH32() : - Calculate the 32-bits hash of sequence of length "len" stored at memory address "input". - The memory between input & input+len must be valid (allocated and read-accessible). - "seed" can be used to alter the result predictably. - This function successfully passes all SMHasher tests. 
- Speed on Core 2 Duo @ 3 GHz (single thread, SMHasher benchmark) : 5.4 GB/s - Note that "len" is type "int", which means it is limited to 2^31-1. - If your data is larger, use the advanced functions below. -*/ +/* ************************************* +* Version +***************************************/ +#define XXH_VERSION_MAJOR 0 +#define XXH_VERSION_MINOR 7 +#define XXH_VERSION_RELEASE 2 +#define XXH_VERSION_NUMBER (XXH_VERSION_MAJOR *100*100 + XXH_VERSION_MINOR *100 + XXH_VERSION_RELEASE) +XXH_PUBLIC_API unsigned XXH_versionNumber (void); +/*-********************************************************************** +* 32-bit hash +************************************************************************/ +#if !defined (__VMS) \ + && (defined (__cplusplus) \ + || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) ) +# include + typedef uint32_t XXH32_hash_t; +#else +# include +# if UINT_MAX == 0xFFFFFFFFUL + typedef unsigned int XXH32_hash_t; +# else +# if ULONG_MAX == 0xFFFFFFFFUL + typedef unsigned long XXH32_hash_t; +# else +# error "unsupported platform : need a 32-bit type" +# endif +# endif +#endif -//**************************** -// Advanced Hash Functions -//**************************** +/*! XXH32() : + Calculate the 32-bit hash of sequence "length" bytes stored at memory address "input". + The memory between input & input+length must be valid (allocated and read-accessible). + "seed" can be used to alter the result predictably. + Speed on Core 2 Duo @ 3 GHz (single thread, SMHasher benchmark) : 5.4 GB/s */ +XXH_PUBLIC_API XXH32_hash_t XXH32 (const void* input, size_t length, XXH32_hash_t seed); -void* XXH32_init (unsigned int seed); -XXH_errorcode XXH32_update (void* state, const void* input, int len); -unsigned int XXH32_digest (void* state); +/*====== Streaming ======*/ /* -These functions calculate the xxhash of an input provided in several small packets, -as opposed to an input provided as a single block. - -It must be started with : -void* XXH32_init() -The function returns a pointer which holds the state of calculation. - -This pointer must be provided as "void* state" parameter for XXH32_update(). -XXH32_update() can be called as many times as necessary. -The user must provide a valid (allocated) input. -The function returns an error code, with 0 meaning OK, and any other value meaning there is an error. -Note that "len" is type "int", which means it is limited to 2^31-1. -If your data is larger, it is recommended to chunk your data into blocks -of size for example 2^30 (1GB) to avoid any "int" overflow issue. - -Finally, you can end the calculation anytime, by using XXH32_digest(). -This function returns the final 32-bits hash. -You must provide the same "void* state" parameter created by XXH32_init(). -Memory will be freed by XXH32_digest(). -*/ + * Streaming functions generate the xxHash value from an incrememtal input. + * This method is slower than single-call functions, due to state management. + * For small inputs, prefer `XXH32()` and `XXH64()`, which are better optimized. + * + * XXH state must first be allocated, using XXH*_createState() . + * + * Start a new hash by initializing state with a seed, using XXH*_reset(). + * + * Then, feed the hash state by calling XXH*_update() as many times as necessary. + * The function returns an error code, with 0 meaning OK, and any other value meaning there is an error. + * + * Finally, a hash value can be produced anytime, by using XXH*_digest(). 
+ * This function returns the nn-bits hash as an int or long long. + * + * It's still possible to continue inserting input into the hash state after a digest, + * and generate some new hash values later on, by invoking again XXH*_digest(). + * + * When done, release the state, using XXH*_freeState(). + */ + +typedef struct XXH32_state_s XXH32_state_t; /* incomplete type */ +XXH_PUBLIC_API XXH32_state_t* XXH32_createState(void); +XXH_PUBLIC_API XXH_errorcode XXH32_freeState(XXH32_state_t* statePtr); +XXH_PUBLIC_API void XXH32_copyState(XXH32_state_t* dst_state, const XXH32_state_t* src_state); + +XXH_PUBLIC_API XXH_errorcode XXH32_reset (XXH32_state_t* statePtr, XXH32_hash_t seed); +XXH_PUBLIC_API XXH_errorcode XXH32_update (XXH32_state_t* statePtr, const void* input, size_t length); +XXH_PUBLIC_API XXH32_hash_t XXH32_digest (const XXH32_state_t* statePtr); +/*====== Canonical representation ======*/ -int XXH32_sizeofState(); -XXH_errorcode XXH32_resetState(void* state, unsigned int seed); - -#define XXH32_SIZEOFSTATE 48 -typedef struct { long long ll[(XXH32_SIZEOFSTATE+(sizeof(long long)-1))/sizeof(long long)]; } XXH32_stateSpace_t; -/* -These functions allow user application to make its own allocation for state. - -XXH32_sizeofState() is used to know how much space must be allocated for the xxHash 32-bits state. -Note that the state must be aligned to access 'long long' fields. Memory must be allocated and referenced by a pointer. -This pointer must then be provided as 'state' into XXH32_resetState(), which initializes the state. +/* Default return values from XXH functions are basic unsigned 32 and 64 bits. + * This the simplest and fastest format for further post-processing. + * However, this leaves open the question of what is the order of bytes, + * since little and big endian conventions will write the same number differently. + * + * The canonical representation settles this issue, + * by mandating big-endian convention, + * aka, the same convention as human-readable numbers (large digits first). + * When writing hash values to storage, sending them over a network, or printing them, + * it's highly recommended to use the canonical representation, + * to ensure portability across a wider range of systems, present and future. + * + * The following functions allow transformation of hash values into and from canonical format. + */ + +typedef struct { unsigned char digest[4]; } XXH32_canonical_t; +XXH_PUBLIC_API void XXH32_canonicalFromHash(XXH32_canonical_t* dst, XXH32_hash_t hash); +XXH_PUBLIC_API XXH32_hash_t XXH32_hashFromCanonical(const XXH32_canonical_t* src); + + +#ifndef XXH_NO_LONG_LONG +/*-********************************************************************** +* 64-bit hash +************************************************************************/ +#if !defined (__VMS) \ + && (defined (__cplusplus) \ + || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) ) +# include + typedef uint64_t XXH64_hash_t; +#else + /* the following type must have a width of 64-bit */ + typedef unsigned long long XXH64_hash_t; +#endif -For static allocation purposes (such as allocation on stack, or freestanding systems without malloc()), -use the structure XXH32_stateSpace_t, which will ensure that memory space is large enough and correctly aligned to access 'long long' fields. +/*! XXH64() : + Calculate the 64-bit hash of sequence of length "len" stored at memory address "input". + "seed" can be used to alter the result predictably. 
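For illustration, the canonical helpers above give a byte-for-byte portable representation of a hash value; a hypothetical round trip (buf and len are placeholders, not from this patch) looks like:

    #include <cassert>
    #include "util/xxhash.h"

    // Sketch only: persist an XXH32 value as 4 big-endian bytes and read it back.
    void CanonicalRoundTrip(const void* buf, size_t len) {
      XXH32_hash_t h = XXH32(buf, len, /*seed=*/0);
      XXH32_canonical_t canon;
      XXH32_canonicalFromHash(&canon, h);   // canon.digest is what gets stored or sent
      assert(XXH32_hashFromCanonical(&canon) == h);
    }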
+ This function runs faster on 64-bit systems, but slower on 32-bit systems (see benchmark). */ +XXH_PUBLIC_API XXH64_hash_t XXH64 (const void* input, size_t length, XXH64_hash_t seed); +/*====== Streaming ======*/ +typedef struct XXH64_state_s XXH64_state_t; /* incomplete type */ +XXH_PUBLIC_API XXH64_state_t* XXH64_createState(void); +XXH_PUBLIC_API XXH_errorcode XXH64_freeState(XXH64_state_t* statePtr); +XXH_PUBLIC_API void XXH64_copyState(XXH64_state_t* dst_state, const XXH64_state_t* src_state); -unsigned int XXH32_intermediateDigest (void* state); -/* -This function does the same as XXH32_digest(), generating a 32-bit hash, -but preserve memory context. -This way, it becomes possible to generate intermediate hashes, and then continue feeding data with XXH32_update(). -To free memory context, use XXH32_digest(), or free(). -*/ - +XXH_PUBLIC_API XXH_errorcode XXH64_reset (XXH64_state_t* statePtr, XXH64_hash_t seed); +XXH_PUBLIC_API XXH_errorcode XXH64_update (XXH64_state_t* statePtr, const void* input, size_t length); +XXH_PUBLIC_API XXH64_hash_t XXH64_digest (const XXH64_state_t* statePtr); +/*====== Canonical representation ======*/ +typedef struct { unsigned char digest[8]; } XXH64_canonical_t; +XXH_PUBLIC_API void XXH64_canonicalFromHash(XXH64_canonical_t* dst, XXH64_hash_t hash); +XXH_PUBLIC_API XXH64_hash_t XXH64_hashFromCanonical(const XXH64_canonical_t* src); -//**************************** -// Deprecated function names -//**************************** -// The following translations are provided to ease code transition -// You are encouraged to no longer this function names -#define XXH32_feed XXH32_update -#define XXH32_result XXH32_digest -#define XXH32_getIntermediateResult XXH32_intermediateDigest -/*-********************************************************************** - * 64-bit hash - ************************************************************************/ -typedef unsigned long long XXH64_hash_t; +#endif /* XXH_NO_LONG_LONG */ -/*! XXH64() : - Calculate the 64-bit hash of sequence of length "len" stored at memory - address "input". "seed" can be used to alter the result predictably. This - function runs faster on 64-bit systems, but slower on 32-bit systems (see - benchmark). -*/ -XXH64_hash_t XXH64(const void* input, size_t length, unsigned long long seed); -/*====== Streaming ======*/ -typedef struct XXH64_state_s XXH64_state_t; /* incomplete type */ -XXH64_state_t* XXH64_createState(void); -XXH_errorcode XXH64_freeState(XXH64_state_t* statePtr); -void XXH64_copyState(XXH64_state_t* dst_state, const XXH64_state_t* src_state); -XXH_errorcode XXH64_reset(XXH64_state_t* statePtr, unsigned long long seed); -XXH_errorcode XXH64_update(XXH64_state_t* statePtr, const void* input, - size_t length); -XXH64_hash_t XXH64_digest(const XXH64_state_t* statePtr); +#ifdef XXH_STATIC_LINKING_ONLY -/*====== Canonical representation ======*/ -typedef struct { - unsigned char digest[8]; -} XXH64_canonical_t; -void XXH64_canonicalFromHash(XXH64_canonical_t* dst, XXH64_hash_t hash); -XXH64_hash_t XXH64_hashFromCanonical(const XXH64_canonical_t* src); +/* ================================================================================================ + This section contains declarations which are not guaranteed to remain stable. + They may change in future versions, becoming incompatible with a different version of the library. + These declarations should only be used with static linking. + Never use them in association with dynamic linking ! 
+=================================================================================================== */ /* These definitions are only present to allow * static allocation of XXH state, on stack or in a struct for example. * Never **ever** use members directly. */ -#if !defined(__VMS) && \ - (defined(__cplusplus) || \ - (defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)) - +struct XXH32_state_s { + XXH32_hash_t total_len_32; + XXH32_hash_t large_len; + XXH32_hash_t v1; + XXH32_hash_t v2; + XXH32_hash_t v3; + XXH32_hash_t v4; + XXH32_hash_t mem32[4]; + XXH32_hash_t memsize; + XXH32_hash_t reserved; /* never read nor write, might be removed in a future version */ +}; /* typedef'd to XXH32_state_t */ + +#ifndef XXH_NO_LONG_LONG /* remove 64-bit support */ struct XXH64_state_s { - uint64_t total_len; - uint64_t v1; - uint64_t v2; - uint64_t v3; - uint64_t v4; - uint64_t mem64[4]; - uint32_t memsize; - uint32_t reserved[2]; /* never read nor write, might be removed in a future - version */ -}; /* typedef'd to XXH64_state_t */ + XXH64_hash_t total_len; + XXH64_hash_t v1; + XXH64_hash_t v2; + XXH64_hash_t v3; + XXH64_hash_t v4; + XXH64_hash_t mem64[4]; + XXH32_hash_t memsize; + XXH32_hash_t reserved32; /* required for padding anyway */ + XXH64_hash_t reserved64; /* never read nor write, might be removed in a future version */ +}; /* typedef'd to XXH64_state_t */ +#endif /* XXH_NO_LONG_LONG */ + + +/*-********************************************************************** +* XXH3 +* New experimental hash +************************************************************************/ +#ifndef XXH_NO_LONG_LONG + + +/* ============================================ + * XXH3 is a new hash algorithm, + * featuring improved speed performance for both small and large inputs. + * See full speed analysis at : http://fastcompression.blogspot.com/2019/03/presenting-xxh3.html + * In general, expect XXH3 to run about ~2x faster on large inputs, + * and >3x faster on small ones, though exact differences depend on platform. + * + * The algorithm is portable, will generate the same hash on all platforms. + * It benefits greatly from vectorization units, but does not require it. + * + * XXH3 offers 2 variants, _64bits and _128bits. + * When only 64 bits are needed, prefer calling the _64bits variant : + * it reduces the amount of mixing, resulting in faster speed on small inputs. + * It's also generally simpler to manipulate a scalar return type than a struct. + * + * The XXH3 algorithm is still considered experimental. + * Produced results can still change between versions. + * Results produced by v0.7.x are not comparable with results from v0.7.y . + * It's nonetheless possible to use XXH3 for ephemeral data (local sessions), + * but avoid storing values in long-term storage for later reads. + * + * The API supports one-shot hashing, streaming mode, and custom secrets. + * + * There are still a number of opened questions that community can influence during the experimental period. + * I'm trying to list a few of them below, though don't consider this list as complete. + * + * - 128-bits output type : currently defined as a structure of two 64-bits fields. + * That's because 128-bit values do not exist in C standard. + * Note that it means that, at byte level, result is not identical depending on endianess. + * However, at field level, they are identical on all platforms. 
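Since the struct layouts above exist only so a state can live on the stack or inside another struct, a statically allocated use looks roughly like this (sketch; the members stay opaque, and data/len are placeholders):

    #include "util/xxhash.h"  // this header forces XXH_STATIC_LINKING_ONLY on

    XXH64_hash_t StackStateHash(const void* data, size_t len) {
      XXH64_state_t state;               // no XXH64_createState()/XXH64_freeState()
      XXH64_reset(&state, /*seed=*/0);   // reset is still required before use
      XXH64_update(&state, data, len);
      return XXH64_digest(&state);
    }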
+ * The canonical representation solves the issue of identical byte-level representation across platforms, + * which is necessary for serialization. + * Q1 : Would there be a better representation for a 128-bit hash result ? + * Q2 : Are the names of the inner 64-bit fields important ? Should they be changed ? + * + * - Prototype XXH128() : XXH128() uses the same arguments as XXH64(), for consistency. + * It means it maps to XXH3p_128bits_withSeed(). + * This variant is slightly slower than XXH3p_128bits(), + * because the seed is now part of the algorithm, and can't be simplified. + * Is that a good idea ? + * + * - Seed type for XXH128() : currently, it's a single 64-bit value, like the 64-bit variant. + * It could be argued that it's more logical to offer a 128-bit seed input parameter for a 128-bit hash. + * But 128-bit seed is more difficult to use, since it requires to pass a structure instead of a scalar value. + * Such a variant could either replace current one, or become an additional one. + * Farmhash, for example, offers both variants (the 128-bits seed variant is called `doubleSeed`). + * Follow up question : if both 64-bit and 128-bit seeds are allowed, which variant should be called XXH128 ? + * + * - Result for len==0 : Currently, the result of hashing a zero-length input is always `0`. + * It seems okay as a return value when using "default" secret and seed. + * But is it still fine to return `0` when secret or seed are non-default ? + * Are there use cases which could depend on generating a different hash result for zero-length input when the secret is different ? + * + * - Consistency (1) : Streaming XXH128 uses an XXH3 state, which is the same state as XXH3p_64bits(). + * It means a 128bit streaming loop must invoke the following symbols : + * XXH3p_createState(), XXH3p_128bits_reset(), XXH3p_128bits_update() (loop), XXH3p_128bits_digest(), XXH3p_freeState(). + * Is that consistent enough ? + * + * - Consistency (2) : The canonical representation of `XXH3p_64bits` is provided by existing functions + * XXH64_canonicalFromHash(), and reverse operation XXH64_hashFromCanonical(). + * As a mirror, canonical functions for XXH128_hash_t results generated by `XXH3p_128bits` + * are XXH128_canonicalFromHash() and XXH128_hashFromCanonical(). + * Which means, `XXH3` doesn't appear in the names, because canonical functions operate on a type, + * independently of which algorithm was used to generate that type. + * Is that consistent enough ? + */ + +#ifdef XXH_NAMESPACE +# define XXH3p_64bits XXH_NAME2(XXH_NAMESPACE, XXH3p_64bits) +# define XXH3p_64bits_withSecret XXH_NAME2(XXH_NAMESPACE, XXH3p_64bits_withSecret) +# define XXH3p_64bits_withSeed XXH_NAME2(XXH_NAMESPACE, XXH3p_64bits_withSeed) + +# define XXH3p_createState XXH_NAME2(XXH_NAMESPACE, XXH3p_createState) +# define XXH3p_freeState XXH_NAME2(XXH_NAMESPACE, XXH3p_freeState) +# define XXH3p_copyState XXH_NAME2(XXH_NAMESPACE, XXH3p_copyState) + +# define XXH3p_64bits_reset XXH_NAME2(XXH_NAMESPACE, XXH3p_64bits_reset) +# define XXH3p_64bits_reset_withSeed XXH_NAME2(XXH_NAMESPACE, XXH3p_64bits_reset_withSeed) +# define XXH3p_64bits_reset_withSecret XXH_NAME2(XXH_NAMESPACE, XXH3p_64bits_reset_withSecret) +# define XXH3p_64bits_update XXH_NAME2(XXH_NAMESPACE, XXH3p_64bits_update) +# define XXH3p_64bits_digest XXH_NAME2(XXH_NAMESPACE, XXH3p_64bits_digest) +#endif +/* XXH3p_64bits() : + * default 64-bit variant, using default secret and default seed of 0. + * It's the fastest variant. 
*/ +XXH_PUBLIC_API XXH64_hash_t XXH3p_64bits(const void* data, size_t len); + +/* XXH3p_64bits_withSecret() : + * It's possible to provide any blob of bytes as a "secret" to generate the hash. + * This makes it more difficult for an external actor to prepare an intentional collision. + * The secret *must* be large enough (>= XXH3p_SECRET_SIZE_MIN). + * It should consist of random bytes. + * Avoid repeating same character, or sequences of bytes, + * and especially avoid swathes of \0. + * Failure to respect these conditions will result in a poor quality hash. + */ +#define XXH3p_SECRET_SIZE_MIN 136 +XXH_PUBLIC_API XXH64_hash_t XXH3p_64bits_withSecret(const void* data, size_t len, const void* secret, size_t secretSize); + +/* XXH3p_64bits_withSeed() : + * This variant generates on the fly a custom secret, + * based on the default secret, altered using the `seed` value. + * While this operation is decently fast, note that it's not completely free. + * note : seed==0 produces same results as XXH3p_64bits() */ +XXH_PUBLIC_API XXH64_hash_t XXH3p_64bits_withSeed(const void* data, size_t len, XXH64_hash_t seed); + + +/* streaming 64-bit */ + +#if defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L) /* C11+ */ +# include +# define XXH_ALIGN(n) alignas(n) +#elif defined(__GNUC__) +# define XXH_ALIGN(n) __attribute__ ((aligned(n))) +#elif defined(_MSC_VER) +# define XXH_ALIGN(n) __declspec(align(n)) #else +# define XXH_ALIGN(n) /* disabled */ +#endif -#ifndef XXH_NO_LONG_LONG /* remove 64-bit support */ -struct XXH64_state_s { - unsigned long long total_len; - unsigned long long v1; - unsigned long long v2; - unsigned long long v3; - unsigned long long v4; - unsigned long long mem64[4]; - unsigned memsize; - unsigned reserved[2]; /* never read nor write, might be removed in a future - version */ -}; /* typedef'd to XXH64_state_t */ +typedef struct XXH3p_state_s XXH3p_state_t; + +#define XXH3p_SECRET_DEFAULT_SIZE 192 /* minimum XXH3p_SECRET_SIZE_MIN */ +#define XXH3p_INTERNALBUFFER_SIZE 256 +struct XXH3p_state_s { + XXH_ALIGN(64) XXH64_hash_t acc[8]; + XXH_ALIGN(64) unsigned char customSecret[XXH3p_SECRET_DEFAULT_SIZE]; /* used to store a custom secret generated from the seed. Makes state larger. Design might change */ + XXH_ALIGN(64) unsigned char buffer[XXH3p_INTERNALBUFFER_SIZE]; + XXH32_hash_t bufferedSize; + XXH32_hash_t nbStripesPerBlock; + XXH32_hash_t nbStripesSoFar; + XXH32_hash_t secretLimit; + XXH32_hash_t reserved32; + XXH32_hash_t reserved32_2; + XXH64_hash_t totalLen; + XXH64_hash_t seed; + XXH64_hash_t reserved64; + const unsigned char* secret; /* note : there is some padding after, due to alignment on 64 bytes */ +}; /* typedef'd to XXH3p_state_t */ + +/* Streaming requires state maintenance. + * This operation costs memory and cpu. + * As a consequence, streaming is slower than one-shot hashing. + * For better performance, prefer using one-shot functions whenever possible. */ + +XXH_PUBLIC_API XXH3p_state_t* XXH3p_createState(void); +XXH_PUBLIC_API XXH_errorcode XXH3p_freeState(XXH3p_state_t* statePtr); +XXH_PUBLIC_API void XXH3p_copyState(XXH3p_state_t* dst_state, const XXH3p_state_t* src_state); + + +/* XXH3p_64bits_reset() : + * initialize with default parameters. + * result will be equivalent to `XXH3p_64bits()`. */ +XXH_PUBLIC_API XXH_errorcode XXH3p_64bits_reset(XXH3p_state_t* statePtr); +/* XXH3p_64bits_reset_withSeed() : + * generate a custom secret from `seed`, and store it into state. + * digest will be equivalent to `XXH3p_64bits_withSeed()`. 
*/ +XXH_PUBLIC_API XXH_errorcode XXH3p_64bits_reset_withSeed(XXH3p_state_t* statePtr, XXH64_hash_t seed); +/* XXH3p_64bits_reset_withSecret() : + * `secret` is referenced, and must outlive the hash streaming session. + * secretSize must be >= XXH3p_SECRET_SIZE_MIN. + */ +XXH_PUBLIC_API XXH_errorcode XXH3p_64bits_reset_withSecret(XXH3p_state_t* statePtr, const void* secret, size_t secretSize); + +XXH_PUBLIC_API XXH_errorcode XXH3p_64bits_update (XXH3p_state_t* statePtr, const void* input, size_t length); +XXH_PUBLIC_API XXH64_hash_t XXH3p_64bits_digest (const XXH3p_state_t* statePtr); + + +/* 128-bit */ + +#ifdef XXH_NAMESPACE +# define XXH128 XXH_NAME2(XXH_NAMESPACE, XXH128) +# define XXH3p_128bits XXH_NAME2(XXH_NAMESPACE, XXH3p_128bits) +# define XXH3p_128bits_withSeed XXH_NAME2(XXH_NAMESPACE, XXH3p_128bits_withSeed) +# define XXH3p_128bits_withSecret XXH_NAME2(XXH_NAMESPACE, XXH3p_128bits_withSecret) + +# define XXH3p_128bits_reset XXH_NAME2(XXH_NAMESPACE, XXH3p_128bits_reset) +# define XXH3p_128bits_reset_withSeed XXH_NAME2(XXH_NAMESPACE, XXH3p_128bits_reset_withSeed) +# define XXH3p_128bits_reset_withSecret XXH_NAME2(XXH_NAMESPACE, XXH3p_128bits_reset_withSecret) +# define XXH3p_128bits_update XXH_NAME2(XXH_NAMESPACE, XXH3p_128bits_update) +# define XXH3p_128bits_digest XXH_NAME2(XXH_NAMESPACE, XXH3p_128bits_digest) + +# define XXH128_isEqual XXH_NAME2(XXH_NAMESPACE, XXH128_isEqual) +# define XXH128_cmp XXH_NAME2(XXH_NAMESPACE, XXH128_cmp) +# define XXH128_canonicalFromHash XXH_NAME2(XXH_NAMESPACE, XXH128_canonicalFromHash) +# define XXH128_hashFromCanonical XXH_NAME2(XXH_NAMESPACE, XXH128_hashFromCanonical) #endif +typedef struct { + XXH64_hash_t low64; + XXH64_hash_t high64; +} XXH128_hash_t; + +XXH_PUBLIC_API XXH128_hash_t XXH128(const void* data, size_t len, XXH64_hash_t seed); +XXH_PUBLIC_API XXH128_hash_t XXH3p_128bits(const void* data, size_t len); +XXH_PUBLIC_API XXH128_hash_t XXH3p_128bits_withSeed(const void* data, size_t len, XXH64_hash_t seed); /* == XXH128() */ +XXH_PUBLIC_API XXH128_hash_t XXH3p_128bits_withSecret(const void* data, size_t len, const void* secret, size_t secretSize); + +XXH_PUBLIC_API XXH_errorcode XXH3p_128bits_reset(XXH3p_state_t* statePtr); +XXH_PUBLIC_API XXH_errorcode XXH3p_128bits_reset_withSeed(XXH3p_state_t* statePtr, XXH64_hash_t seed); +XXH_PUBLIC_API XXH_errorcode XXH3p_128bits_reset_withSecret(XXH3p_state_t* statePtr, const void* secret, size_t secretSize); + +XXH_PUBLIC_API XXH_errorcode XXH3p_128bits_update (XXH3p_state_t* statePtr, const void* input, size_t length); +XXH_PUBLIC_API XXH128_hash_t XXH3p_128bits_digest (const XXH3p_state_t* statePtr); + + +/* Note : for better performance, following functions can be inlined, + * using XXH_INLINE_ALL */ + +/* return : 1 is equal, 0 if different */ +XXH_PUBLIC_API int XXH128_isEqual(XXH128_hash_t h1, XXH128_hash_t h2); + +/* This comparator is compatible with stdlib's qsort(). 
+ * return : >0 if *h128_1 > *h128_2 + * <0 if *h128_1 < *h128_2 + * =0 if *h128_1 == *h128_2 */ +XXH_PUBLIC_API int XXH128_cmp(const void* h128_1, const void* h128_2); + + +/*====== Canonical representation ======*/ +typedef struct { unsigned char digest[16]; } XXH128_canonical_t; +XXH_PUBLIC_API void XXH128_canonicalFromHash(XXH128_canonical_t* dst, XXH128_hash_t hash); +XXH_PUBLIC_API XXH128_hash_t XXH128_hashFromCanonical(const XXH128_canonical_t* src); + + +#endif /* XXH_NO_LONG_LONG */ + + +/*-********************************************************************** +* XXH_INLINE_ALL +************************************************************************/ +#if defined(XXH_INLINE_ALL) || defined(XXH_PRIVATE_API) +# include "xxhash.cc" /* include xxhash function bodies as `static`, for inlining */ #endif + + +#endif /* XXH_STATIC_LINKING_ONLY */ + + #if defined (__cplusplus) -} // namespace rocksdb +} #endif + +#endif /* XXHASH_H_5627135585666179 */ diff --git a/utilities/transactions/transaction_lock_mgr.cc b/utilities/transactions/transaction_lock_mgr.cc index 084d817ea..45d04d188 100644 --- a/utilities/transactions/transaction_lock_mgr.cc +++ b/utilities/transactions/transaction_lock_mgr.cc @@ -179,8 +179,7 @@ TransactionLockMgr::~TransactionLockMgr() {} size_t LockMap::GetStripe(const std::string& key) const { assert(num_stripes_ > 0); - size_t stripe = static_cast(GetSliceNPHash64(key)) % num_stripes_; - return stripe; + return fastrange64(GetSliceNPHash64(key), num_stripes_); } void TransactionLockMgr::AddColumnFamily(uint32_t column_family_id) { diff --git a/utilities/transactions/write_prepared_transaction_test.cc b/utilities/transactions/write_prepared_transaction_test.cc index 7357907d0..9881b356c 100644 --- a/utilities/transactions/write_prepared_transaction_test.cc +++ b/utilities/transactions/write_prepared_transaction_test.cc @@ -3351,7 +3351,7 @@ TEST_P(WritePreparedTransactionTest, CommitOfDelayedPrepared) { snap.store(db->GetSnapshot()); ReadOptions roptions; roptions.snapshot = snap.load(); - auto s = db->Get(roptions, db->DefaultColumnFamily(), "key", &value); + auto s = db->Get(roptions, db->DefaultColumnFamily(), "key2", &value); ASSERT_OK(s); }; auto callback = [&](void* param) { @@ -3387,7 +3387,7 @@ TEST_P(WritePreparedTransactionTest, CommitOfDelayedPrepared) { ASSERT_OK(txn->SetName("xid")); std::string val_str = "value" + ToString(i); for (size_t b = 0; b < sub_batch_cnt; b++) { - ASSERT_OK(txn->Put(Slice("key"), val_str)); + ASSERT_OK(txn->Put(Slice("key2"), val_str)); } ASSERT_OK(txn->Prepare()); // Let an eviction to kick in @@ -3405,7 +3405,8 @@ TEST_P(WritePreparedTransactionTest, CommitOfDelayedPrepared) { roptions.snapshot = snap.load(); ASSERT_NE(nullptr, roptions.snapshot); PinnableSlice value2; - auto s = db->Get(roptions, db->DefaultColumnFamily(), "key", &value2); + auto s = + db->Get(roptions, db->DefaultColumnFamily(), "key2", &value2); ASSERT_OK(s); // It should see its own write ASSERT_TRUE(val_str == value2);
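The GetStripe() change above replaces `hash % num_stripes_` with fastrange64(). The actual fastrange64 lives in util/hash.h, which this excerpt does not show; the underlying idea is the multiply-high ("fastrange") mapping, roughly as sketched here (placeholder name, assuming a compiler-provided 128-bit integer type as on GCC/Clang):

    #include <cstdint>

    // Sketch of the fastrange technique: treat hash / 2^64 as a fraction in [0, 1)
    // and scale it into [0, range). One widening multiply replaces the modulo, and
    // the mapping consumes the high-order hash bits instead of the low-order ones.
    inline uint64_t FastRange64Sketch(uint64_t hash, uint64_t range) {
      unsigned __int128 wide = static_cast<unsigned __int128>(hash) * range;
      return static_cast<uint64_t>(wide >> 64);
    }

Like the modulo it replaces, the result is always in [0, range), so callers such as GetStripe() can use it as a drop-in bucket index.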