#include "murmur3.h"

#include <string.h>
| 2 | |
/*
 * Rotate x left by r bits.
 *
 * Both shift counts are masked to 0..31: the unmasked form
 * `x >> (32 - r)` shifts by the full type width when r == 0, which is
 * undefined behavior in C (C11 6.5.7). For r in 1..31 the result is
 * unchanged, so the r = 13 / r = 15 call sites below are unaffected.
 */
static inline uint32_t rotl32(uint32_t x, int8_t r)
{
	return (x << (r & 31)) | (x >> ((32 - r) & 31));
}
| 7 | |
| 8 | //----------------------------------------------------------------------------- |
| 9 | // Finalization mix - force all bits of a hash block to avalanche |
| 10 | |
/*
 * Finalization mix: forces every input bit to avalanche across all
 * output bits via xor-shift / multiply rounds (reference MurmurHash3
 * constants 0x85ebca6b, 0xc2b2ae35).
 */
static inline uint32_t fmix32(uint32_t h)
{
	h = (h ^ (h >> 16)) * 0x85ebca6b;
	h = (h ^ (h >> 13)) * 0xc2b2ae35;
	return h ^ (h >> 16);
}
| 21 | |
/*
 * Process the final 0-3 bytes that do not fill a 4-byte block, then
 * finalize: xor in the total length and run the avalanche mix.
 *
 * data    - start of the key
 * nblocks - number of complete 4-byte blocks already consumed
 * len     - total key length in bytes (only len & 3 bytes remain)
 * c1, c2  - murmur3 mixing constants (passed through from the caller)
 * h1      - running hash state
 *
 * Fixes: annotate the intentional case fallthrough (silences
 * -Wimplicit-fallthrough), add a default case, drop the stray empty
 * statement after the switch, and drop the no-op pointer cast.
 */
static uint32_t murmur3_tail(const uint8_t *data, const int nblocks,
			     uint32_t len, const uint32_t c1,
			     const uint32_t c2, uint32_t h1)
{
	const uint8_t *tail = data + nblocks * 4;
	uint32_t k1 = 0;

	switch (len & 3) {
	case 3:
		k1 ^= (uint32_t)tail[2] << 16;
		/* fallthrough */
	case 2:
		k1 ^= (uint32_t)tail[1] << 8;
		/* fallthrough */
	case 1:
		k1 ^= tail[0];
		k1 *= c1;
		k1 = rotl32(k1, 15);
		k1 *= c2;
		h1 ^= k1;
		break;
	default:
		/* len was a multiple of 4: nothing left to mix */
		break;
	}

	return fmix32(h1 ^ len);
}
| 44 | |
/*
 * MurmurHash3 x86_32: hash `len` bytes of `key`, salted with `seed`.
 *
 * Fix: the previous code cast the byte pointer to `const uint32_t *`
 * and dereferenced it, which violates strict aliasing and faults (or
 * is UB) for unaligned keys on strict-alignment targets. memcpy into
 * a local performs the same native-endian 32-bit load - identical
 * output on every platform - and compilers lower it to a single load.
 *
 * NOTE(review): like the reference implementation, the block load is
 * native-endian, so results differ between little- and big-endian
 * hosts; confirm callers never exchange hashes across architectures.
 */
uint32_t murmurhash3(const void *key, uint32_t len, uint32_t seed)
{
	const uint8_t *data = (const uint8_t *)key;
	const int nblocks = len / 4;
	uint32_t h1 = seed;
	const uint32_t c1 = 0xcc9e2d51;
	const uint32_t c2 = 0x1b873593;
	int i;

	/* Body: one mix round per complete 4-byte block. */
	for (i = 0; i < nblocks; i++) {
		uint32_t k1;

		memcpy(&k1, data + (size_t)i * 4, sizeof k1);

		k1 *= c1;
		k1 = rotl32(k1, 15);
		k1 *= c2;

		h1 ^= k1;
		h1 = rotl32(h1, 13);
		h1 = h1 * 5 + 0xe6546b64;
	}

	/* Leftover 0-3 bytes plus finalization. */
	return murmur3_tail(data, nblocks, len, c1, c2, h1);
}