Commit | Line | Data |
---|---|---|
9f0e365d JA |
#include "murmur3.h"

#include <string.h>
2 | ||
/*
 * Rotate x left by r bits; r must be in [0, 31].
 *
 * The right-shift count is masked so the expression stays defined for
 * r == 0 (a bare "x >> 32" is undefined behavior).  Current callers
 * only pass 13 and 15, for which the result is unchanged.
 */
static inline uint32_t rotl32(uint32_t x, int8_t r)
{
	return (x << r) | (x >> ((32 - r) & 31));
}
7 | ||
//-----------------------------------------------------------------------------
// Finalization mix - force all bits of a hash block to avalanche

static inline uint32_t fmix32(uint32_t h)
{
	h = (h ^ (h >> 16)) * 0x85ebca6b;
	h = (h ^ (h >> 13)) * 0xc2b2ae35;
	return h ^ (h >> 16);
}
21 | ||
/*
 * Mix the trailing 0-3 bytes of the input (those past the last full
 * 4-byte block) into h1, then apply the final avalanche.
 *
 * data/len describe the whole key; nblocks is the number of complete
 * 4-byte blocks already mixed into h1 by the caller.  c1/c2 are the
 * murmur3 multiplication constants.  Returns the finished hash.
 */
static uint32_t murmur3_tail(const uint8_t *data, const int nblocks,
			     uint32_t len, const uint32_t c1,
			     const uint32_t c2, uint32_t h1)
{
	/* data is already const uint8_t *; no cast needed. */
	const uint8_t *tail = data + nblocks * 4;
	uint32_t k1 = 0;

	switch (len & 3) {
	case 3:
		k1 ^= tail[2] << 16;
		/* fall through */
	case 2:
		k1 ^= tail[1] << 8;
		/* fall through */
	case 1:
		k1 ^= tail[0];
		k1 *= c1;
		k1 = rotl32(k1, 15);
		k1 *= c2;
		h1 ^= k1;
		break;
	default:
		/* len was a multiple of 4: nothing left to mix. */
		break;
	}

	return fmix32(h1 ^ len);
}
46 | ||
/*
 * MurmurHash3 (x86 32-bit variant) of the first len bytes at key,
 * seeded with seed.
 *
 * The body is consumed four bytes at a time; the remaining 0-3 bytes
 * and the final avalanche are handled by murmur3_tail().  Block words
 * are read in native byte order, so the result is endian-dependent,
 * matching the original implementation.
 */
uint32_t murmurhash3(const void *key, uint32_t len, uint32_t seed)
{
	const uint8_t *data = (const uint8_t *)key;
	const int nblocks = len / 4;
	uint32_t h1 = seed;
	const uint32_t c1 = 0xcc9e2d51;
	const uint32_t c2 = 0x1b873593;
	int i;

	for (i = 0; i < nblocks; i++) {
		uint32_t k1;

		/*
		 * Load each word with memcpy rather than dereferencing
		 * a casted uint32_t pointer: key is not guaranteed to
		 * be 4-byte aligned, and the cast would also violate
		 * strict aliasing.  Compilers emit a single load here.
		 */
		memcpy(&k1, data + (size_t)i * 4, sizeof(k1));

		k1 *= c1;
		k1 = rotl32(k1, 15);
		k1 *= c2;

		h1 ^= k1;
		h1 = rotl32(h1, 13);
		h1 = h1 * 5 + 0xe6546b64;
	}

	return murmur3_tail(data, nblocks, len, c1, c2, h1);
}