Rizin
UNIX-like reverse engineering framework and CLI tools
hash.h
/*
 * The following hash function is based on MurmurHash3, placed into the public
 * domain by Austin Appleby. See https://github.com/aappleby/smhasher for
 * details.
 */
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES

#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS

#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS

#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES

#ifndef JEMALLOC_ENABLE_INLINE
uint32_t	hash_x86_32(const void *key, int len, uint32_t seed);
void	hash_x86_128(const void *key, const int len, uint32_t seed,
    uint64_t rz_out[2]);
void	hash_x64_128(const void *key, const int len, const uint32_t seed,
    uint64_t rz_out[2]);
void	hash(const void *key, size_t len, const uint32_t seed,
    size_t rz_hash[2]);
#endif

#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_HASH_C_))
/******************************************************************************/
/* Internal implementation. */
JEMALLOC_INLINE uint32_t
hash_rotl_32(uint32_t x, int r)
{

	return ((x << r) | (x >> (32 - r)));
}

JEMALLOC_INLINE uint64_t
hash_rotl_64(uint64_t x, int r)
{

	return ((x << r) | (x >> (64 - r)));
}

JEMALLOC_INLINE uint32_t
hash_get_block_32(const uint32_t *p, int i)
{

	/* Handle unaligned read. */
	if (unlikely((uintptr_t)p & (sizeof(uint32_t)-1)) != 0) {
		uint32_t ret;

		memcpy(&ret, (uint8_t *)(p + i), sizeof(uint32_t));
		return (ret);
	}

	return (p[i]);
}

JEMALLOC_INLINE uint64_t
hash_get_block_64(const uint64_t *p, int i)
{

	/* Handle unaligned read. */
	if (unlikely((uintptr_t)p & (sizeof(uint64_t)-1)) != 0) {
		uint64_t ret;

		memcpy(&ret, (uint8_t *)(p + i), sizeof(uint64_t));
		return (ret);
	}

	return (p[i]);
}

JEMALLOC_INLINE uint32_t
hash_fmix_32(uint32_t h)
{

	h ^= h >> 16;
	h *= 0x85ebca6b;
	h ^= h >> 13;
	h *= 0xc2b2ae35;
	h ^= h >> 16;

	return (h);
}

JEMALLOC_INLINE uint64_t
hash_fmix_64(uint64_t k)
{

	k ^= k >> 33;
	k *= KQU(0xff51afd7ed558ccd);
	k ^= k >> 33;
	k *= KQU(0xc4ceb9fe1a85ec53);
	k ^= k >> 33;

	return (k);
}

JEMALLOC_INLINE uint32_t
hash_x86_32(const void *key, int len, uint32_t seed)
{
	const uint8_t *data = (const uint8_t *) key;
	const int nblocks = len / 4;

	uint32_t h1 = seed;

	const uint32_t c1 = 0xcc9e2d51;
	const uint32_t c2 = 0x1b873593;

	/* body */
	{
		const uint32_t *blocks = (const uint32_t *) (data + nblocks*4);
		int i;

		for (i = -nblocks; i; i++) {
			uint32_t k1 = hash_get_block_32(blocks, i);

			k1 *= c1;
			k1 = hash_rotl_32(k1, 15);
			k1 *= c2;

			h1 ^= k1;
			h1 = hash_rotl_32(h1, 13);
			h1 = h1*5 + 0xe6546b64;
		}
	}

	/* tail */
	{
		const uint8_t *tail = (const uint8_t *) (data + nblocks*4);

		uint32_t k1 = 0;

		switch (len & 3) {
		case 3: k1 ^= tail[2] << 16;
		case 2: k1 ^= tail[1] << 8;
		case 1: k1 ^= tail[0]; k1 *= c1; k1 = hash_rotl_32(k1, 15);
			k1 *= c2; h1 ^= k1;
		}
	}

	/* finalization */
	h1 ^= len;

	h1 = hash_fmix_32(h1);

	return (h1);
}

JEMALLOC_INLINE void
hash_x86_128(const void *key, const int len, uint32_t seed,
    uint64_t rz_out[2])
{
	const uint8_t * data = (const uint8_t *) key;
	const int nblocks = len / 16;

	uint32_t h1 = seed;
	uint32_t h2 = seed;
	uint32_t h3 = seed;
	uint32_t h4 = seed;

	const uint32_t c1 = 0x239b961b;
	const uint32_t c2 = 0xab0e9789;
	const uint32_t c3 = 0x38b34ae5;
	const uint32_t c4 = 0xa1e38b93;

	/* body */
	{
		const uint32_t *blocks = (const uint32_t *) (data + nblocks*16);
		int i;

		for (i = -nblocks; i; i++) {
			uint32_t k1 = hash_get_block_32(blocks, i*4 + 0);
			uint32_t k2 = hash_get_block_32(blocks, i*4 + 1);
			uint32_t k3 = hash_get_block_32(blocks, i*4 + 2);
			uint32_t k4 = hash_get_block_32(blocks, i*4 + 3);

			k1 *= c1; k1 = hash_rotl_32(k1, 15); k1 *= c2; h1 ^= k1;

			h1 = hash_rotl_32(h1, 19); h1 += h2;
			h1 = h1*5 + 0x561ccd1b;

			k2 *= c2; k2 = hash_rotl_32(k2, 16); k2 *= c3; h2 ^= k2;

			h2 = hash_rotl_32(h2, 17); h2 += h3;
			h2 = h2*5 + 0x0bcaa747;

			k3 *= c3; k3 = hash_rotl_32(k3, 17); k3 *= c4; h3 ^= k3;

			h3 = hash_rotl_32(h3, 15); h3 += h4;
			h3 = h3*5 + 0x96cd1c35;

			k4 *= c4; k4 = hash_rotl_32(k4, 18); k4 *= c1; h4 ^= k4;

			h4 = hash_rotl_32(h4, 13); h4 += h1;
			h4 = h4*5 + 0x32ac3b17;
		}
	}

	/* tail */
	{
		const uint8_t *tail = (const uint8_t *) (data + nblocks*16);
		uint32_t k1 = 0;
		uint32_t k2 = 0;
		uint32_t k3 = 0;
		uint32_t k4 = 0;

		switch (len & 15) {
		case 15: k4 ^= tail[14] << 16;
		case 14: k4 ^= tail[13] << 8;
		case 13: k4 ^= tail[12] << 0;
			k4 *= c4; k4 = hash_rotl_32(k4, 18); k4 *= c1; h4 ^= k4;

		case 12: k3 ^= tail[11] << 24;
		case 11: k3 ^= tail[10] << 16;
		case 10: k3 ^= tail[ 9] << 8;
		case 9: k3 ^= tail[ 8] << 0;
			k3 *= c3; k3 = hash_rotl_32(k3, 17); k3 *= c4; h3 ^= k3;

		case 8: k2 ^= tail[ 7] << 24;
		case 7: k2 ^= tail[ 6] << 16;
		case 6: k2 ^= tail[ 5] << 8;
		case 5: k2 ^= tail[ 4] << 0;
			k2 *= c2; k2 = hash_rotl_32(k2, 16); k2 *= c3; h2 ^= k2;

		case 4: k1 ^= tail[ 3] << 24;
		case 3: k1 ^= tail[ 2] << 16;
		case 2: k1 ^= tail[ 1] << 8;
		case 1: k1 ^= tail[ 0] << 0;
			k1 *= c1; k1 = hash_rotl_32(k1, 15); k1 *= c2; h1 ^= k1;
		}
	}

	/* finalization */
	h1 ^= len; h2 ^= len; h3 ^= len; h4 ^= len;

	h1 += h2; h1 += h3; h1 += h4;
	h2 += h1; h3 += h1; h4 += h1;

	h1 = hash_fmix_32(h1);
	h2 = hash_fmix_32(h2);
	h3 = hash_fmix_32(h3);
	h4 = hash_fmix_32(h4);

	h1 += h2; h1 += h3; h1 += h4;
	h2 += h1; h3 += h1; h4 += h1;

	rz_out[0] = (((uint64_t) h2) << 32) | h1;
	rz_out[1] = (((uint64_t) h4) << 32) | h3;
}

JEMALLOC_INLINE void
hash_x64_128(const void *key, const int len, const uint32_t seed,
    uint64_t rz_out[2])
{
	const uint8_t *data = (const uint8_t *) key;
	const int nblocks = len / 16;

	uint64_t h1 = seed;
	uint64_t h2 = seed;

	const uint64_t c1 = KQU(0x87c37b91114253d5);
	const uint64_t c2 = KQU(0x4cf5ad432745937f);

	/* body */
	{
		const uint64_t *blocks = (const uint64_t *) (data);
		int i;

		for (i = 0; i < nblocks; i++) {
			uint64_t k1 = hash_get_block_64(blocks, i*2 + 0);
			uint64_t k2 = hash_get_block_64(blocks, i*2 + 1);

			k1 *= c1; k1 = hash_rotl_64(k1, 31); k1 *= c2; h1 ^= k1;

			h1 = hash_rotl_64(h1, 27); h1 += h2;
			h1 = h1*5 + 0x52dce729;

			k2 *= c2; k2 = hash_rotl_64(k2, 33); k2 *= c1; h2 ^= k2;

			h2 = hash_rotl_64(h2, 31); h2 += h1;
			h2 = h2*5 + 0x38495ab5;
		}
	}

	/* tail */
	{
		const uint8_t *tail = (const uint8_t*)(data + nblocks*16);
		uint64_t k1 = 0;
		uint64_t k2 = 0;

		switch (len & 15) {
		case 15: k2 ^= ((uint64_t)(tail[14])) << 48;
		case 14: k2 ^= ((uint64_t)(tail[13])) << 40;
		case 13: k2 ^= ((uint64_t)(tail[12])) << 32;
		case 12: k2 ^= ((uint64_t)(tail[11])) << 24;
		case 11: k2 ^= ((uint64_t)(tail[10])) << 16;
		case 10: k2 ^= ((uint64_t)(tail[ 9])) << 8;
		case 9: k2 ^= ((uint64_t)(tail[ 8])) << 0;
			k2 *= c2; k2 = hash_rotl_64(k2, 33); k2 *= c1; h2 ^= k2;

		case 8: k1 ^= ((uint64_t)(tail[ 7])) << 56;
		case 7: k1 ^= ((uint64_t)(tail[ 6])) << 48;
		case 6: k1 ^= ((uint64_t)(tail[ 5])) << 40;
		case 5: k1 ^= ((uint64_t)(tail[ 4])) << 32;
		case 4: k1 ^= ((uint64_t)(tail[ 3])) << 24;
		case 3: k1 ^= ((uint64_t)(tail[ 2])) << 16;
		case 2: k1 ^= ((uint64_t)(tail[ 1])) << 8;
		case 1: k1 ^= ((uint64_t)(tail[ 0])) << 0;
			k1 *= c1; k1 = hash_rotl_64(k1, 31); k1 *= c2; h1 ^= k1;
		}
	}

	/* finalization */
	h1 ^= len; h2 ^= len;

	h1 += h2;
	h2 += h1;

	h1 = hash_fmix_64(h1);
	h2 = hash_fmix_64(h2);

	h1 += h2;
	h2 += h1;

	rz_out[0] = h1;
	rz_out[1] = h2;
}

/******************************************************************************/
/* API. */
JEMALLOC_INLINE void
hash(const void *key, size_t len, const uint32_t seed, size_t rz_hash[2])
{

	assert(len <= INT_MAX); /* Unfortunate implementation limitation. */

#if (LG_SIZEOF_PTR == 3 && !defined(JEMALLOC_BIG_ENDIAN))
	hash_x64_128(key, (int)len, seed, (uint64_t *)rz_hash);
#else
	{
		uint64_t hashes[2];
		hash_x86_128(key, (int)len, seed, hashes);
		rz_hash[0] = (size_t)hashes[0];
		rz_hash[1] = (size_t)hashes[1];
	}
#endif
}
#endif

#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
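Below is a minimal standalone usage sketch for the hash() wrapper declared in this header. It is illustrative only: the macro stubs for JEMALLOC_INLINE, KQU, unlikely, and LG_SIZEOF_PTR are assumptions standing in for definitions that jemalloc's internal headers normally supply, and the include path is hypothetical.

/* example.c: hashing a buffer with the 128-bit hash() API (illustrative sketch). */
#include <assert.h>
#include <limits.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Assumed stand-ins for jemalloc-internal definitions (not part of this file). */
#define JEMALLOC_INLINE static inline
#define KQU(q) q##ULL
#define unlikely(expr) (expr)
#define LG_SIZEOF_PTR 3 /* assume a 64-bit little-endian target */

/* Select the inline definitions from the header's multi-pass layout. */
#define JEMALLOC_H_INLINES
#define JEMALLOC_ENABLE_INLINE
#include "hash.h" /* hypothetical relative path to this file */

int main(void) {
	const char msg[] = "rizin";
	size_t digest[2];

	/* 128-bit MurmurHash3 of msg with seed 0, returned as two machine words. */
	hash(msg, strlen(msg), 0, digest);
	printf("%016zx %016zx\n", digest[0], digest[1]);
	return 0;
}

With JEMALLOC_ENABLE_INLINE defined, the prototype block at the top of the header is skipped and the inline definitions above are compiled directly into the including translation unit; on a 64-bit little-endian build, hash() dispatches to hash_x64_128().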