StarPU Internal Handbook
uthash.h
1 /*
2 Copyright (c) 2003-2010, Troy D. Hanson http://uthash.sourceforge.net
3 All rights reserved.
4 
5 Redistribution and use in source and binary forms, with or without
6 modification, are permitted provided that the following conditions are met:
7 
8  * Redistributions of source code must retain the above copyright
9  notice, this list of conditions and the following disclaimer.
10 
11 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
12 IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
13 TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
14 PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER
15 OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
16 EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
17 PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
18 PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
19 LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
20 NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
21 SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
22 */
23 
24 #ifndef UTHASH_H
25 #define UTHASH_H
26 
27 #include <string.h> /* memcmp,strlen */
28 #include <stddef.h> /* ptrdiff_t */
29 
/* These macros use decltype or the earlier __typeof GNU extension.
   As decltype is only available in newer compilers (VS2010 or gcc 4.3+
   when compiling c++ source) this code uses whatever method is needed
   or, for VS2008 where neither is available, uses casting workarounds. */
#ifdef _MSC_VER /* MS compiler */
#if _MSC_VER >= 1600 && defined(__cplusplus) /* VS2010 or newer in C++ mode */
#define DECLTYPE(x) (decltype(x))
#else /* VS2008 or older (or VS2010 in C mode) */
/* No type-deduction available: DECLTYPE expands to nothing and
 * DECLTYPE_ASSIGN falls back to assigning through a char** alias. */
#define NO_DECLTYPE
#define DECLTYPE(x)
#endif
#else /* GNU, Sun and other compilers */
#define DECLTYPE(x) (__typeof(x))
#endif

/* Assign src to dst even though dst's type is unknown to the macro.
 * With type deduction, cast src to dst's type; without it, smuggle the
 * pointer value through a char** (both sides must be pointer-sized). */
#ifdef NO_DECLTYPE
#define DECLTYPE_ASSIGN(dst,src) \
do { \
  char **_da_dst = (char**)(&(dst)); \
  *_da_dst = (char*)(src); \
} while(0)
#else
#define DECLTYPE_ASSIGN(dst,src) \
do { \
  (dst) = DECLTYPE(dst)(src); \
} while(0)
#endif
57 
/* a number of the hash function use uint32_t which isn't defined on win32 */
/* NOTE(review): MSVC 2010+ ships <stdint.h>; this typedef may clash with it
 * when this header is used on newer toolchains — confirm target compilers. */
#ifdef _MSC_VER
typedef unsigned int uint32_t;
#else
#include <inttypes.h> /* uint32_t */
#endif
64 
#define UTHASH_VERSION 1.9.3

/* Memory-management and error hooks; the application may #define these
 * before including this header to override them.
 * NOTE(review): exit()/malloc()/free() are used but <stdlib.h> is not
 * included here — presumably the including file provides it; confirm. */
#define uthash_fatal(msg) exit(-1) /* fatal error (out of memory,etc) */
#define uthash_malloc(sz) malloc(sz) /* malloc fcn */
#define uthash_free(ptr,sz) free(ptr) /* free fcn */

#define uthash_noexpand_fyi(tbl) /* can be defined to log noexpand */
#define uthash_expand_fyi(tbl) /* can be defined to log expands */

/* initial number of buckets */
#define HASH_INITIAL_NUM_BUCKETS 32 /* initial number of buckets */
#define HASH_INITIAL_NUM_BUCKETS_LOG2 5 /* lg2 of initial number of buckets */
#define HASH_BKT_CAPACITY_THRESH 10 /* expand when bucket count reaches */

/* calculate the element whose hash handle address is hhp
 * (tbl->hho is the byte offset of the hash handle within the element) */
#define ELMT_FROM_HH(tbl,hhp) ((void*)(((char*)(hhp)) - ((tbl)->hho)))
81 
/* Look up a key: hash it, consult the (optional) bloom filter, then scan
 * the one bucket it can live in. Sets out to the element or NULL. */
#define HASH_FIND(hh,head,keyptr,keylen,out) \
do { \
  unsigned _hf_bkt=0,_hf_hashv=0; \
  out=NULL; \
  if (head) { \
    HASH_FCN(keyptr,keylen, (head)->hh.tbl->num_buckets, _hf_hashv, _hf_bkt); \
    /* bloom test is (1) when HASH_BLOOM is not compiled in */ \
    if (HASH_BLOOM_TEST((head)->hh.tbl, _hf_hashv)) { \
      HASH_FIND_IN_BKT((head)->hh.tbl, hh, (head)->hh.tbl->buckets[ _hf_bkt ], \
                       keyptr,keylen,out); \
    } \
  } \
} while (0)
94 
/* Optional bloom filter (enable with -DHASH_BLOOM=n for a 2^n-bit filter);
 * lets HASH_FIND reject most absent keys without scanning a bucket. */
#ifdef HASH_BLOOM
#define HASH_BLOOM_BITLEN (1ULL << HASH_BLOOM)
/* whole expansion parenthesized so it is safe inside larger expressions */
#define HASH_BLOOM_BYTELEN ((HASH_BLOOM_BITLEN/8) + ((HASH_BLOOM_BITLEN%8) ? 1:0))
/* allocate and zero the bit vector; no trailing semicolon so the macro
 * can be used like a statement (e.g. in an if/else) */
#define HASH_BLOOM_MAKE(tbl) \
do { \
  (tbl)->bloom_nbits = HASH_BLOOM; \
  (tbl)->bloom_bv = (uint8_t*)uthash_malloc(HASH_BLOOM_BYTELEN); \
  if (!((tbl)->bloom_bv)) { uthash_fatal( "out of memory"); } \
  memset((tbl)->bloom_bv, 0, HASH_BLOOM_BYTELEN); \
  (tbl)->bloom_sig = HASH_BLOOM_SIGNATURE; \
} while (0)

#define HASH_BLOOM_FREE(tbl) \
do { \
  uthash_free((tbl)->bloom_bv, HASH_BLOOM_BYTELEN); \
} while (0)

/* bv parenthesized: the argument may be any expression yielding a pointer */
#define HASH_BLOOM_BITSET(bv,idx) ((bv)[(idx)/8] |= (1U << ((idx)%8)))
#define HASH_BLOOM_BITTEST(bv,idx) ((bv)[(idx)/8] & (1U << ((idx)%8)))

#define HASH_BLOOM_ADD(tbl,hashv) \
  HASH_BLOOM_BITSET((tbl)->bloom_bv, (hashv & (uint32_t)((1ULL << (tbl)->bloom_nbits) - 1)))

#define HASH_BLOOM_TEST(tbl,hashv) \
  HASH_BLOOM_BITTEST((tbl)->bloom_bv, (hashv & (uint32_t)((1ULL << (tbl)->bloom_nbits) - 1)))

#else
/* bloom filter disabled: adds/frees are no-ops, tests always pass */
#define HASH_BLOOM_MAKE(tbl)
#define HASH_BLOOM_FREE(tbl)
#define HASH_BLOOM_ADD(tbl,hashv)
#define HASH_BLOOM_TEST(tbl,hashv) (1)
#endif
127 
/* Allocate and initialize the shared UT_hash_table the first time an
 * element is added. head must be the (new) first element. */
#define HASH_MAKE_TABLE(hh,head) \
do { \
  (head)->hh.tbl = (UT_hash_table*)uthash_malloc( \
                   sizeof(UT_hash_table)); \
  if (!((head)->hh.tbl)) { uthash_fatal( "out of memory"); } \
  memset((head)->hh.tbl, 0, sizeof(UT_hash_table)); \
  (head)->hh.tbl->tail = &((head)->hh); \
  (head)->hh.tbl->num_buckets = HASH_INITIAL_NUM_BUCKETS; \
  (head)->hh.tbl->log2_num_buckets = HASH_INITIAL_NUM_BUCKETS_LOG2; \
  /* hho: byte offset of this hash handle within the element struct */ \
  (head)->hh.tbl->hho = (char*)(&(head)->hh) - (char*)(head); \
  (head)->hh.tbl->buckets = (UT_hash_bucket*)uthash_malloc( \
          HASH_INITIAL_NUM_BUCKETS*sizeof(struct UT_hash_bucket)); \
  if (! (head)->hh.tbl->buckets) { uthash_fatal( "out of memory"); } \
  memset((head)->hh.tbl->buckets, 0, \
          HASH_INITIAL_NUM_BUCKETS*sizeof(struct UT_hash_bucket)); \
  HASH_BLOOM_MAKE((head)->hh.tbl); \
  (head)->hh.tbl->signature = HASH_SIGNATURE; \
} while(0)
146 
/* Add an element whose key is a field of the structure itself. */
#define HASH_ADD(hh,head,fieldname,keylen_in,add) \
  HASH_ADD_KEYPTR(hh,head,&add->fieldname,keylen_in,add)

#ifdef STARPU_DEBUG
/* Check that we don't insert the same key several times
 * (StarPU addition; compiled away outside debug builds).
 * NOTE(review): uses __typeof__ unconditionally, which the NO_DECLTYPE
 * compilers above (VS2008/VS2010-C) do not support — confirm STARPU_DEBUG
 * is never enabled on those toolchains. */
#define HASH_CHECK_KEY(hh,head,keyptr,keylen,out) \
do { \
  __typeof__(out) _out; \
  HASH_FIND(hh,head,keyptr,keylen,_out); \
  STARPU_ASSERT_MSG(!_out,"Cannot insert the same key twice"); \
} while(0)
#else
#define HASH_CHECK_KEY(hh,head,keyptr,keylen,out)
#endif
161 
/* Add an element keyed by external storage at keyptr (not copied; the key
 * memory must outlive the element's presence in the hash). Appends to the
 * app-order list, creates the table on first add, and inserts into the
 * bucket chosen by the hash function. */
#define HASH_ADD_KEYPTR(hh,head,keyptr,keylen_in,add) \
do { \
  unsigned _ha_bkt=0; \
  HASH_CHECK_KEY(hh,head,keyptr,keylen_in,add); \
  (add)->hh.next = NULL; \
  (add)->hh.key = (char*)keyptr; \
  (add)->hh.keylen = keylen_in; \
  if (!(head)) { \
    /* first element: it becomes the head and owns a fresh table */ \
    head = (add); \
    (head)->hh.prev = NULL; \
    HASH_MAKE_TABLE(hh,head); \
  } else { \
    /* append to the app-order doubly-linked list via the tail pointer */ \
    (head)->hh.tbl->tail->next = (add); \
    (add)->hh.prev = ELMT_FROM_HH((head)->hh.tbl, (head)->hh.tbl->tail); \
    (head)->hh.tbl->tail = &((add)->hh); \
  } \
  (head)->hh.tbl->num_items++; \
  (add)->hh.tbl = (head)->hh.tbl; \
  /* hashv is cached in the handle so deletion/expansion never re-hash */ \
  HASH_FCN(keyptr,keylen_in, (head)->hh.tbl->num_buckets, \
          (add)->hh.hashv, _ha_bkt); \
  HASH_ADD_TO_BKT((head)->hh.tbl->buckets[_ha_bkt],&(add)->hh); \
  HASH_BLOOM_ADD((head)->hh.tbl,(add)->hh.hashv); \
  HASH_EMIT_KEY(hh,head,keyptr,keylen_in); \
  HASH_FSCK(hh,head); \
} while(0)
187 
/* Map a hash value to a bucket index. num_bkts must be a power of two
 * (bucket counts in this hash always are), so the mask equals modulo. */
#define HASH_TO_BKT( hashv, num_bkts, bkt ) \
do { \
  bkt = ((hashv) & ((num_bkts) - 1)); \
} while(0)
192 
/* delete "delptr" from the hash table.
 * "the usual" patch-up process for the app-order doubly-linked-list.
 * The use of _hd_hh_del below deserves special explanation.
 * These used to be expressed using (delptr) but that led to a bug
 * if someone used the same symbol for the head and deletee, like
 * HASH_DELETE(hh,users,users);
 * We want that to work, but by changing the head (users) below
 * we were forfeiting our ability to further refer to the deletee (users)
 * in the patch-up process. Solution: use scratch space to
 * copy the deletee pointer, then the latter references are via that
 * scratch pointer rather than through the repointed (users) symbol.
 */
#define HASH_DELETE(hh,head,delptr) \
do { \
  unsigned _hd_bkt; \
  struct UT_hash_handle *_hd_hh_del; \
  if ( ((delptr)->hh.prev == NULL) && ((delptr)->hh.next == NULL) ) { \
    /* last remaining element: tear down the whole table */ \
    uthash_free((head)->hh.tbl->buckets, \
                (head)->hh.tbl->num_buckets*sizeof(struct UT_hash_bucket) ); \
    HASH_BLOOM_FREE((head)->hh.tbl); \
    uthash_free((head)->hh.tbl, sizeof(UT_hash_table)); \
    head = NULL; \
  } else { \
    _hd_hh_del = &((delptr)->hh); \
    if ((delptr) == ELMT_FROM_HH((head)->hh.tbl,(head)->hh.tbl->tail)) { \
      /* deleting the tail: tail moves to the previous element's handle */ \
      (head)->hh.tbl->tail = \
          (UT_hash_handle*)((char*)((delptr)->hh.prev) + \
          (head)->hh.tbl->hho); \
    } \
    if ((delptr)->hh.prev) { \
      ((UT_hash_handle*)((char*)((delptr)->hh.prev) + \
          (head)->hh.tbl->hho))->next = (delptr)->hh.next; \
    } else { \
      /* deleting the head: repoint head (hence the scratch copy above) */ \
      DECLTYPE_ASSIGN(head,(delptr)->hh.next); \
    } \
    if (_hd_hh_del->next) { \
      ((UT_hash_handle*)((char*)_hd_hh_del->next + \
          (head)->hh.tbl->hho))->prev = \
          _hd_hh_del->prev; \
    } \
    HASH_TO_BKT( _hd_hh_del->hashv, (head)->hh.tbl->num_buckets, _hd_bkt); \
    HASH_DEL_IN_BKT(hh,(head)->hh.tbl->buckets[_hd_bkt], _hd_hh_del); \
    (head)->hh.tbl->num_items--; \
  } \
  HASH_FSCK(hh,head); \
} while (0)
239 
240 
/* convenience forms of HASH_FIND/HASH_ADD/HASH_DEL for the default
 * handle name "hh" and common key kinds (string, int, pointer) */
#define HASH_FIND_STR(head,findstr,out) \
  HASH_FIND(hh,head,findstr,strlen(findstr),out)
/* strfield[0] makes this work for char-array key fields */
#define HASH_ADD_STR(head,strfield,add) \
  HASH_ADD(hh,head,strfield[0],strlen(add->strfield),add)
#define HASH_FIND_INT(head,findint,out) \
  HASH_FIND(hh,head,findint,sizeof(int),out)
#define HASH_ADD_INT(head,intfield,add) \
  HASH_ADD(hh,head,intfield,sizeof(int),add)
#define HASH_FIND_PTR(head,findptr,out) \
  HASH_FIND(hh,head,findptr,sizeof(void *),out)
#define HASH_ADD_PTR(head,ptrfield,add) \
  HASH_ADD(hh,head,ptrfield,sizeof(void *),add)
#define HASH_DEL(head,delptr) \
  HASH_DELETE(hh,head,delptr)
256 
/* HASH_FSCK checks hash integrity on every add/delete when HASH_DEBUG is defined.
 * This is for uthash developer only; it compiles away if HASH_DEBUG isn't defined.
 * It verifies: bucket chain prev/next consistency, per-bucket counts, the total
 * item count, and the app-order list's prev/next consistency and length. */
#ifdef HASH_DEBUG
#define HASH_OOPS(...) do { fprintf(stderr,__VA_ARGS__); exit(-1); } while (0)
#define HASH_FSCK(hh,head) \
do { \
  unsigned _bkt_i; \
  unsigned _count, _bkt_count; \
  char *_prev; \
  struct UT_hash_handle *_thh; \
  if (head) { \
    _count = 0; \
    /* pass 1: walk every bucket chain */ \
    for( _bkt_i = 0; _bkt_i < (head)->hh.tbl->num_buckets; _bkt_i++) { \
      _bkt_count = 0; \
      _thh = (head)->hh.tbl->buckets[_bkt_i].hh_head; \
      _prev = NULL; \
      while (_thh) { \
        if (_prev != (char*)(_thh->hh_prev)) { \
          HASH_OOPS("invalid hh_prev %p, actual %p\n", \
                    _thh->hh_prev, _prev ); \
        } \
        _bkt_count++; \
        _prev = (char*)(_thh); \
        _thh = _thh->hh_next; \
      } \
      _count += _bkt_count; \
      if ((head)->hh.tbl->buckets[_bkt_i].count != _bkt_count) { \
        HASH_OOPS("invalid bucket count %u, actual %u\n", \
                  (head)->hh.tbl->buckets[_bkt_i].count, _bkt_count); \
      } \
    } \
    if (_count != (head)->hh.tbl->num_items) { \
      HASH_OOPS("invalid hh item count %u, actual %u\n", \
                (head)->hh.tbl->num_items, _count ); \
    } \
    /* traverse hh in app order; check next/prev integrity, count */ \
    _count = 0; \
    _prev = NULL; \
    _thh = &(head)->hh; \
    while (_thh) { \
      _count++; \
      if (_prev !=(char*)(_thh->prev)) { \
        HASH_OOPS("invalid prev %p, actual %p\n", \
                  _thh->prev, _prev ); \
      } \
      _prev = (char*)ELMT_FROM_HH((head)->hh.tbl, _thh); \
      _thh = ( _thh->next ? (UT_hash_handle*)((char*)(_thh->next) + \
              (head)->hh.tbl->hho) : NULL ); \
    } \
    if (_count != (head)->hh.tbl->num_items) { \
      HASH_OOPS("invalid app item count %u, actual %u\n", \
                (head)->hh.tbl->num_items, _count ); \
    } \
  } \
} while (0)
#else
#define HASH_FSCK(hh,head)
#endif
316 
/* When compiled with -DHASH_EMIT_KEYS=<fd>, length-prefixed keys are emitted to
 * the descriptor given by HASH_EMIT_KEYS, for tuning the hash function.
 * The app can #include <unistd.h> to get the prototype for write(2). */
#ifdef HASH_EMIT_KEYS
#define HASH_EMIT_KEY(hh,head,keyptr,fieldlen) \
do { \
  unsigned _klen = fieldlen; \
  write(HASH_EMIT_KEYS, &_klen, sizeof(_klen)); \
  write(HASH_EMIT_KEYS, keyptr, fieldlen); \
} while (0)
#else
#define HASH_EMIT_KEY(hh,head,keyptr,fieldlen)
#endif

/* default to Jenkins's hash unless overridden, e.g. -DHASH_FUNCTION=HASH_SAX */
#ifdef HASH_FUNCTION
#define HASH_FCN HASH_FUNCTION
#else
#define HASH_FCN HASH_JEN
#endif
337 
/* Bernstein's "times 33" string hash, used in Perl prior to v5.6.
 * Folds each key byte into hashv, then masks down to a bucket index
 * (num_bkts must be a power of two). */
#define HASH_BER(key,keylen,num_bkts,hashv,bkt) \
do { \
  char *_ber_cur = (char*)(key); \
  unsigned _ber_left = keylen; \
  (hashv) = 0; \
  for (; _ber_left != 0; _ber_left--) { \
    (hashv) = ((hashv) * 33) + *_ber_cur; \
    _ber_cur++; \
  } \
  bkt = (hashv) & (num_bkts-1); \
} while (0)
347 
348 
/* SAX/FNV/OAT/JEN hash functions are macro variants of those listed at
 * http://eternallyconfuzzled.com/tuts/algorithms/jsw_tut_hashing.aspx */

/* Shift-add-XOR hash: each byte perturbs hashv via two shifts and an XOR. */
#define HASH_SAX(key,keylen,num_bkts,hashv,bkt) \
do { \
  char *_sax_p = (char*)(key); \
  char *_sax_end = _sax_p + (keylen); \
  hashv = 0; \
  while (_sax_p != _sax_end) { \
    hashv ^= (hashv << 5) + (hashv >> 2) + *_sax_p; \
    _sax_p++; \
  } \
  bkt = hashv & (num_bkts-1); \
} while (0)
360 
/* Fowler/Noll/Vo FNV-1 hash (32-bit offset basis and prime).
 * Fixed: the expansion previously ended in "} while(0);" — the stray
 * semicolon made "if (...) HASH_FNV(...); else ..." a syntax error.
 * Callers still write a semicolon after the macro, as usual. */
#define HASH_FNV(key,keylen,num_bkts,hashv,bkt) \
do { \
  unsigned _fn_i; \
  char *_hf_key=(char*)(key); \
  hashv = 2166136261UL; /* FNV-1 32-bit offset basis */ \
  for(_fn_i=0; _fn_i < keylen; _fn_i++) \
    hashv = (hashv * 16777619) ^ _hf_key[_fn_i]; /* FNV prime */ \
  bkt = hashv & (num_bkts-1); \
} while(0)
370 
/* Jenkins one-at-a-time hash: mix each byte in, then a final avalanche. */
#define HASH_OAT(key,keylen,num_bkts,hashv,bkt) \
do { \
  char *_oat_key = (char*)(key); \
  unsigned _oat_n = 0; \
  hashv = 0; \
  while (_oat_n < keylen) { \
    hashv += _oat_key[_oat_n]; \
    hashv += (hashv << 10); \
    hashv ^= (hashv >> 6); \
    _oat_n++; \
  } \
  /* final avalanche */ \
  hashv += (hashv << 3); \
  hashv ^= (hashv >> 11); \
  hashv += (hashv << 15); \
  bkt = hashv & (num_bkts-1); \
} while(0)
386 
/* Core 96-bit mixing step of Bob Jenkins's lookup hash. */
#define HASH_JEN_MIX(a,b,c) \
do { \
  a -= b; a -= c; a ^= ( c >> 13 ); \
  b -= c; b -= a; b ^= ( a << 8 ); \
  c -= a; c -= b; c ^= ( b >> 13 ); \
  a -= b; a -= c; a ^= ( c >> 12 ); \
  b -= c; b -= a; b ^= ( a << 16 ); \
  c -= a; c -= b; c ^= ( b >> 5 ); \
  a -= b; a -= c; a ^= ( c >> 3 ); \
  b -= c; b -= a; b ^= ( a << 10 ); \
  c -= a; c -= b; c ^= ( b >> 15 ); \
} while (0)

/* Bob Jenkins's hash (the default HASH_FCN): consume the key 12 bytes at a
 * time as three little-endian words, mix, then fold in the 0-11 byte tail. */
#define HASH_JEN(key,keylen,num_bkts,hashv,bkt) \
do { \
  unsigned _hj_i,_hj_j,_hj_k; \
  char *_hj_key=(char*)(key); \
  hashv = 0xfeedbeef; \
  _hj_i = _hj_j = 0x9e3779b9; /* golden ratio constant */ \
  _hj_k = keylen; \
  while (_hj_k >= 12) { \
    _hj_i += (_hj_key[0] + ( (unsigned)_hj_key[1] << 8 ) \
        + ( (unsigned)_hj_key[2] << 16 ) \
        + ( (unsigned)_hj_key[3] << 24 ) ); \
    _hj_j += (_hj_key[4] + ( (unsigned)_hj_key[5] << 8 ) \
        + ( (unsigned)_hj_key[6] << 16 ) \
        + ( (unsigned)_hj_key[7] << 24 ) ); \
    hashv += (_hj_key[8] + ( (unsigned)_hj_key[9] << 8 ) \
        + ( (unsigned)_hj_key[10] << 16 ) \
        + ( (unsigned)_hj_key[11] << 24 ) ); \
    \
    HASH_JEN_MIX(_hj_i, _hj_j, hashv); \
    \
    _hj_key += 12; \
    _hj_k -= 12; \
  } \
  hashv += keylen; \
  /* fold in the remaining 0-11 tail bytes; every case falls through */ \
  switch ( _hj_k ) { \
    case 11: hashv += ( (unsigned)_hj_key[10] << 24 ); \
        /* FALLTHRU */ \
    case 10: hashv += ( (unsigned)_hj_key[9] << 16 ); \
        /* FALLTHRU */ \
    case 9: hashv += ( (unsigned)_hj_key[8] << 8 ); \
        /* FALLTHRU */ \
    case 8: _hj_j += ( (unsigned)_hj_key[7] << 24 ); \
        /* FALLTHRU */ \
    case 7: _hj_j += ( (unsigned)_hj_key[6] << 16 ); \
        /* FALLTHRU */ \
    case 6: _hj_j += ( (unsigned)_hj_key[5] << 8 ); \
        /* FALLTHRU */ \
    case 5: _hj_j += _hj_key[4]; \
        /* FALLTHRU */ \
    case 4: _hj_i += ( (unsigned)_hj_key[3] << 24 ); \
        /* FALLTHRU */ \
    case 3: _hj_i += ( (unsigned)_hj_key[2] << 16 ); \
        /* FALLTHRU */ \
    case 2: _hj_i += ( (unsigned)_hj_key[1] << 8 ); \
        /* FALLTHRU */ \
    case 1: _hj_i += _hj_key[0]; \
        /* FALLTHRU */ \
    default: break; \
  } \
  HASH_JEN_MIX(_hj_i, _hj_j, hashv); \
  bkt = hashv & (num_bkts-1); \
} while(0)
452 
/* The Paul Hsieh hash function */
#undef get16bits
/* On targets that tolerate unaligned reads, load 16 bits directly;
 * otherwise assemble the halfword from two byte loads. */
#if (defined(__GNUC__) && defined(__i386__)) || defined(__WATCOMC__) \
  || defined(_MSC_VER) || defined (__BORLANDC__) || defined (__TURBOC__)
#define get16bits(d) (*((const uint16_t *) (d)))
#endif

#if !defined (get16bits)
#define get16bits(d) ((((uint32_t)(((const uint8_t *)(d))[1])) << 8) \
                      +(uint32_t)(((const uint8_t *)(d))[0]) )
#endif

/* Paul Hsieh's SuperFastHash: 32-bit halfword pairs in the main loop,
 * a 0-3 byte tail, then a final avalanche.
 * Fixed: the expansion previously ended in "} while(0);" — the stray
 * semicolon made the macro unusable in an if/else. */
#define HASH_SFH(key,keylen,num_bkts,hashv,bkt) \
do { \
  char *_sfh_key=(char*)(key); \
  uint32_t _sfh_tmp, _sfh_len = keylen; \
  \
  int _sfh_rem = _sfh_len & 3; \
  _sfh_len >>= 2; \
  hashv = 0xcafebabe; \
  \
  /* Main loop */ \
  for (;_sfh_len > 0; _sfh_len--) { \
    hashv += get16bits (_sfh_key); \
    _sfh_tmp = (get16bits (_sfh_key+2) << 11) ^ hashv; \
    hashv = (hashv << 16) ^ _sfh_tmp; \
    _sfh_key += 2*sizeof (uint16_t); \
    hashv += hashv >> 11; \
  } \
  \
  /* Handle end cases */ \
  switch (_sfh_rem) { \
    case 3: hashv += get16bits (_sfh_key); \
        hashv ^= hashv << 16; \
        hashv ^= _sfh_key[sizeof (uint16_t)] << 18; \
        hashv += hashv >> 11; \
        break; \
    case 2: hashv += get16bits (_sfh_key); \
        hashv ^= hashv << 11; \
        hashv += hashv >> 17; \
        break; \
    case 1: hashv += *_sfh_key; \
        hashv ^= hashv << 10; \
        hashv += hashv >> 1; \
        break; \
    default: break; \
  } \
  \
  /* Force "avalanching" of final 127 bits */ \
  hashv ^= hashv << 3; \
  hashv += hashv >> 5; \
  hashv ^= hashv << 4; \
  hashv += hashv >> 17; \
  hashv ^= hashv << 25; \
  hashv += hashv >> 6; \
  bkt = hashv & (num_bkts-1); \
} while(0)
509 
#ifdef HASH_USING_NO_STRICT_ALIASING
/* The MurmurHash exploits some CPU's (e.g. x86) tolerance for unaligned reads.
 * For other types of CPU's (e.g. Sparc) an unaligned read causes a bus error.
 * So MurmurHash comes in two versions, the faster unaligned one and the slower
 * aligned one. We only use the faster one on CPU's where we know it's safe.
 *
 * Note the preprocessor built-in defines can be emitted using:
 *
 * gcc -m64 -dM -E - < /dev/null (on gcc)
 * cc -## a.c (where a.c is a simple test file) (Sun Studio)
 */
/* Select the variant at compile time based on the target architecture. */
#if (defined(__i386__) || defined(__x86_64__))
#define HASH_MUR HASH_MUR_UNALIGNED
#else
#define HASH_MUR HASH_MUR_ALIGNED
#endif
526 
/* Appleby's MurmurHash (v2) fast version for unaligned-tolerant archs like i386.
 * Fixed: declarations now precede the first statement (the old interleaving
 * broke the C89/VS2008-C compilers this header explicitly supports), and the
 * key/keylen arguments are parenthesized as in the aligned variant. */
#define HASH_MUR_UNALIGNED(key,keylen,num_bkts,hashv,bkt) \
do { \
  const unsigned int _mur_m = 0x5bd1e995; \
  const int _mur_r = 24; \
  char *_mur_key = (char *)(key); \
  uint32_t _mur_tmp, _mur_len = (keylen); \
  hashv = 0xcafebabe ^ (keylen); \
  \
  /* consume the key one 32-bit word at a time */ \
  for (;_mur_len >= 4; _mur_len-=4) { \
    _mur_tmp = *(uint32_t *)_mur_key; \
    _mur_tmp *= _mur_m; \
    _mur_tmp ^= _mur_tmp >> _mur_r; \
    _mur_tmp *= _mur_m; \
    hashv *= _mur_m; \
    hashv ^= _mur_tmp; \
    _mur_key += 4; \
  } \
  \
  /* fold in the remaining 0-3 tail bytes */ \
  switch(_mur_len) \
  { \
    case 3: hashv ^= _mur_key[2] << 16; \
        /* FALLTHRU */ \
    case 2: hashv ^= _mur_key[1] << 8; \
        /* FALLTHRU */ \
    case 1: hashv ^= _mur_key[0]; \
        hashv *= _mur_m; \
        /* FALLTHRU */ \
    default: break; \
  }; \
  \
  hashv ^= hashv >> 13; \
  hashv *= _mur_m; \
  hashv ^= hashv >> 15; \
  \
  bkt = hashv & (num_bkts-1); \
} while(0)
564 
/* Appleby's MurmurHash version for alignment-sensitive archs like Sparc.
 * When the key is misaligned, it reads aligned words and re-assembles the
 * stream with shifts (_mur_t carries bytes across word boundaries).
 * Fixes in this revision:
 *  - the alignment test now goes through size_t instead of truncating the
 *    pointer to int (undefined/narrowing on LP64 targets);
 *  - after the boundary word is mixed, advance _mur_key (not the dead
 *    local _mur_k) so the tail switch reads the not-yet-consumed bytes
 *    instead of re-reading bytes already folded in. */
#define HASH_MUR_ALIGNED(key,keylen,num_bkts,hashv,bkt) \
do { \
  const unsigned int _mur_m = 0x5bd1e995; \
  const int _mur_r = 24; \
  char *_mur_key = (char *)(key); \
  uint32_t _mur_len = keylen; \
  int _mur_align = (int)((size_t)_mur_key & 3); \
  hashv = 0xcafebabe ^ (keylen); \
  \
  if (_mur_align && (_mur_len >= 4)) { \
    unsigned _mur_t = 0, _mur_d = 0; \
    /* pre-load the bytes before the first aligned boundary */ \
    switch(_mur_align) { \
      case 1: _mur_t |= _mur_key[2] << 16; \
          /* FALLTHRU */ \
      case 2: _mur_t |= _mur_key[1] << 8; \
          /* FALLTHRU */ \
      case 3: _mur_t |= _mur_key[0]; \
          /* FALLTHRU */ \
      default: break; \
    } \
    _mur_t <<= (8 * _mur_align); \
    _mur_key += 4-_mur_align; \
    _mur_len -= 4-_mur_align; \
    int _mur_sl = 8 * (4-_mur_align); \
    int _mur_sr = 8 * _mur_align; \
    \
    /* aligned word loop: stitch each word from _mur_t and the new read */ \
    for (;_mur_len >= 4; _mur_len-=4) { \
      _mur_d = *(unsigned *)_mur_key; \
      _mur_t = (_mur_t >> _mur_sr) | (_mur_d << _mur_sl); \
      unsigned _mur_k = _mur_t; \
      _mur_k *= _mur_m; \
      _mur_k ^= _mur_k >> _mur_r; \
      _mur_k *= _mur_m; \
      hashv *= _mur_m; \
      hashv ^= _mur_k; \
      _mur_t = _mur_d; \
      _mur_key += 4; \
    } \
    _mur_d = 0; \
    if(_mur_len >= _mur_align) { \
      /* enough tail bytes to complete one more stitched word */ \
      switch(_mur_align) { \
        case 3: _mur_d |= _mur_key[2] << 16; \
            /* FALLTHRU */ \
        case 2: _mur_d |= _mur_key[1] << 8; \
            /* FALLTHRU */ \
        case 1: _mur_d |= _mur_key[0]; \
            /* FALLTHRU */ \
        default: break; \
      } \
      unsigned _mur_k = (_mur_t >> _mur_sr) | (_mur_d << _mur_sl); \
      _mur_k *= _mur_m; \
      _mur_k ^= _mur_k >> _mur_r; \
      _mur_k *= _mur_m; \
      hashv *= _mur_m; \
      hashv ^= _mur_k; \
      _mur_key += _mur_align; /* was "_mur_k += _mur_align": advanced a dead temp */ \
      _mur_len -= _mur_align; \
      \
      switch(_mur_len) \
      { \
        case 3: hashv ^= _mur_key[2] << 16; \
            /* FALLTHRU */ \
        case 2: hashv ^= _mur_key[1] << 8; \
            /* FALLTHRU */ \
        case 1: hashv ^= _mur_key[0]; \
            hashv *= _mur_m; \
            /* FALLTHRU */ \
        default: break; \
      } \
    } else { \
      switch(_mur_len) \
      { \
        case 3: _mur_d ^= _mur_key[2] << 16; \
            /* FALLTHRU */ \
        case 2: _mur_d ^= _mur_key[1] << 8; \
            /* FALLTHRU */ \
        case 1: _mur_d ^= _mur_key[0]; \
            /* FALLTHRU */ \
        case 0: hashv ^= (_mur_t >> _mur_sr) | (_mur_d << _mur_sl); \
            hashv *= _mur_m; \
            /* FALLTHRU */ \
        default: break; \
      } \
    } \
    \
    hashv ^= hashv >> 13; \
    hashv *= _mur_m; \
    hashv ^= hashv >> 15; \
  } else { \
    /* aligned key (or too short to matter): straight MurmurHash2 */ \
    for (;_mur_len >= 4; _mur_len-=4) { \
      unsigned _mur_k = *(unsigned*)_mur_key; \
      _mur_k *= _mur_m; \
      _mur_k ^= _mur_k >> _mur_r; \
      _mur_k *= _mur_m; \
      hashv *= _mur_m; \
      hashv ^= _mur_k; \
      _mur_key += 4; \
    } \
    switch(_mur_len) \
    { \
      case 3: hashv ^= _mur_key[2] << 16; \
          /* FALLTHRU */ \
      case 2: hashv ^= _mur_key[1] << 8; \
          /* FALLTHRU */ \
      case 1: hashv ^= _mur_key[0]; \
          hashv *= _mur_m; \
          /* FALLTHRU */ \
      default: break; \
    } \
    \
    hashv ^= hashv >> 13; \
    hashv *= _mur_m; \
    hashv ^= hashv >> 15; \
  } \
  bkt = hashv & (num_bkts-1); \
} while(0)
682 #endif /* HASH_USING_NO_STRICT_ALIASING */
683 
/* key comparison function; return 0 if keys equal */
#define HASH_KEYCMP(a,b,len) memcmp(a,b,len)

/* iterate over items in a known bucket to find desired item;
 * "out" must be a pointer of the element type and the "hh" argument is the
 * literal field name of the hash handle (hence out->hh below). Compares the
 * key length first, then the bytes. Leaves out NULL if not found. */
#define HASH_FIND_IN_BKT(tbl,hh,head,keyptr,keylen_in,out) \
do { \
  if (head.hh_head) DECLTYPE_ASSIGN(out,ELMT_FROM_HH(tbl,head.hh_head)); \
  else out=NULL; \
  while (out) { \
    if (out->hh.keylen == keylen_in) { \
      if ((HASH_KEYCMP(out->hh.key,keyptr,keylen_in)) == 0) break; \
    } \
    if (out->hh.hh_next) DECLTYPE_ASSIGN(out,ELMT_FROM_HH(tbl,out->hh.hh_next)); \
    else out = NULL; \
  } \
} while(0)
700 
/* add an item to a bucket: push the handle onto the front of the bucket's
 * chain, then expand the table if this bucket got too long (unless a prior
 * ineffective expansion set the noexpand flag).
 * Fixed: "head" and "addhh" are now parenthesized at every use (the old
 * body mixed "head.count" with "(head).hh_head", which breaks for argument
 * expressions that don't bind tighter than "."). */
#define HASH_ADD_TO_BKT(head,addhh) \
do { \
  (head).count++; \
  (addhh)->hh_next = (head).hh_head; \
  (addhh)->hh_prev = NULL; \
  if ((head).hh_head) { (head).hh_head->hh_prev = (addhh); } \
  (head).hh_head = (addhh); \
  /* expand_mult raises this bucket's threshold after skewed expansions */ \
  if ((head).count >= (((head).expand_mult+1) * HASH_BKT_CAPACITY_THRESH) \
      && (addhh)->tbl->noexpand != 1) { \
    HASH_EXPAND_BUCKETS((addhh)->tbl); \
  } \
} while(0)
714 
/* remove an item from a given bucket: unlink hh_del from the bucket chain
 * and decrement the bucket count.
 * Fixed: the body is now wrapped in do { } while (0) — previously it was a
 * bare statement sequence, so "if (x) HASH_DEL_IN_BKT(...);" guarded only
 * the count decrement; hh_del is also parenthesized for macro hygiene. */
#define HASH_DEL_IN_BKT(hh,head,hh_del) \
do { \
  (head).count--; \
  if ((head).hh_head == (hh_del)) { \
    (head).hh_head = (hh_del)->hh_next; \
  } \
  if ((hh_del)->hh_prev) { \
    (hh_del)->hh_prev->hh_next = (hh_del)->hh_next; \
  } \
  if ((hh_del)->hh_next) { \
    (hh_del)->hh_next->hh_prev = (hh_del)->hh_prev; \
  } \
} while (0)
727 
/* Bucket expansion has the effect of doubling the number of buckets
 * and redistributing the items into the new buckets. Ideally the
 * items will distribute more or less evenly into the new buckets
 * (the extent to which this is true is a measure of the quality of
 * the hash function as it applies to the key domain).
 *
 * With the items distributed into more buckets, the chain length
 * (item count) in each bucket is reduced. Thus by expanding buckets
 * the hash keeps a bound on the chain length. This bounded chain
 * length is the essence of how a hash provides constant time lookup.
 *
 * The calculation of tbl->ideal_chain_maxlen below deserves some
 * explanation. First, keep in mind that we're calculating the ideal
 * maximum chain length based on the *new* (doubled) bucket count.
 * In fractions this is just n/b (n=number of items,b=new num buckets).
 * Since the ideal chain length is an integer, we want to calculate
 * ceil(n/b). We don't depend on floating point arithmetic in this
 * hash, so to calculate ceil(n/b) with integers we could write
 *
 * ceil(n/b) = (n/b) + ((n%b)?1:0)
 *
 * and in fact a previous version of this hash did just that.
 * But now we have improved things a bit by recognizing that b is
 * always a power of two. We keep its base 2 log handy (call it lb),
 * so now we can write this with a bit shift and logical AND:
 *
 * ceil(n/b) = (n>>lb) + ( (n & (b-1)) ? 1:0)
 *
 */
#define HASH_EXPAND_BUCKETS(tbl) \
do { \
  unsigned _he_bkt; \
  unsigned _he_bkt_i; \
  struct UT_hash_handle *_he_thh, *_he_hh_nxt; \
  UT_hash_bucket *_he_new_buckets, *_he_newbkt; \
  _he_new_buckets = (UT_hash_bucket*)uthash_malloc( \
           2 * tbl->num_buckets * sizeof(struct UT_hash_bucket)); \
  if (!_he_new_buckets) { uthash_fatal( "out of memory"); } \
  memset(_he_new_buckets, 0, \
          2 * tbl->num_buckets * sizeof(struct UT_hash_bucket)); \
  /* ceil(num_items / new_bucket_count), computed as explained above */ \
  tbl->ideal_chain_maxlen = \
     (tbl->num_items >> (tbl->log2_num_buckets+1)) + \
     ((tbl->num_items & ((tbl->num_buckets*2)-1)) ? 1 : 0); \
  tbl->nonideal_items = 0; \
  /* rehome every handle into the doubled bucket array (hashv is cached, \
   * so no key is re-hashed) */ \
  for(_he_bkt_i = 0; _he_bkt_i < tbl->num_buckets; _he_bkt_i++) \
  { \
    _he_thh = tbl->buckets[ _he_bkt_i ].hh_head; \
    while (_he_thh) { \
      _he_hh_nxt = _he_thh->hh_next; \
      HASH_TO_BKT( _he_thh->hashv, tbl->num_buckets*2, _he_bkt); \
      _he_newbkt = &(_he_new_buckets[ _he_bkt ]); \
      if (++(_he_newbkt->count) > tbl->ideal_chain_maxlen) { \
        tbl->nonideal_items++; \
        /* over-full bucket: raise its personal expansion threshold */ \
        _he_newbkt->expand_mult = _he_newbkt->count / \
                                  tbl->ideal_chain_maxlen; \
      } \
      _he_thh->hh_prev = NULL; \
      _he_thh->hh_next = _he_newbkt->hh_head; \
      if (_he_newbkt->hh_head) _he_newbkt->hh_head->hh_prev = \
           _he_thh; \
      _he_newbkt->hh_head = _he_thh; \
      _he_thh = _he_hh_nxt; \
    } \
  } \
  uthash_free( tbl->buckets, tbl->num_buckets*sizeof(struct UT_hash_bucket) ); \
  tbl->num_buckets *= 2; \
  tbl->log2_num_buckets++; \
  tbl->buckets = _he_new_buckets; \
  /* two consecutive expansions that left >50% of items in over-full \
   * buckets indicate a bad hash for this key domain: stop expanding */ \
  tbl->ineff_expands = (tbl->nonideal_items > (tbl->num_items >> 1)) ? \
      (tbl->ineff_expands+1) : 0; \
  if (tbl->ineff_expands > 1) { \
    tbl->noexpand=1; \
    uthash_noexpand_fyi(tbl); \
  } \
  uthash_expand_fyi(tbl); \
} while(0)
804 
805 
/* This is an adaptation of Simon Tatham's O(n log(n)) mergesort */
/* Note that HASH_SORT assumes the hash handle name to be hh.
 * HASH_SRT was added to allow the hash handle name to be passed in.
 * Sorts the app-order list in place using cmpfcn(a,b), which receives two
 * element pointers and returns <0/0/>0 like strcmp. Bucket chains are
 * untouched; only prev/next and the tail pointer are rewritten. */
#define HASH_SORT(head,cmpfcn) HASH_SRT(hh,head,cmpfcn)
#define HASH_SRT(hh,head,cmpfcn) \
do { \
  unsigned _hs_i; \
  unsigned _hs_looping,_hs_nmerges,_hs_insize,_hs_psize,_hs_qsize; \
  struct UT_hash_handle *_hs_p, *_hs_q, *_hs_e, *_hs_list, *_hs_tail; \
  if (head) { \
    _hs_insize = 1; \
    _hs_looping = 1; \
    _hs_list = &((head)->hh); \
    /* bottom-up mergesort: double the run size until one merge remains */ \
    while (_hs_looping) { \
      _hs_p = _hs_list; \
      _hs_list = NULL; \
      _hs_tail = NULL; \
      _hs_nmerges = 0; \
      while (_hs_p) { \
        _hs_nmerges++; \
        /* advance q by insize elements to find the second run */ \
        _hs_q = _hs_p; \
        _hs_psize = 0; \
        for ( _hs_i = 0; _hs_i < _hs_insize; _hs_i++ ) { \
          _hs_psize++; \
          _hs_q = (UT_hash_handle*)((_hs_q->next) ? \
                  ((void*)((char*)(_hs_q->next) + \
                  (head)->hh.tbl->hho)) : NULL); \
          if (! (_hs_q) ) break; \
        } \
        _hs_qsize = _hs_insize; \
        /* merge the p-run and the q-run, appending to the output tail */ \
        while ((_hs_psize > 0) || ((_hs_qsize > 0) && _hs_q )) { \
          if (_hs_psize == 0) { \
            _hs_e = _hs_q; \
            _hs_q = (UT_hash_handle*)((_hs_q->next) ? \
                    ((void*)((char*)(_hs_q->next) + \
                    (head)->hh.tbl->hho)) : NULL); \
            _hs_qsize--; \
          } else if ( (_hs_qsize == 0) || !(_hs_q) ) { \
            _hs_e = _hs_p; \
            _hs_p = (UT_hash_handle*)((_hs_p->next) ? \
                    ((void*)((char*)(_hs_p->next) + \
                    (head)->hh.tbl->hho)) : NULL); \
            _hs_psize--; \
          } else if (( \
              cmpfcn(DECLTYPE(head)(ELMT_FROM_HH((head)->hh.tbl,_hs_p)), \
                     DECLTYPE(head)(ELMT_FROM_HH((head)->hh.tbl,_hs_q))) \
              ) <= 0) { \
            _hs_e = _hs_p; \
            _hs_p = (UT_hash_handle*)((_hs_p->next) ? \
                    ((void*)((char*)(_hs_p->next) + \
                    (head)->hh.tbl->hho)) : NULL); \
            _hs_psize--; \
          } else { \
            _hs_e = _hs_q; \
            _hs_q = (UT_hash_handle*)((_hs_q->next) ? \
                    ((void*)((char*)(_hs_q->next) + \
                    (head)->hh.tbl->hho)) : NULL); \
            _hs_qsize--; \
          } \
          if ( _hs_tail ) { \
            _hs_tail->next = ((_hs_e) ? \
                  ELMT_FROM_HH((head)->hh.tbl,_hs_e) : NULL); \
          } else { \
            _hs_list = _hs_e; \
          } \
          _hs_e->prev = ((_hs_tail) ? \
             ELMT_FROM_HH((head)->hh.tbl,_hs_tail) : NULL); \
          _hs_tail = _hs_e; \
        } \
        _hs_p = _hs_q; \
      } \
      _hs_tail->next = NULL; \
      if ( _hs_nmerges <= 1 ) { \
        /* a single merge pass means the list is fully sorted */ \
        _hs_looping=0; \
        (head)->hh.tbl->tail = _hs_tail; \
        DECLTYPE_ASSIGN(head,ELMT_FROM_HH((head)->hh.tbl, _hs_list)); \
      } \
      _hs_insize *= 2; \
    } \
    HASH_FSCK(hh,head); \
  } \
} while (0)
888 
/* This function selects items from one hash into another hash.
 * The end result is that the selected items have dual presence
 * in both hashes. There is no copy of the items made; rather
 * they are added into the new hash through a secondary hash
 * hash handle that must be present in the structure.
 * cond is a predicate taking a void* element pointer; elements for which
 * it returns nonzero are linked into dst via the hh_dst handle. */
#define HASH_SELECT(hh_dst, dst, hh_src, src, cond) \
do { \
  unsigned _src_bkt, _dst_bkt; \
  void *_last_elt=NULL, *_elt; \
  UT_hash_handle *_src_hh, *_dst_hh, *_last_elt_hh=NULL; \
  /* offset of the destination handle within the element struct */ \
  ptrdiff_t _dst_hho = ((char*)(&(dst)->hh_dst) - (char*)(dst)); \
  if (src) { \
    for(_src_bkt=0; _src_bkt < (src)->hh_src.tbl->num_buckets; _src_bkt++) { \
      for(_src_hh = (src)->hh_src.tbl->buckets[_src_bkt].hh_head; \
          _src_hh; \
          _src_hh = _src_hh->hh_next) { \
        _elt = ELMT_FROM_HH((src)->hh_src.tbl, _src_hh); \
        if (cond(_elt)) { \
          _dst_hh = (UT_hash_handle*)(((char*)_elt) + _dst_hho); \
          /* key and cached hashv are shared with the source handle */ \
          _dst_hh->key = _src_hh->key; \
          _dst_hh->keylen = _src_hh->keylen; \
          _dst_hh->hashv = _src_hh->hashv; \
          _dst_hh->prev = _last_elt; \
          _dst_hh->next = NULL; \
          if (_last_elt_hh) { _last_elt_hh->next = _elt; } \
          if (!dst) { \
            DECLTYPE_ASSIGN(dst,_elt); \
            HASH_MAKE_TABLE(hh_dst,dst); \
          } else { \
            _dst_hh->tbl = (dst)->hh_dst.tbl; \
          } \
          HASH_TO_BKT(_dst_hh->hashv, _dst_hh->tbl->num_buckets, _dst_bkt); \
          HASH_ADD_TO_BKT(_dst_hh->tbl->buckets[_dst_bkt],_dst_hh); \
          (dst)->hh_dst.tbl->num_items++; \
          _last_elt = _elt; \
          _last_elt_hh = _dst_hh; \
        } \
      } \
    } \
  } \
  HASH_FSCK(hh_dst,dst); \
} while (0)
931 
/* Empty the hash: free the bucket array, the (optional) bloom filter and the
 * table itself, then NULL out the head. The elements themselves are NOT
 * freed -- they are owned by the application. Note: HASH_BLOOM_FREE expands
 * to nothing when HASH_BLOOM is not defined, so this is a no-op in that case;
 * without it the bloom bit-vector allocated by HASH_MAKE_TABLE would leak. */
#define HASH_CLEAR(hh,head)                                                      \
do {                                                                             \
  if (head) {                                                                    \
    HASH_BLOOM_FREE((head)->hh.tbl);                                             \
    uthash_free((head)->hh.tbl->buckets,                                         \
                (head)->hh.tbl->num_buckets*sizeof(struct UT_hash_bucket));      \
    uthash_free((head)->hh.tbl, sizeof(UT_hash_table));                          \
    (head)=NULL;                                                                 \
  }                                                                              \
} while(0)
941 
#ifdef NO_DECLTYPE
/* Deletion-safe iteration: 'tmp' always holds the successor of 'el', so the
 * body may HASH_DELETE 'el' without breaking the traversal. This variant
 * assigns through a char** alias because the compiler lacks decltype/__typeof
 * to cast the untyped hh.next pointer back to the element type. */
#define HASH_ITER(hh,head,el,tmp)                                                \
for((el)=(head), (*(char**)(&(tmp)))=(char*)((head)?(head)->hh.next:NULL);       \
  el; (el)=(tmp),(*(char**)(&(tmp)))=(char*)((tmp)?(tmp)->hh.next:NULL))
#else
/* Same deletion-safe iteration, using DECLTYPE to cast hh.next directly. */
#define HASH_ITER(hh,head,el,tmp)                                                \
for((el)=(head),(tmp)=DECLTYPE(el)((head)?(head)->hh.next:NULL);                 \
  el; (el)=(tmp),(tmp)=DECLTYPE(el)((tmp)?(tmp)->hh.next:NULL))
#endif
951 
/* obtain a count of items in the hash */
#define HASH_COUNT(head) HASH_CNT(hh,head)
/* generalized form taking the hash-handle field name; a NULL head yields 0 */
#define HASH_CNT(hh,head) ((head)?((head)->hh.tbl->num_items):0)
955 
/* One bucket of the hash table: the head of a doubly-linked chain of hash
 * handles (linked via hh_next/hh_prev) plus its current length. */
typedef struct UT_hash_bucket {
   struct UT_hash_handle *hh_head;  /* first hash handle chained in this bucket */
   unsigned count;                  /* number of items currently in the chain */

   /* expand_mult is normally set to 0. In this situation, the max chain length
    * threshold is enforced at its default value, HASH_BKT_CAPACITY_THRESH. (If
    * the bucket's chain exceeds this length, bucket expansion is triggered).
    * However, setting expand_mult to a non-zero value delays bucket expansion
    * (that would be triggered by additions to this particular bucket)
    * until its chain length reaches a *multiple* of HASH_BKT_CAPACITY_THRESH.
    * (The multiplier is simply expand_mult+1). The whole idea of this
    * multiplier is to reduce bucket expansions, since they are expensive, in
    * situations where we know that a particular bucket tends to be overused.
    * It is better to let its chain length grow to a longer yet-still-bounded
    * value, than to do an O(n) bucket expansion too often.
    */
   unsigned expand_mult;

} UT_hash_bucket;  /* closing line restored: it was lost in extraction, but the
                    * typedef name is used below (UT_hash_bucket *buckets) */

/* random signature used only to find hash tables in external analysis */
#define HASH_SIGNATURE 0xa0111fe1
/* companion signature marking the optional bloom filter, same purpose */
#define HASH_BLOOM_SIGNATURE 0xb12220f2
979 
/* Per-hash bookkeeping structure, allocated once per hash (by HASH_MAKE_TABLE)
 * and shared by every element's hash handle via hh->tbl. */
typedef struct UT_hash_table {
   UT_hash_bucket *buckets;               /* array of num_buckets buckets */
   unsigned num_buckets, log2_num_buckets; /* bucket count (power of 2) and its log2 */
   unsigned num_items;                    /* total items currently in the hash */
   struct UT_hash_handle *tail; /* tail hh in app order, for fast append */
   ptrdiff_t hho; /* hash handle offset (byte pos of hash handle in element) */

   /* in an ideal situation (all buckets used equally), no bucket would have
    * more than ceil(#items/#buckets) items. that's the ideal chain length. */
   unsigned ideal_chain_maxlen;

   /* nonideal_items is the number of items in the hash whose chain position
    * exceeds the ideal chain maxlen. these items pay the penalty for an uneven
    * hash distribution; reaching them in a chain traversal takes >ideal steps */
   unsigned nonideal_items;

   /* ineffective expands occur when a bucket doubling was performed, but
    * afterward, more than half the items in the hash had nonideal chain
    * positions. If this happens on two consecutive expansions we inhibit any
    * further expansion, as it's not helping; this happens when the hash
    * function isn't a good fit for the key domain. When expansion is inhibited
    * the hash will still work, albeit no longer in constant time. */
   unsigned ineff_expands, noexpand;

   /* NOTE(review): uint32_t/uint8_t require <stdint.h> (or <inttypes.h>),
    * which is not among the includes visible at the top of this header --
    * presumably provided by an earlier include; verify. */
   uint32_t signature; /* used only to find hash tables in external analysis */
#ifdef HASH_BLOOM
   uint32_t bloom_sig; /* used only to test bloom exists in external analysis */
   uint8_t *bloom_bv;  /* the bloom filter bit-vector (1<<bloom_nbits bits) */
   char bloom_nbits;   /* log2 of the bloom bit-vector size */
#endif

} UT_hash_table;
1012 
/* The hash handle embedded in each hashable structure. It links the element
 * into two orders at once: application (insertion) order via prev/next, and
 * bucket-chain order via hh_prev/hh_next. prev/next are void* pointers to the
 * enclosing elements, while hh_prev/hh_next point at peer handles directly. */
typedef struct UT_hash_handle {
   struct UT_hash_table *tbl;      /* shared per-hash bookkeeping table */
   void *prev;                     /* prev element in app order */
   void *next;                     /* next element in app order */
   struct UT_hash_handle *hh_prev; /* previous hh in bucket order */
   struct UT_hash_handle *hh_next; /* next hh in bucket order */
   void *key;                      /* ptr to enclosing struct's key */
   unsigned keylen;                /* enclosing struct's key len */
   unsigned hashv;                 /* result of hash-fcn(key) */
} UT_hash_handle;
1023 
1024 #endif /* UTHASH_H */
/* Doxygen cross-reference residue left over from documentation extraction:
 *   UT_hash_bucket -- Definition: uthash.h:956
 *   UT_hash_handle -- Definition: uthash.h:1013
 *   UT_hash_table  -- Definition: uthash.h:980
 * (wrapped in a comment so the header remains compilable) */