Intel(R) Threading Building Blocks Doxygen Documentation  version 4.2.3
concurrent_hash_map.h
1 /*
2  Copyright (c) 2005-2019 Intel Corporation
3 
4  Licensed under the Apache License, Version 2.0 (the "License");
5  you may not use this file except in compliance with the License.
6  You may obtain a copy of the License at
7 
8  http://www.apache.org/licenses/LICENSE-2.0
9 
10  Unless required by applicable law or agreed to in writing, software
11  distributed under the License is distributed on an "AS IS" BASIS,
12  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  See the License for the specific language governing permissions and
14  limitations under the License.
15 */
16 
17 #ifndef __TBB_concurrent_hash_map_H
18 #define __TBB_concurrent_hash_map_H
19 
20 #define __TBB_concurrent_hash_map_H_include_area
22 
23 #include "tbb_stddef.h"
24 #include <iterator>
25 #include <utility> // Need std::pair
26 #include <cstring> // Need std::memset
27 #include __TBB_STD_SWAP_HEADER
28 
29 #include "tbb_allocator.h"
30 #include "spin_rw_mutex.h"
31 #include "atomic.h"
32 #include "tbb_exception.h"
33 #include "tbb_profiling.h"
34 #include "aligned_space.h"
38 #if __TBB_INITIALIZER_LISTS_PRESENT
39 #include <initializer_list>
40 #endif
41 #if TBB_USE_PERFORMANCE_WARNINGS || __TBB_STATISTICS
42 #include <typeinfo>
43 #endif
44 #if __TBB_STATISTICS
45 #include <stdio.h>
46 #endif
47 #if __TBB_CPP11_RVALUE_REF_PRESENT && __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT && __TBB_CPP11_TUPLE_PRESENT
48 // Definition of __TBB_CPP11_RVALUE_REF_PRESENT includes __TBB_CPP11_TUPLE_PRESENT
49 // for most platforms; the tuple-present macro was added for logical correctness
50 #include <tuple>
51 #endif
52 
53 namespace tbb {
54 
55 namespace interface5 {
56 
57  template<typename Key, typename T, typename HashCompare = tbb_hash_compare<Key>, typename A = tbb_allocator<std::pair<const Key, T> > >
59 
61  namespace internal {
62  using namespace tbb::internal;
63 
64 
66  typedef size_t hashcode_t;
75  mutex_t mutex;
76  };
78  static hash_map_node_base *const rehash_req = reinterpret_cast<hash_map_node_base*>(size_t(3));
80  static hash_map_node_base *const empty_rehashed = reinterpret_cast<hash_map_node_base*>(size_t(0));
82  class hash_map_base {
83  public:
85  typedef size_t size_type;
87  typedef size_t hashcode_t;
89  typedef size_t segment_index_t;
98  mutex_t mutex;
99  node_base *node_list;
100  };
102  static size_type const embedded_block = 1;
104  static size_type const embedded_buckets = 1<<embedded_block;
106  static size_type const first_block = 8; //including embedded_block. perfect with bucket size 16, so the allocations are power of 4096
108  static size_type const pointers_per_table = sizeof(segment_index_t) * 8; // one segment per bit
112  typedef segment_ptr_t segments_table_t[pointers_per_table];
114  atomic<hashcode_t> my_mask;
116  segments_table_t my_table;
118  atomic<size_type> my_size; // It must be in separate cache line from my_mask due to performance effects
120  bucket my_embedded_segment[embedded_buckets];
121 #if __TBB_STATISTICS
122  atomic<unsigned> my_info_resizes; // concurrent ones
123  mutable atomic<unsigned> my_info_restarts; // race collisions
124  atomic<unsigned> my_info_rehashes; // invocations of rehash_bucket
125 #endif
126  hash_map_base() {
128  std::memset(my_table, 0, sizeof(my_table));
129  my_mask = 0;
130  my_size = 0;
131  std::memset(my_embedded_segment, 0, sizeof(my_embedded_segment));
132  for( size_type i = 0; i < embedded_block; i++ ) // fill the table
133  my_table[i] = my_embedded_segment + segment_base(i);
134  my_mask = embedded_buckets - 1;
135  __TBB_ASSERT( embedded_block <= first_block, "The first block number must include embedded blocks");
136 #if __TBB_STATISTICS
137  my_info_resizes = 0; // concurrent ones
138  my_info_restarts = 0; // race collisions
139  my_info_rehashes = 0; // invocations of rehash_bucket
140 #endif
141  }
142 
144  static segment_index_t segment_index_of( size_type index ) {
145  return segment_index_t( __TBB_Log2( index|1 ) );
146  }
147 
149  static segment_index_t segment_base( segment_index_t k ) {
150  return (segment_index_t(1)<<k & ~segment_index_t(1));
151  }
152 
154  static size_type segment_size( segment_index_t k ) {
155  return size_type(1)<<k; // fake value for k==0
156  }
157 
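For illustration only: a minimal standalone sketch, not part of this header, that restates the three helpers above with local lambdas (the log2i lambda stands in for __TBB_Log2) and checks the bucket layout they imply: segment 0 holds the two embedded buckets, and segment k (k >= 1) holds 2^k buckets starting at index 2^k.

    #include <cassert>
    #include <cstddef>

    int main() {
        // Stand-in for __TBB_Log2: index of the highest set bit.
        auto log2i = [](std::size_t x) {
            std::size_t r = 0;
            while (x >>= 1) ++r;
            return r;
        };
        auto segment_index_of = [&](std::size_t i) { return log2i(i | 1); };
        auto segment_base = [](std::size_t k) {
            return (std::size_t(1) << k) & ~std::size_t(1);
        };
        auto segment_size = [](std::size_t k) { return std::size_t(1) << k; };

        // Buckets 0..1 -> segment 0 (embedded), 2..3 -> segment 1,
        // 4..7 -> segment 2, 8..15 -> segment 3, and so on.
        for (std::size_t i = 0; i < 1024; ++i) {
            std::size_t k = segment_index_of(i);
            assert(segment_base(k) <= i);
            assert(i < segment_base(k) + (k ? segment_size(k) : 2));
        }
        return 0;
    }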
159  static bool is_valid( void *ptr ) {
160  return reinterpret_cast<uintptr_t>(ptr) > uintptr_t(63);
161  }
162 
164  static void init_buckets( segment_ptr_t ptr, size_type sz, bool is_initial ) {
165  if( is_initial ) std::memset( static_cast<void*>(ptr), 0, sz*sizeof(bucket) );
166  else for(size_type i = 0; i < sz; i++, ptr++) {
167  *reinterpret_cast<intptr_t*>(&ptr->mutex) = 0;
168  ptr->node_list = rehash_req;
169  }
170  }
171 
173  static void add_to_bucket( bucket *b, node_base *n ) {
174  __TBB_ASSERT(b->node_list != rehash_req, NULL);
175  n->next = b->node_list;
176  b->node_list = n; // it's under lock and the flag is set
177  }
178 
181  segment_ptr_t *my_segment_ptr;
182  enable_segment_failsafe(segments_table_t &table, segment_index_t k) : my_segment_ptr(&table[k]) {}
184  if( my_segment_ptr ) *my_segment_ptr = 0; // indicate no allocation in progress
185  }
186  };
187 
189  template<typename Allocator>
190  void enable_segment( segment_index_t k, const Allocator& allocator, bool is_initial = false ) {
191  typedef typename tbb::internal::allocator_rebind<Allocator, bucket>::type bucket_allocator_type;
192  typedef tbb::internal::allocator_traits<bucket_allocator_type> bucket_allocator_traits;
193  bucket_allocator_type bucket_allocator(allocator);
194  __TBB_ASSERT( k, "Zero segment must be embedded" );
195  enable_segment_failsafe watchdog( my_table, k );
196  size_type sz;
197  __TBB_ASSERT( !is_valid(my_table[k]), "Wrong concurrent assignment");
198  if( k >= first_block ) {
199  sz = segment_size( k );
200  segment_ptr_t ptr = bucket_allocator_traits::allocate(bucket_allocator, sz);
201  init_buckets( ptr, sz, is_initial );
202  itt_hide_store_word( my_table[k], ptr );
203  sz <<= 1;// double it to get entire capacity of the container
204  } else { // the first block
205  __TBB_ASSERT( k == embedded_block, "Wrong segment index" );
206  sz = segment_size( first_block );
207  segment_ptr_t ptr = bucket_allocator_traits::allocate(bucket_allocator, sz - embedded_buckets);
208  init_buckets( ptr, sz - embedded_buckets, is_initial );
209  ptr -= segment_base(embedded_block);
210  for(segment_index_t i = embedded_block; i < first_block; i++) // calc the offsets
211  itt_hide_store_word( my_table[i], ptr + segment_base(i) );
212  }
213  itt_store_word_with_release( my_mask, sz-1 );
214  watchdog.my_segment_ptr = 0;
215  }
216 
217  template<typename Allocator>
218  void delete_segment(segment_index_t s, const Allocator& allocator) {
219  typedef typename tbb::internal::allocator_rebind<Allocator, bucket>::type bucket_allocator_type;
220  typedef tbb::internal::allocator_traits<bucket_allocator_type> bucket_allocator_traits;
221  bucket_allocator_type bucket_allocator(allocator);
222  segment_ptr_t buckets_ptr = my_table[s];
223  size_type sz = segment_size( s ? s : 1 );
224 
225  if( s >= first_block) // the first segment or the next
226  bucket_allocator_traits::deallocate(bucket_allocator, buckets_ptr, sz);
227  else if( s == embedded_block && embedded_block != first_block )
228  bucket_allocator_traits::deallocate(bucket_allocator, buckets_ptr,
229  segment_size(first_block) - embedded_buckets);
230  if( s >= embedded_block ) my_table[s] = 0;
231  }
232 
234  bucket *get_bucket( hashcode_t h ) const throw() { // TODO: add throw() everywhere?
235  segment_index_t s = segment_index_of( h );
236  h -= segment_base(s);
237  segment_ptr_t seg = my_table[s];
238  __TBB_ASSERT( is_valid(seg), "hashcode must be cut by valid mask for allocated segments" );
239  return &seg[h];
240  }
241 
242  // internal serial rehashing helper
243  void mark_rehashed_levels( hashcode_t h ) throw () {
244  segment_index_t s = segment_index_of( h );
245  while( segment_ptr_t seg = my_table[++s] )
246  if( seg[h].node_list == rehash_req ) {
247  seg[h].node_list = empty_rehashed;
248  mark_rehashed_levels( h + ((hashcode_t)1<<s) ); // optimized segment_base(s)
249  }
250  }
251 
253  // Splitting into two functions should help inlining
254  inline bool check_mask_race( const hashcode_t h, hashcode_t &m ) const {
255  hashcode_t m_now, m_old = m;
256  m_now = (hashcode_t) itt_load_word_with_acquire( my_mask );
257  if( m_old != m_now )
258  return check_rehashing_collision( h, m_old, m = m_now );
259  return false;
260  }
261 
263  bool check_rehashing_collision( const hashcode_t h, hashcode_t m_old, hashcode_t m ) const {
264  __TBB_ASSERT(m_old != m, NULL); // TODO?: m arg could be optimized out by passing h = h&m
265  if( (h & m_old) != (h & m) ) { // mask changed for this hashcode, rare event
266  // condition above proves that 'h' has some other bits set beside 'm_old'
267  // find next applicable mask after m_old //TODO: look at bsl instruction
268  for( ++m_old; !(h & m_old); m_old <<= 1 ) // at maximum few rounds depending on the first block size
269  ;
270  m_old = (m_old<<1) - 1; // get full mask from a bit
271  __TBB_ASSERT((m_old&(m_old+1))==0 && m_old <= m, NULL);
272  // check whether it is rehashing/ed
273  if( itt_load_word_with_acquire(get_bucket(h & m_old)->node_list) != rehash_req )
274  {
275 #if __TBB_STATISTICS
276  my_info_restarts++; // race collisions
277 #endif
278  return true;
279  }
280  }
281  return false;
282  }
283 
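For illustration only, a hedged standalone sketch (not part of this header, and assuming C++14 binary literals) that walks the "find next applicable mask after m_old" step of check_rehashing_collision() with concrete numbers.

    #include <cassert>
    #include <cstddef>

    int main() {
        std::size_t h     = 0b101100;   // hashcode of the key being looked up
        std::size_t m_old = 0b000111;   // stale mask read before a concurrent grow
        std::size_t m     = 0b111111;   // current mask
        assert((h & m_old) != (h & m)); // the rare case the function handles
        for (++m_old; !(h & m_old); m_old <<= 1)
            ;                           // m_old == 0b001000: first bit of h above the old mask
        m_old = (m_old << 1) - 1;       // widen to the full parent mask 0b001111
        assert((m_old & (m_old + 1)) == 0 && m_old <= m);
        // h & 0b001111 == 0b001100: the parent bucket whose rehash state
        // decides whether the lookup must restart.
        assert((h & m_old) == 0b001100);
        return 0;
    }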
285  segment_index_t insert_new_node( bucket *b, node_base *n, hashcode_t mask ) {
286  size_type sz = ++my_size; // prefix form is to enforce allocation after the first item inserted
287  add_to_bucket( b, n );
288  // check load factor
289  if( sz >= mask ) { // TODO: add custom load_factor
290  segment_index_t new_seg = __TBB_Log2( mask+1 ); //optimized segment_index_of
291  __TBB_ASSERT( is_valid(my_table[new_seg-1]), "new allocations must not publish new mask until segment has allocated");
292  static const segment_ptr_t is_allocating = (segment_ptr_t)2;
293  if( !itt_hide_load_word(my_table[new_seg])
294  && as_atomic(my_table[new_seg]).compare_and_swap(is_allocating, NULL) == NULL )
295  return new_seg; // The value must be processed
296  }
297  return 0;
298  }
299 
301  template<typename Allocator>
302  void reserve(size_type buckets, const Allocator& allocator) {
303  if( !buckets-- ) return;
304  bool is_initial = !my_size;
305  for( size_type m = my_mask; buckets > m; m = my_mask )
306  enable_segment( segment_index_of( m+1 ), allocator, is_initial );
307  }
310  using std::swap;
311  swap(this->my_mask, table.my_mask);
312  swap(this->my_size, table.my_size);
313  for(size_type i = 0; i < embedded_buckets; i++)
314  swap(this->my_embedded_segment[i].node_list, table.my_embedded_segment[i].node_list);
315  for(size_type i = embedded_block; i < pointers_per_table; i++)
316  swap(this->my_table[i], table.my_table[i]);
317  }
318 
319 #if __TBB_CPP11_RVALUE_REF_PRESENT
320  void internal_move(hash_map_base&& other) {
321  my_mask = other.my_mask;
322  other.my_mask = embedded_buckets - 1;
323  my_size = other.my_size;
324  other.my_size = 0;
325 
326  for(size_type i = 0; i < embedded_buckets; ++i) {
327  my_embedded_segment[i].node_list = other.my_embedded_segment[i].node_list;
328  other.my_embedded_segment[i].node_list = NULL;
329  }
330 
331  for(size_type i = embedded_block; i < pointers_per_table; ++i) {
332  my_table[i] = other.my_table[i];
333  other.my_table[i] = NULL;
334  }
335  }
336 #endif // __TBB_CPP11_RVALUE_REF_PRESENT
337  };
338 
339  template<typename Iterator>
341 
343 
345  template<typename Container, typename Value>
347  : public std::iterator<std::forward_iterator_tag,Value>
348  {
349  typedef Container map_type;
350  typedef typename Container::node node;
353 
354  template<typename C, typename T, typename U>
355  friend bool operator==( const hash_map_iterator<C,T>& i, const hash_map_iterator<C,U>& j );
356 
357  template<typename C, typename T, typename U>
358  friend bool operator!=( const hash_map_iterator<C,T>& i, const hash_map_iterator<C,U>& j );
359 
360  template<typename C, typename T, typename U>
361  friend ptrdiff_t operator-( const hash_map_iterator<C,T>& i, const hash_map_iterator<C,U>& j );
362 
363  template<typename C, typename U>
364  friend class hash_map_iterator;
365 
366  template<typename I>
367  friend class hash_map_range;
368 
369  void advance_to_next_bucket() { // TODO?: refactor to iterator_base class
370  size_t k = my_index+1;
371  __TBB_ASSERT( my_bucket, "advancing an invalid iterator?");
372  while( k <= my_map->my_mask ) {
373  // Following test uses 2's-complement wizardry
374  if( k&(k-2) ) // not the beginning of a segment
375  ++my_bucket;
376  else my_bucket = my_map->get_bucket( k );
377  my_node = static_cast<node*>( my_bucket->node_list );
378  if( hash_map_base::is_valid(my_node) ) {
379  my_index = k; return;
380  }
381  ++k;
382  }
383  my_bucket = 0; my_node = 0; my_index = k; // the end
384  }
385 #if !defined(_MSC_VER) || defined(__INTEL_COMPILER)
386  template<typename Key, typename T, typename HashCompare, typename A>
388 #else
389  public: // workaround
390 #endif
391  const Container *my_map;
393 
395  size_t my_index;
396 
398  const bucket *my_bucket;
399 
401  node *my_node;
402 
403  hash_map_iterator( const Container &map, size_t index, const bucket *b, node_base *n );
404 
405  public:
407  hash_map_iterator(): my_map(), my_index(), my_bucket(), my_node() {}
409  my_map(other.my_map),
410  my_index(other.my_index),
411  my_bucket(other.my_bucket),
412  my_node(other.my_node)
413  {}
414 
416  my_map = other.my_map;
417  my_index = other.my_index;
418  my_bucket = other.my_bucket;
419  my_node = other.my_node;
420  return *this;
421  }
422  Value& operator*() const {
423  __TBB_ASSERT( hash_map_base::is_valid(my_node), "iterator uninitialized or at end of container?" );
424  return my_node->value();
425  }
426  Value* operator->() const {return &operator*();}
427  hash_map_iterator& operator++();
428 
431  hash_map_iterator old(*this);
432  operator++();
433  return old;
434  }
435  };
436 
437  template<typename Container, typename Value>
438  hash_map_iterator<Container,Value>::hash_map_iterator( const Container &map, size_t index, const bucket *b, node_base *n ) :
439  my_map(&map),
440  my_index(index),
441  my_bucket(b),
442  my_node( static_cast<node*>(n) )
443  {
444  if( b && !hash_map_base::is_valid(n) )
446  }
447 
448  template<typename Container, typename Value>
450  my_node = static_cast<node*>( my_node->next );
452  return *this;
453  }
454 
455  template<typename Container, typename T, typename U>
457  return i.my_node == j.my_node && i.my_map == j.my_map;
458  }
459 
460  template<typename Container, typename T, typename U>
462  return i.my_node != j.my_node || i.my_map != j.my_map;
463  }
464 
466 
467  template<typename Iterator>
468  class hash_map_range {
469  typedef typename Iterator::map_type map_type;
470  Iterator my_begin;
471  Iterator my_end;
472  mutable Iterator my_midpoint;
473  size_t my_grainsize;
475  void set_midpoint() const;
476  template<typename U> friend class hash_map_range;
477  public:
479  typedef std::size_t size_type;
480  typedef typename Iterator::value_type value_type;
481  typedef typename Iterator::reference reference;
482  typedef typename Iterator::difference_type difference_type;
483  typedef Iterator iterator;
484 
486  bool empty() const {return my_begin==my_end;}
487 
489  bool is_divisible() const {
490  return my_midpoint!=my_end;
491  }
494  my_end(r.my_end),
495  my_grainsize(r.my_grainsize)
496  {
497  r.my_end = my_begin = r.my_midpoint;
498  __TBB_ASSERT( !empty(), "Splitting despite the range not being divisible" );
499  __TBB_ASSERT( !r.empty(), "Splitting despite the range not being divisible" );
500  set_midpoint();
501  r.set_midpoint();
502  }
504  template<typename U>
506  my_begin(r.my_begin),
507  my_end(r.my_end),
508  my_midpoint(r.my_midpoint),
509  my_grainsize(r.my_grainsize)
510  {}
512  hash_map_range( const map_type &map, size_type grainsize_ = 1 ) :
513  my_begin( Iterator( map, 0, map.my_embedded_segment, map.my_embedded_segment->node_list ) ),
514  my_end( Iterator( map, map.my_mask + 1, 0, 0 ) ),
515  my_grainsize( grainsize_ )
516  {
517  __TBB_ASSERT( grainsize_>0, "grainsize must be positive" );
518  set_midpoint();
519  }
520  const Iterator& begin() const {return my_begin;}
521  const Iterator& end() const {return my_end;}
523  size_type grainsize() const {return my_grainsize;}
524  };
525 
526  template<typename Iterator>
528  // Split by groups of nodes
529  size_t m = my_end.my_index-my_begin.my_index;
530  if( m > my_grainsize ) {
531  m = my_begin.my_index + m/2u;
532  hash_map_base::bucket *b = my_begin.my_map->get_bucket(m);
533  my_midpoint = Iterator(*my_begin.my_map,m,b,b->node_list);
534  } else {
535  my_midpoint = my_end;
536  }
537  __TBB_ASSERT( my_begin.my_index <= my_midpoint.my_index,
538  "my_begin is after my_midpoint" );
539  __TBB_ASSERT( my_midpoint.my_index <= my_end.my_index,
540  "my_midpoint is after my_end" );
541  __TBB_ASSERT( my_begin != my_midpoint || my_begin == my_end,
542  "[my_begin, my_midpoint) range should not be empty" );
543  }
544 
545  } // internal
547 
548 #if _MSC_VER && !defined(__INTEL_COMPILER)
549  // Suppress "conditional expression is constant" warning.
550  #pragma warning( push )
551  #pragma warning( disable: 4127 )
552 #endif
553 
555 
584 template<typename Key, typename T, typename HashCompare, typename Allocator>
586  template<typename Container, typename Value>
588 
589  template<typename I>
591 
592 public:
593  typedef Key key_type;
594  typedef T mapped_type;
595  typedef std::pair<const Key,T> value_type;
597  typedef ptrdiff_t difference_type;
598  typedef value_type *pointer;
599  typedef const value_type *const_pointer;
600  typedef value_type &reference;
601  typedef const value_type &const_reference;
606  typedef Allocator allocator_type;
607 
608 protected:
609  friend class const_accessor;
610  class node;
613  node_allocator_type my_allocator;
614  HashCompare my_hash_compare;
615 
616  class node : public node_base {
617  tbb::aligned_space<value_type> my_value;
618  public:
619  value_type* storage() { return my_value.begin(); }
620  value_type& value() { return *storage(); }
621  };
622 
623  void delete_node( node_base *n ) {
624  node_allocator_traits::destroy(my_allocator, static_cast<node*>(n)->storage());
625  node_allocator_traits::destroy(my_allocator, static_cast<node*>(n));
626  node_allocator_traits::deallocate(my_allocator, static_cast<node*>(n), 1);
627  }
628 
631  node_allocator_type& my_alloc;
632 
633  node_scoped_guard(node* n, node_allocator_type& alloc) : my_node(n), my_alloc(alloc) {}
635  if(my_node) {
636  node_allocator_traits::destroy(my_alloc, my_node);
637  node_allocator_traits::deallocate(my_alloc, my_node, 1);
638  }
639  }
640  void dismiss() { my_node = NULL; }
641  };
642 
643 #if __TBB_CPP11_RVALUE_REF_PRESENT && __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT
644  template<typename... Args>
645  static node* create_node(node_allocator_type& allocator, Args&&... args)
646 #else
647  template<typename Arg1, typename Arg2>
648  static node* create_node(node_allocator_type& allocator, __TBB_FORWARDING_REF(Arg1) arg1, __TBB_FORWARDING_REF(Arg2) arg2)
649 #endif
650  {
651  node* node_ptr = node_allocator_traits::allocate(allocator, 1);
652  node_scoped_guard guard(node_ptr, allocator);
653  node_allocator_traits::construct(allocator, node_ptr);
654 #if __TBB_CPP11_RVALUE_REF_PRESENT && __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT
655  node_allocator_traits::construct(allocator, node_ptr->storage(), std::forward<Args>(args)...);
656 #else
657  node_allocator_traits::construct(allocator, node_ptr->storage(), tbb::internal::forward<Arg1>(arg1), tbb::internal::forward<Arg2>(arg2));
658 #endif
659  guard.dismiss();
660  return node_ptr;
661  }
662 
663  static node* allocate_node_copy_construct(node_allocator_type& allocator, const Key &key, const T * t){
664  return create_node(allocator, key, *t);
665  }
666 
667 #if __TBB_CPP11_RVALUE_REF_PRESENT
668  static node* allocate_node_move_construct(node_allocator_type& allocator, const Key &key, const T * t){
669  return create_node(allocator, key, std::move(*const_cast<T*>(t)));
670  }
671 #endif
672 
673  static node* allocate_node_default_construct(node_allocator_type& allocator, const Key &key, const T * ){
674 #if __TBB_CPP11_RVALUE_REF_PRESENT && __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT && __TBB_CPP11_TUPLE_PRESENT
675  // Emplace construct an empty T object inside the pair
676  return create_node(allocator, std::piecewise_construct,
677  std::forward_as_tuple(key), std::forward_as_tuple());
678 #else
679  T obj; // Use of a temporary object is impossible, because create_node takes a non-const reference
680  return create_node(allocator, key, tbb::internal::move(obj));
681 #endif
682  }
683 
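As a side note, a small standalone sketch, not part of this header, of the std::piecewise_construct pattern used by allocate_node_default_construct() above: the key is forwarded and the mapped value is constructed in place from an empty tuple, so no temporary T is needed (unlike the #else fallback).

    #include <string>
    #include <tuple>
    #include <utility>

    int main() {
        // Same construction pattern: key forwarded, mapped value
        // default-constructed in place inside the pair.
        std::pair<const std::string, std::string> p(std::piecewise_construct,
                                                    std::forward_as_tuple("key"),
                                                    std::forward_as_tuple());
        return p.second.empty() ? 0 : 1;   // returns 0: second was default-constructed
    }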
684  static node* do_not_allocate_node(node_allocator_type& , const Key &, const T * ){
685  __TBB_ASSERT(false,"this dummy function should not be called");
686  return NULL;
687  }
688 
689  node *search_bucket( const key_type &key, bucket *b ) const {
690  node *n = static_cast<node*>( b->node_list );
691  while( is_valid(n) && !my_hash_compare.equal(key, n->value().first) )
692  n = static_cast<node*>( n->next );
693  __TBB_ASSERT(n != internal::rehash_req, "Search can be executed only for rehashed bucket");
694  return n;
695  }
696 
700  public:
701  bucket_accessor( concurrent_hash_map *base, const hashcode_t h, bool writer = false ) { acquire( base, h, writer ); }
703  inline void acquire( concurrent_hash_map *base, const hashcode_t h, bool writer = false ) {
704  my_b = base->get_bucket( h );
705  // TODO: actually, notification is unnecessary here, just hiding double-check
707  && try_acquire( my_b->mutex, /*write=*/true ) )
708  {
709  if( my_b->node_list == internal::rehash_req ) base->rehash_bucket( my_b, h ); //recursive rehashing
710  }
711  else bucket::scoped_t::acquire( my_b->mutex, writer );
713  }
717  bucket *operator() () { return my_b; }
718  };
719 
720  // TODO refactor to hash_base
721  void rehash_bucket( bucket *b_new, const hashcode_t h ) {
722  __TBB_ASSERT( *(intptr_t*)(&b_new->mutex), "b_new must be locked (for write)");
723  __TBB_ASSERT( h > 1, "The lowermost buckets can't be rehashed" );
725  hashcode_t mask = ( 1u<<__TBB_Log2( h ) ) - 1; // get parent mask from the topmost bit
726 #if __TBB_STATISTICS
727  my_info_rehashes++; // invocations of rehash_bucket
728 #endif
729 
730  bucket_accessor b_old( this, h & mask );
731 
732  mask = (mask<<1) | 1; // get full mask for new bucket
733  __TBB_ASSERT( (mask&(mask+1))==0 && (h & mask) == h, NULL );
734  restart:
735  for( node_base **p = &b_old()->node_list, *n = __TBB_load_with_acquire(*p); is_valid(n); n = *p ) {
736  hashcode_t c = my_hash_compare.hash( static_cast<node*>(n)->value().first );
737 #if TBB_USE_ASSERT
738  hashcode_t bmask = h & (mask>>1);
739  bmask = bmask==0? 1 : ( 1u<<(__TBB_Log2( bmask )+1 ) ) - 1; // minimal mask of parent bucket
740  __TBB_ASSERT( (c & bmask) == (h & bmask), "hash() function changed for key in table" );
741 #endif
742  if( (c & mask) == h ) {
743  if( !b_old.is_writer() )
744  if( !b_old.upgrade_to_writer() ) {
745  goto restart; // node ptr can be invalid due to concurrent erase
746  }
747  *p = n->next; // exclude from b_old
748  add_to_bucket( b_new, n );
749  } else p = &n->next; // iterate to next item
750  }
751  }
752 
755  call_clear_on_leave( concurrent_hash_map* a_ch_map ) : my_ch_map(a_ch_map) {}
756  void dismiss() {my_ch_map = 0;}
758  if (my_ch_map){
759  my_ch_map->clear();
760  }
761  }
762  };
763 public:
764 
765  class accessor;
767  class const_accessor : private node::scoped_t /*which derived from no_copy*/ {
768  friend class concurrent_hash_map<Key,T,HashCompare,Allocator>;
769  friend class accessor;
770  public:
773 
775  bool empty() const { return !my_node; }
776 
778  void release() {
779  if( my_node ) {
781  my_node = 0;
782  }
783  }
784 
786  const_reference operator*() const {
787  __TBB_ASSERT( my_node, "attempt to dereference empty accessor" );
788  return my_node->value();
789  }
790 
792  const_pointer operator->() const {
793  return &operator*();
794  }
795 
797  const_accessor() : my_node(NULL) {}
798 
801  my_node = NULL; // scoped lock's release() is called in its destructor
802  }
803  protected:
804  bool is_writer() { return node::scoped_t::is_writer; }
806  hashcode_t my_hash;
807  };
808 
810  class accessor: public const_accessor {
811  public:
814 
816  reference operator*() const {
817  __TBB_ASSERT( this->my_node, "attempt to dereference empty accessor" );
818  return this->my_node->value();
819  }
820 
822  pointer operator->() const {
823  return &operator*();
824  }
825  };
826 
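For orientation, a hedged usage sketch of the accessor types above (not part of this header; table_t, lookup_or_zero and bump are hypothetical names): const_accessor holds a reader lock on the found element, accessor a writer lock, and both are released by release() or by the destructor.

    #include "tbb/concurrent_hash_map.h"
    #include <string>

    typedef tbb::concurrent_hash_map<std::string, int> table_t;

    int lookup_or_zero(const table_t& table, const std::string& key) {
        table_t::const_accessor ca;      // empty() == true until a find/insert succeeds
        if (table.find(ca, key))
            return ca->second;           // element stays read-locked while ca is alive
        return 0;
    }

    void bump(table_t& table, const std::string& key) {
        table_t::accessor a;
        table.insert(a, key);            // inserts a default-constructed int if the key is absent
        ++a->second;                     // element is write-locked while a is alive
    }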
828  explicit concurrent_hash_map( const allocator_type &a = allocator_type() )
829  : internal::hash_map_base(), my_allocator(a)
830  {}
831 
832  explicit concurrent_hash_map( const HashCompare& compare, const allocator_type& a = allocator_type() )
833  : internal::hash_map_base(), my_allocator(a), my_hash_compare(compare)
834  {}
835 
837  concurrent_hash_map( size_type n, const allocator_type &a = allocator_type() )
838  : internal::hash_map_base(), my_allocator(a)
839  {
840  reserve( n, my_allocator );
841  }
842 
843  concurrent_hash_map( size_type n, const HashCompare& compare, const allocator_type& a = allocator_type() )
844  : internal::hash_map_base(), my_allocator(a), my_hash_compare(compare)
845  {
846  reserve( n, my_allocator );
847  }
848 
851  : internal::hash_map_base(),
852  my_allocator(node_allocator_traits::select_on_container_copy_construction(table.get_allocator()))
853  {
854  call_clear_on_leave scope_guard(this);
855  internal_copy(table);
856  scope_guard.dismiss();
857  }
858 
859  concurrent_hash_map( const concurrent_hash_map &table, const allocator_type &a)
860  : internal::hash_map_base(), my_allocator(a)
861  {
862  call_clear_on_leave scope_guard(this);
863  internal_copy(table);
864  scope_guard.dismiss();
865  }
866 
867 #if __TBB_CPP11_RVALUE_REF_PRESENT
870  : internal::hash_map_base(), my_allocator(std::move(table.get_allocator()))
871  {
872  internal_move(std::move(table));
873  }
874 
876  concurrent_hash_map( concurrent_hash_map &&table, const allocator_type &a )
877  : internal::hash_map_base(), my_allocator(a)
878  {
879  if (a == table.get_allocator()){
880  internal_move(std::move(table));
881  }else{
882  call_clear_on_leave scope_guard(this);
883  internal_copy(std::make_move_iterator(table.begin()), std::make_move_iterator(table.end()), table.size());
884  scope_guard.dismiss();
885  }
886  }
887 #endif //__TBB_CPP11_RVALUE_REF_PRESENT
888 
890  template<typename I>
891  concurrent_hash_map( I first, I last, const allocator_type &a = allocator_type() )
892  : internal::hash_map_base(), my_allocator(a)
893  {
894  call_clear_on_leave scope_guard(this);
895  internal_copy(first, last, std::distance(first, last));
896  scope_guard.dismiss();
897  }
898 
899  template<typename I>
900  concurrent_hash_map( I first, I last, const HashCompare& compare, const allocator_type& a = allocator_type() )
901  : internal::hash_map_base(), my_allocator(a), my_hash_compare(compare)
902  {
903  call_clear_on_leave scope_guard(this);
904  internal_copy(first, last, std::distance(first, last));
905  scope_guard.dismiss();
906  }
907 
908 #if __TBB_INITIALIZER_LISTS_PRESENT
909  concurrent_hash_map( std::initializer_list<value_type> il, const allocator_type &a = allocator_type() )
911  : internal::hash_map_base(), my_allocator(a)
912  {
913  call_clear_on_leave scope_guard(this);
914  internal_copy(il.begin(), il.end(), il.size());
915  scope_guard.dismiss();
916  }
917 
918  concurrent_hash_map( std::initializer_list<value_type> il, const HashCompare& compare, const allocator_type& a = allocator_type() )
919  : internal::hash_map_base(), my_allocator(a), my_hash_compare(compare)
920  {
921  call_clear_on_leave scope_guard(this);
922  internal_copy(il.begin(), il.end(), il.size());
923  scope_guard.dismiss();
924  }
925 
926 #endif //__TBB_INITIALIZER_LISTS_PRESENT
927 
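A hedged construction sketch, not part of this header: string_hash_compare is a hypothetical comparator that supplies only the hash() and equal() operations this header invokes on my_hash_compare, and the initializer-list constructor above (guarded by __TBB_INITIALIZER_LISTS_PRESENT) is assumed to be available.

    #include "tbb/concurrent_hash_map.h"
    #include <cstddef>
    #include <string>

    struct string_hash_compare {
        static std::size_t hash(const std::string& s) {
            std::size_t h = 0;
            for (std::size_t i = 0; i < s.size(); ++i)
                h = h * 31 + static_cast<unsigned char>(s[i]);
            return h;
        }
        static bool equal(const std::string& a, const std::string& b) {
            return a == b;
        }
    };

    int main() {
        // The comparator type is supplied as the third template argument;
        // the table itself is built from an initializer list.
        tbb::concurrent_hash_map<std::string, int, string_hash_compare> colors(
            { {"red", 1}, {"green", 2}, {"blue", 3} });
        return colors.size() == 3 ? 0 : 1;
    }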
930  if( this!=&table ) {
932  clear();
933  tbb::internal::allocator_copy_assignment(my_allocator, table.my_allocator, pocca_type());
934  internal_copy(table);
935  }
936  return *this;
937  }
938 
939 #if __TBB_CPP11_RVALUE_REF_PRESENT
942  if(this != &table) {
944  internal_move_assign(std::move(table), pocma_type());
945  }
946  return *this;
947  }
948 #endif //__TBB_CPP11_RVALUE_REF_PRESENT
949 
950 #if __TBB_INITIALIZER_LISTS_PRESENT
951  concurrent_hash_map& operator=( std::initializer_list<value_type> il ) {
953  clear();
954  internal_copy(il.begin(), il.end(), il.size());
955  return *this;
956  }
957 #endif //__TBB_INITIALIZER_LISTS_PRESENT
958 
959 
961 
963  void rehash(size_type n = 0);
964 
966  void clear();
967 
969  ~concurrent_hash_map() { clear(); }
970 
971  //------------------------------------------------------------------------
972  // Parallel algorithm support
973  //------------------------------------------------------------------------
974  range_type range( size_type grainsize=1 ) {
975  return range_type( *this, grainsize );
976  }
977  const_range_type range( size_type grainsize=1 ) const {
978  return const_range_type( *this, grainsize );
979  }
980 
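A hedged usage sketch of the range() methods above (not part of this header; table_t and parallel_sum are hypothetical names, and a C++11 compiler is assumed for the lambda): hash_map_range models the TBB Range concept, so a table can be traversed with tbb::parallel_for, here accumulating into a tbb::atomic.

    #include "tbb/atomic.h"
    #include "tbb/concurrent_hash_map.h"
    #include "tbb/parallel_for.h"
    #include <string>

    typedef tbb::concurrent_hash_map<std::string, int> table_t;

    int parallel_sum(const table_t& table) {
        tbb::atomic<int> total;
        total = 0;
        tbb::parallel_for(table.range(), [&](const table_t::const_range_type& r) {
            int local = 0;
            for (table_t::const_iterator it = r.begin(); it != r.end(); ++it)
                local += it->second;
            total += local;   // atomic accumulation of per-subrange sums
        });
        return total;
    }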
981  //------------------------------------------------------------------------
982  // STL support - not thread-safe methods
983  //------------------------------------------------------------------------
984  iterator begin() { return iterator( *this, 0, my_embedded_segment, my_embedded_segment->node_list ); }
985  iterator end() { return iterator( *this, 0, 0, 0 ); }
986  const_iterator begin() const { return const_iterator( *this, 0, my_embedded_segment, my_embedded_segment->node_list ); }
987  const_iterator end() const { return const_iterator( *this, 0, 0, 0 ); }
988  std::pair<iterator, iterator> equal_range( const Key& key ) { return internal_equal_range( key, end() ); }
989  std::pair<const_iterator, const_iterator> equal_range( const Key& key ) const { return internal_equal_range( key, end() ); }
990 
992  size_type size() const { return my_size; }
993 
995  bool empty() const { return my_size == 0; }
996 
998  size_type max_size() const {return (~size_type(0))/sizeof(node);}
999 
1001  size_type bucket_count() const { return my_mask+1; }
1002 
1004  allocator_type get_allocator() const { return this->my_allocator; }
1005 
1007  void swap( concurrent_hash_map &table );
1008 
1009  //------------------------------------------------------------------------
1010  // concurrent map operations
1011  //------------------------------------------------------------------------
1012 
1014  size_type count( const Key &key ) const {
1015  return const_cast<concurrent_hash_map*>(this)->lookup(/*insert*/false, key, NULL, NULL, /*write=*/false, &do_not_allocate_node );
1016  }
1017 
1019 
1020  bool find( const_accessor &result, const Key &key ) const {
1021  result.release();
1022  return const_cast<concurrent_hash_map*>(this)->lookup(/*insert*/false, key, NULL, &result, /*write=*/false, &do_not_allocate_node );
1023  }
1024 
1026 
1027  bool find( accessor &result, const Key &key ) {
1028  result.release();
1029  return lookup(/*insert*/false, key, NULL, &result, /*write=*/true, &do_not_allocate_node );
1030  }
1031 
1033 
1034  bool insert( const_accessor &result, const Key &key ) {
1035  result.release();
1036  return lookup(/*insert*/true, key, NULL, &result, /*write=*/false, &allocate_node_default_construct );
1037  }
1038 
1040 
1041  bool insert( accessor &result, const Key &key ) {
1042  result.release();
1043  return lookup(/*insert*/true, key, NULL, &result, /*write=*/true, &allocate_node_default_construct );
1044  }
1045 
1047 
1048  bool insert( const_accessor &result, const value_type &value ) {
1049  result.release();
1050  return lookup(/*insert*/true, value.first, &value.second, &result, /*write=*/false, &allocate_node_copy_construct );
1051  }
1052 
1054 
1055  bool insert( accessor &result, const value_type &value ) {
1056  result.release();
1057  return lookup(/*insert*/true, value.first, &value.second, &result, /*write=*/true, &allocate_node_copy_construct );
1058  }
1059 
1061 
1062  bool insert( const value_type &value ) {
1063  return lookup(/*insert*/true, value.first, &value.second, NULL, /*write=*/false, &allocate_node_copy_construct );
1064  }
1065 
1066 #if __TBB_CPP11_RVALUE_REF_PRESENT
1067 
1069  bool insert( const_accessor &result, value_type && value ) {
1070  return generic_move_insert(result, std::move(value));
1071  }
1072 
1074 
1075  bool insert( accessor &result, value_type && value ) {
1076  return generic_move_insert(result, std::move(value));
1077  }
1078 
1080 
1081  bool insert( value_type && value ) {
1082  return generic_move_insert(accessor_not_used(), std::move(value));
1083  }
1084 
1085 #if __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT
1086 
1088  template<typename... Args>
1089  bool emplace( const_accessor &result, Args&&... args ) {
1090  return generic_emplace(result, std::forward<Args>(args)...);
1091  }
1092 
1094 
1095  template<typename... Args>
1096  bool emplace( accessor &result, Args&&... args ) {
1097  return generic_emplace(result, std::forward<Args>(args)...);
1098  }
1099 
1101 
1102  template<typename... Args>
1103  bool emplace( Args&&... args ) {
1104  return generic_emplace(accessor_not_used(), std::forward<Args>(args)...);
1105  }
1106 #endif //__TBB_CPP11_VARIADIC_TEMPLATES_PRESENT
1107 #endif //__TBB_CPP11_RVALUE_REF_PRESENT
1108 
1110  template<typename I>
1111  void insert( I first, I last ) {
1112  for ( ; first != last; ++first )
1113  insert( *first );
1114  }
1115 
1116 #if __TBB_INITIALIZER_LISTS_PRESENT
1117  void insert( std::initializer_list<value_type> il ) {
1119  insert( il.begin(), il.end() );
1120  }
1121 #endif //__TBB_INITIALIZER_LISTS_PRESENT
1122 
1124 
1125  bool erase( const Key& key );
1126 
1128 
1129  bool erase( const_accessor& item_accessor ) {
1130  return exclude( item_accessor );
1131  }
1132 
1134 
1135  bool erase( accessor& item_accessor ) {
1136  return exclude( item_accessor );
1137  }
1138 
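A hedged sketch of the accessor-free operations declared above (not part of this header; table_t and demo are hypothetical names): insert(value) adds the pair only when the key is absent, count() reports 0 or 1, and erase(key) removes the element if it is present.

    #include "tbb/concurrent_hash_map.h"
    #include <string>
    #include <utility>

    typedef tbb::concurrent_hash_map<std::string, int> table_t;

    bool demo(table_t& table) {
        bool inserted = table.insert(std::make_pair(std::string("answer"), 42));
        bool again    = table.insert(std::make_pair(std::string("answer"), 7)); // false: key already present
        bool present  = table.count("answer") == 1;
        bool removed  = table.erase("answer");       // true: the element existed and was removed
        return inserted && !again && present && removed;
    }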
1139 protected:
1141  bool lookup(bool op_insert, const Key &key, const T *t, const_accessor *result, bool write, node* (*allocate_node)(node_allocator_type& , const Key &, const T * ), node *tmp_n = 0 ) ;
1142 
1143  struct accessor_not_used { void release(){}};
1144  friend const_accessor* accessor_location( accessor_not_used const& ){ return NULL;}
1145  friend const_accessor* accessor_location( const_accessor & a ) { return &a;}
1146 
1147  friend bool is_write_access_needed( accessor const& ) { return true;}
1148  friend bool is_write_access_needed( const_accessor const& ) { return false;}
1149  friend bool is_write_access_needed( accessor_not_used const& ) { return false;}
1150 
1151 #if __TBB_CPP11_RVALUE_REF_PRESENT
1152  template<typename Accessor>
1153  bool generic_move_insert( Accessor && result, value_type && value ) {
1154  result.release();
1155  return lookup(/*insert*/true, value.first, &value.second, accessor_location(result), is_write_access_needed(result), &allocate_node_move_construct );
1156  }
1157 
1158 #if __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT
1159  template<typename Accessor, typename... Args>
1160  bool generic_emplace( Accessor && result, Args &&... args ) {
1161  result.release();
1162  node * node_ptr = create_node(my_allocator, std::forward<Args>(args)...);
1163  return lookup(/*insert*/true, node_ptr->value().first, NULL, accessor_location(result), is_write_access_needed(result), &do_not_allocate_node, node_ptr );
1164  }
1165 #endif //__TBB_CPP11_VARIADIC_TEMPLATES_PRESENT
1166 #endif //__TBB_CPP11_RVALUE_REF_PRESENT
1167 
1169  bool exclude( const_accessor &item_accessor );
1170 
1172  template<typename I>
1173  std::pair<I, I> internal_equal_range( const Key& key, I end ) const;
1174 
1176  void internal_copy( const concurrent_hash_map& source );
1177 
1178  template<typename I>
1179  void internal_copy( I first, I last, size_type reserve_size );
1180 
1181 #if __TBB_CPP11_RVALUE_REF_PRESENT
1182  // A compile-time dispatch to allow move assignment of containers with non-movable value_type if POCMA is true_type
1185  internal_move(std::move(other));
1186  }
1187 
1189  if (this->my_allocator == other.my_allocator) {
1190  internal_move(std::move(other));
1191  } else {
1192  //do per element move
1193  internal_copy(std::make_move_iterator(other.begin()), std::make_move_iterator(other.end()), other.size());
1194  }
1195  }
1196 #endif
1197 
1199 
1201  const_pointer internal_fast_find( const Key& key ) const {
1202  hashcode_t h = my_hash_compare.hash( key );
1203  hashcode_t m = (hashcode_t) itt_load_word_with_acquire( my_mask );
1204  node *n;
1205  restart:
1206  __TBB_ASSERT((m&(m+1))==0, "data structure is invalid");
1207  bucket *b = get_bucket( h & m );
1208  // TODO: actually, notification is unnecessary here, just hiding double-check
1210  {
1212  if( lock.try_acquire( b->mutex, /*write=*/true ) ) {
1213  if( b->node_list == internal::rehash_req)
1214  const_cast<concurrent_hash_map*>(this)->rehash_bucket( b, h & m ); //recursive rehashing
1215  }
1216  else lock.acquire( b->mutex, /*write=*/false );
1218  }
1219  n = search_bucket( key, b );
1220  if( n )
1221  return &n->item;
1222  else if( check_mask_race( h, m ) )
1223  goto restart;
1224  return 0;
1225  }
1226 };
1227 
1228 #if __TBB_CPP17_DEDUCTION_GUIDES_PRESENT
1229 namespace internal {
1230 using namespace tbb::internal;
1231 
1232 template<template<typename...> typename Map, typename Key, typename T, typename... Args>
1233 using hash_map_t = Map<
1234  Key, T,
1235  std::conditional_t< (sizeof...(Args)>0) && !is_allocator_v< pack_element_t<0, Args...> >,
1236  pack_element_t<0, Args...>, tbb_hash_compare<Key> >,
1237  std::conditional_t< (sizeof...(Args)>0) && is_allocator_v< pack_element_t<sizeof...(Args)-1, Args...> >,
1238  pack_element_t<sizeof...(Args)-1, Args...>, tbb_allocator<std::pair<const Key, T> > >
1239 >;
1240 }
1241 
1242 // Deduction guide for the constructor from two iterators and hash_compare/ allocator
1243 template<typename I, typename... Args>
1244 concurrent_hash_map(I, I, Args...)
1245 -> internal::hash_map_t<concurrent_hash_map, internal::iterator_key_t<I>,internal::iterator_mapped_t<I>, Args...>;
1246 
1247 // Deduction guide for the constructor from an initializer_list and hash_compare/ allocator
1248 // Deduction guide for an initializer_list, hash_compare and allocator is implicit
1249 template<typename Key, typename T, typename CompareOrAllocator>
1250 concurrent_hash_map(std::initializer_list<std::pair<const Key, T>>, CompareOrAllocator)
1251 -> internal::hash_map_t<concurrent_hash_map, Key, T, CompareOrAllocator>;
1252 
1253 #endif /* __TBB_CPP17_DEDUCTION_GUIDES_PRESENT */
1254 
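A hedged sketch of the deduction guides above, assuming a C++17 compiler with __TBB_CPP17_DEDUCTION_GUIDES_PRESENT enabled (not part of this header):

    #include "tbb/concurrent_hash_map.h"
    #include <map>
    #include <string>
    #include <utility>

    int main() {
        std::map<std::string, int> src{ {"a", 1}, {"b", 2} };

        // Iterator pair: Key and T are deduced from the iterator's value_type,
        // HashCompare and the allocator fall back to their defaults.
        tbb::concurrent_hash_map table1(src.begin(), src.end());

        // Iterator pair plus a trailing allocator, which is recognized by the
        // is_allocator_v check inside hash_map_t above.
        tbb::concurrent_hash_map table2(
            src.begin(), src.end(),
            tbb::tbb_allocator<std::pair<const std::string, int> >());

        return int(table1.size() + table2.size());
    }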
1255 template<typename Key, typename T, typename HashCompare, typename A>
1256 bool concurrent_hash_map<Key,T,HashCompare,A>::lookup( bool op_insert, const Key &key, const T *t, const_accessor *result, bool write, node* (*allocate_node)(node_allocator_type& , const Key&, const T*), node *tmp_n ) {
1257  __TBB_ASSERT( !result || !result->my_node, NULL );
1258  bool return_value;
1259  hashcode_t const h = my_hash_compare.hash( key );
1260  hashcode_t m = (hashcode_t) itt_load_word_with_acquire( my_mask );
1261  segment_index_t grow_segment = 0;
1262  node *n;
1263  restart:
1264  {//lock scope
1265  __TBB_ASSERT((m&(m+1))==0, "data structure is invalid");
1266  return_value = false;
1267  // get bucket
1268  bucket_accessor b( this, h & m );
1269 
1270  // find a node
1271  n = search_bucket( key, b() );
1272  if( op_insert ) {
1273  // [opt] insert a key
1274  if( !n ) {
1275  if( !tmp_n ) {
1276  tmp_n = allocate_node(my_allocator, key, t);
1277  }
1278  if( !b.is_writer() && !b.upgrade_to_writer() ) { // TODO: improved insertion
1279  // Rerun search_bucket, in case another thread inserted the item during the upgrade.
1280  n = search_bucket( key, b() );
1281  if( is_valid(n) ) { // unfortunately, it did
1282  b.downgrade_to_reader();
1283  goto exists;
1284  }
1285  }
1286  if( check_mask_race(h, m) )
1287  goto restart; // b.release() is done in ~b().
1288  // insert and set flag to grow the container
1289  grow_segment = insert_new_node( b(), n = tmp_n, m );
1290  tmp_n = 0;
1291  return_value = true;
1292  }
1293  } else { // find or count
1294  if( !n ) {
1295  if( check_mask_race( h, m ) )
1296  goto restart; // b.release() is done in ~b(). TODO: replace by continue
1297  return false;
1298  }
1299  return_value = true;
1300  }
1301  exists:
1302  if( !result ) goto check_growth;
1303  // TODO: the following seems as generic/regular operation
1304  // acquire the item
1305  if( !result->try_acquire( n->mutex, write ) ) {
1306  for( tbb::internal::atomic_backoff backoff(true);; ) {
1307  if( result->try_acquire( n->mutex, write ) ) break;
1308  if( !backoff.bounded_pause() ) {
1309  // the wait takes really long, restart the operation
1310  b.release();
1311  __TBB_ASSERT( !op_insert || !return_value, "Can't acquire new item in locked bucket?" );
1312  __TBB_Yield();
1313  m = (hashcode_t) itt_load_word_with_acquire( my_mask );
1314  goto restart;
1315  }
1316  }
1317  }
1318  }//lock scope
1319  result->my_node = n;
1320  result->my_hash = h;
1321 check_growth:
1322  // [opt] grow the container
1323  if( grow_segment ) {
1324 #if __TBB_STATISTICS
1325  my_info_resizes++; // concurrent ones
1326 #endif
1327  enable_segment( grow_segment, my_allocator );
1328  }
1329  if( tmp_n ) // if op_insert only
1330  delete_node( tmp_n );
1331  return return_value;
1332 }
1333 
1334 template<typename Key, typename T, typename HashCompare, typename A>
1335 template<typename I>
1336 std::pair<I, I> concurrent_hash_map<Key,T,HashCompare,A>::internal_equal_range( const Key& key, I end_ ) const {
1337  hashcode_t h = my_hash_compare.hash( key );
1338  hashcode_t m = my_mask;
1339  __TBB_ASSERT((m&(m+1))==0, "data structure is invalid");
1340  h &= m;
1341  bucket *b = get_bucket( h );
1342  while( b->node_list == internal::rehash_req ) {
1343  m = ( 1u<<__TBB_Log2( h ) ) - 1; // get parent mask from the topmost bit
1344  b = get_bucket( h &= m );
1345  }
1346  node *n = search_bucket( key, b );
1347  if( !n )
1348  return std::make_pair(end_, end_);
1349  iterator lower(*this, h, b, n), upper(lower);
1350  return std::make_pair(lower, ++upper);
1351 }
1352 
1353 template<typename Key, typename T, typename HashCompare, typename A>
1355  __TBB_ASSERT( item_accessor.my_node, NULL );
1356  node_base *const n = item_accessor.my_node;
1357  hashcode_t const h = item_accessor.my_hash;
1358  hashcode_t m = (hashcode_t) itt_load_word_with_acquire( my_mask );
1359  do {
1360  // get bucket
1361  bucket_accessor b( this, h & m, /*writer=*/true );
1362  node_base **p = &b()->node_list;
1363  while( *p && *p != n )
1364  p = &(*p)->next;
1365  if( !*p ) { // someone else was first
1366  if( check_mask_race( h, m ) )
1367  continue;
1368  item_accessor.release();
1369  return false;
1370  }
1371  __TBB_ASSERT( *p == n, NULL );
1372  *p = n->next; // remove from container
1373  my_size--;
1374  break;
1375  } while(true);
1376  if( !item_accessor.is_writer() ) // need to get exclusive lock
1377  item_accessor.upgrade_to_writer(); // return value means nothing here
1378  item_accessor.release();
1379  delete_node( n ); // Only one thread can delete it
1380  return true;
1381 }
1382 
1383 template<typename Key, typename T, typename HashCompare, typename A>
1385  node_base *n;
1386  hashcode_t const h = my_hash_compare.hash( key );
1387  hashcode_t m = (hashcode_t) itt_load_word_with_acquire( my_mask );
1388 restart:
1389  {//lock scope
1390  // get bucket
1391  bucket_accessor b( this, h & m );
1392  search:
1393  node_base **p = &b()->node_list;
1394  n = *p;
1395  while( is_valid(n) && !my_hash_compare.equal(key, static_cast<node*>(n)->value().first ) ) {
1396  p = &n->next;
1397  n = *p;
1398  }
1399  if( !n ) { // not found, but mask could be changed
1400  if( check_mask_race( h, m ) )
1401  goto restart;
1402  return false;
1403  }
1404  else if( !b.is_writer() && !b.upgrade_to_writer() ) {
1405  if( check_mask_race( h, m ) ) // contended upgrade, check mask
1406  goto restart;
1407  goto search;
1408  }
1409  *p = n->next;
1410  my_size--;
1411  }
1412  {
1413  typename node::scoped_t item_locker( n->mutex, /*write=*/true );
1414  }
1415  // note: no other thread should be attempting to acquire this mutex again, so do not try to upgrade the const_accessor!
1416  delete_node( n ); // Only one thread can delete it due to write lock on the bucket
1417  return true;
1418 }
1419 
1420 template<typename Key, typename T, typename HashCompare, typename A>
1422  typedef typename node_allocator_traits::propagate_on_container_swap pocs_type;
1423  if (this != &table && (pocs_type::value || my_allocator == table.my_allocator)) {
1424  using std::swap;
1425  tbb::internal::allocator_swap(this->my_allocator, table.my_allocator, pocs_type());
1426  swap(this->my_hash_compare, table.my_hash_compare);
1427  internal_swap(table);
1428  }
1429 }
1430 
1431 template<typename Key, typename T, typename HashCompare, typename A>
1433  reserve( sz, my_allocator ); // TODO: add reduction of number of buckets as well
1434  hashcode_t mask = my_mask;
1435  hashcode_t b = (mask+1)>>1; // size or first index of the last segment
1436  __TBB_ASSERT((b&(b-1))==0, NULL); // zero or power of 2
1437  bucket *bp = get_bucket( b ); // only the last segment should be scanned for rehashing
1438  for(; b <= mask; b++, bp++ ) {
1439  node_base *n = bp->node_list;
1440  __TBB_ASSERT( is_valid(n) || n == internal::empty_rehashed || n == internal::rehash_req, "Broken internal structure" );
1441  __TBB_ASSERT( *reinterpret_cast<intptr_t*>(&bp->mutex) == 0, "concurrent or unexpectedly terminated operation during rehash() execution" );
1442  if( n == internal::rehash_req ) { // rehash bucket, conditional because rehashing of a previous bucket may affect this one
1443  hashcode_t h = b; bucket *b_old = bp;
1444  do {
1445  __TBB_ASSERT( h > 1, "The lowermost buckets can't be rehashed" );
1446  hashcode_t m = ( 1u<<__TBB_Log2( h ) ) - 1; // get parent mask from the topmost bit
1447  b_old = get_bucket( h &= m );
1448  } while( b_old->node_list == internal::rehash_req );
1449  // now h is the index of the root rehashed bucket b_old
1450  mark_rehashed_levels( h ); // mark all non-rehashed children recursively across all segments
1451  for( node_base **p = &b_old->node_list, *q = *p; is_valid(q); q = *p ) {
1452  hashcode_t c = my_hash_compare.hash( static_cast<node*>(q)->value().first );
1453  if( (c & mask) != h ) { // should be rehashed
1454  *p = q->next; // exclude from b_old
1455  bucket *b_new = get_bucket( c & mask );
1456  __TBB_ASSERT( b_new->node_list != internal::rehash_req, "hash() function changed for key in table or internal error" );
1457  add_to_bucket( b_new, q );
1458  } else p = &q->next; // iterate to next item
1459  }
1460  }
1461  }
1462 #if TBB_USE_PERFORMANCE_WARNINGS
1463  int current_size = int(my_size), buckets = int(mask)+1, empty_buckets = 0, overpopulated_buckets = 0; // usage statistics
1464  static bool reported = false;
1465 #endif
1466 #if TBB_USE_ASSERT || TBB_USE_PERFORMANCE_WARNINGS
1467  for( b = 0; b <= mask; b++ ) {// only last segment should be scanned for rehashing
1468  if( b & (b-2) ) ++bp; // not the beginning of a segment
1469  else bp = get_bucket( b );
1470  node_base *n = bp->node_list;
1471  __TBB_ASSERT( *reinterpret_cast<intptr_t*>(&bp->mutex) == 0, "concurrent or unexpectedly terminated operation during rehash() execution" );
1472  __TBB_ASSERT( is_valid(n) || n == internal::empty_rehashed, "Broken internal structure" );
1473 #if TBB_USE_PERFORMANCE_WARNINGS
1474  if( n == internal::empty_rehashed ) empty_buckets++;
1475  else if( n->next ) overpopulated_buckets++;
1476 #endif
1477 #if TBB_USE_ASSERT
1478  for( ; is_valid(n); n = n->next ) {
1479  hashcode_t h = my_hash_compare.hash( static_cast<node*>(n)->value().first ) & mask;
1480  __TBB_ASSERT( h == b, "hash() function changed for key in table or internal error" );
1481  }
1482 #endif
1483  }
1484 #endif // TBB_USE_ASSERT || TBB_USE_PERFORMANCE_WARNINGS
1485 #if TBB_USE_PERFORMANCE_WARNINGS
1486  if( buckets > current_size) empty_buckets -= buckets - current_size;
1487  else overpopulated_buckets -= current_size - buckets; // TODO: load_factor?
1488  if( !reported && buckets >= 512 && ( 2*empty_buckets > current_size || 2*overpopulated_buckets > current_size ) ) {
1490  "Performance is not optimal because the hash function produces bad randomness in lower bits in %s.\nSize: %d Empties: %d Overlaps: %d",
1492  typeid(*this).name(),
1493 #else
1494  "concurrent_hash_map",
1495 #endif
1496  current_size, empty_buckets, overpopulated_buckets );
1497  reported = true;
1498  }
1499 #endif
1500 }
1501 
1502 template<typename Key, typename T, typename HashCompare, typename A>
1504  hashcode_t m = my_mask;
1505  __TBB_ASSERT((m&(m+1))==0, "data structure is invalid");
1506 #if TBB_USE_ASSERT || TBB_USE_PERFORMANCE_WARNINGS || __TBB_STATISTICS
1507 #if TBB_USE_PERFORMANCE_WARNINGS || __TBB_STATISTICS
1508  int current_size = int(my_size), buckets = int(m)+1, empty_buckets = 0, overpopulated_buckets = 0; // usage statistics
1509  static bool reported = false;
1510 #endif
1511  bucket *bp = 0;
1512  // check consistency
1513  for( segment_index_t b = 0; b <= m; b++ ) {
1514  if( b & (b-2) ) ++bp; // not the beginning of a segment
1515  else bp = get_bucket( b );
1516  node_base *n = bp->node_list;
1517  __TBB_ASSERT( is_valid(n) || n == internal::empty_rehashed || n == internal::rehash_req, "Broken internal structure" );
1518  __TBB_ASSERT( *reinterpret_cast<intptr_t*>(&bp->mutex) == 0, "concurrent or unexpectedly terminated operation during clear() execution" );
1519 #if TBB_USE_PERFORMANCE_WARNINGS || __TBB_STATISTICS
1520  if( n == internal::empty_rehashed ) empty_buckets++;
1521  else if( n == internal::rehash_req ) buckets--;
1522  else if( n->next ) overpopulated_buckets++;
1523 #endif
1524 #if __TBB_EXTRA_DEBUG
1525  for(; is_valid(n); n = n->next ) {
1526  hashcode_t h = my_hash_compare.hash( static_cast<node*>(n)->value().first );
1527  h &= m;
1528  __TBB_ASSERT( h == b || get_bucket(h)->node_list == internal::rehash_req, "hash() function changed for key in table or internal error" );
1529  }
1530 #endif
1531  }
1532 #if TBB_USE_PERFORMANCE_WARNINGS || __TBB_STATISTICS
1533 #if __TBB_STATISTICS
1534  printf( "items=%d buckets: capacity=%d rehashed=%d empty=%d overpopulated=%d"
1535  " concurrent: resizes=%u rehashes=%u restarts=%u\n",
1536  current_size, int(m+1), buckets, empty_buckets, overpopulated_buckets,
1537  unsigned(my_info_resizes), unsigned(my_info_rehashes), unsigned(my_info_restarts) );
1538  my_info_resizes = 0; // concurrent ones
1539  my_info_restarts = 0; // race collisions
1540  my_info_rehashes = 0; // invocations of rehash_bucket
1541 #endif
1542  if( buckets > current_size) empty_buckets -= buckets - current_size;
1543  else overpopulated_buckets -= current_size - buckets; // TODO: load_factor?
1544  if( !reported && buckets >= 512 && ( 2*empty_buckets > current_size || 2*overpopulated_buckets > current_size ) ) {
1546  "Performance is not optimal because the hash function produces bad randomness in lower bits in %s.\nSize: %d Empties: %d Overlaps: %d",
1548  typeid(*this).name(),
1549 #else
1550  "concurrent_hash_map",
1551 #endif
1552  current_size, empty_buckets, overpopulated_buckets );
1553  reported = true;
1554  }
1555 #endif
1556 #endif // TBB_USE_ASSERT || TBB_USE_PERFORMANCE_WARNINGS || __TBB_STATISTICS
1557  my_size = 0;
1558  segment_index_t s = segment_index_of( m );
1559  __TBB_ASSERT( s+1 == pointers_per_table || !my_table[s+1], "wrong mask or concurrent grow" );
1560  do {
1561  __TBB_ASSERT( is_valid( my_table[s] ), "wrong mask or concurrent grow" );
1562  segment_ptr_t buckets_ptr = my_table[s];
1563  size_type sz = segment_size( s ? s : 1 );
1564  for( segment_index_t i = 0; i < sz; i++ )
1565  for( node_base *n = buckets_ptr[i].node_list; is_valid(n); n = buckets_ptr[i].node_list ) {
1566  buckets_ptr[i].node_list = n->next;
1567  delete_node( n );
1568  }
1569  delete_segment(s, my_allocator);
1570  } while(s-- > 0);
1571  my_mask = embedded_buckets - 1;
1572 }
1573 
1574 template<typename Key, typename T, typename HashCompare, typename A>
1576  hashcode_t mask = source.my_mask;
1577  if( my_mask == mask ) { // optimized version
1578  reserve( source.my_size, my_allocator ); // TODO: load_factor?
1579  bucket *dst = 0, *src = 0;
1580  bool rehash_required = false;
1581  for( hashcode_t k = 0; k <= mask; k++ ) {
1582  if( k & (k-2) ) ++dst,src++; // not the beginning of a segment
1583  else { dst = get_bucket( k ); src = source.get_bucket( k ); }
1584  __TBB_ASSERT( dst->node_list != internal::rehash_req, "Invalid bucket in destination table");
1585  node *n = static_cast<node*>( src->node_list );
1586  if( n == internal::rehash_req ) { // source is not rehashed, items are in previous buckets
1587  rehash_required = true;
1589  } else for(; n; n = static_cast<node*>( n->next ) ) {
1590  node* node_ptr = create_node(my_allocator, n->value().first, n->value().second);
1591  add_to_bucket( dst, node_ptr);
1592  ++my_size; // TODO: replace by non-atomic op
1593  }
1594  }
1595  if( rehash_required ) rehash();
1596  } else internal_copy( source.begin(), source.end(), source.my_size );
1597 }
1598 
1599 template<typename Key, typename T, typename HashCompare, typename A>
1600 template<typename I>
1601 void concurrent_hash_map<Key,T,HashCompare,A>::internal_copy(I first, I last, size_type reserve_size) {
1602  reserve( reserve_size, my_allocator ); // TODO: load_factor?
1603  hashcode_t m = my_mask;
1604  for(; first != last; ++first) {
1605  hashcode_t h = my_hash_compare.hash( (*first).first );
1606  bucket *b = get_bucket( h & m );
1607  __TBB_ASSERT( b->node_list != internal::rehash_req, "Invalid bucket in destination table");
1608  node* node_ptr = create_node(my_allocator, (*first).first, (*first).second);
1609  add_to_bucket( b, node_ptr );
1610  ++my_size; // TODO: replace by non-atomic op
1611  }
1612 }
1613 
1614 } // namespace interface5
1615 
1617 
1618 
1619 template<typename Key, typename T, typename HashCompare, typename A1, typename A2>
1621  if(a.size() != b.size()) return false;
1624  for(; i != i_end; ++i) {
1625  j = b.equal_range(i->first).first;
1626  if( j == j_end || !(i->second == j->second) ) return false;
1627  }
1628  return true;
1629 }
1630 
1631 template<typename Key, typename T, typename HashCompare, typename A1, typename A2>
1633 { return !(a == b); }
1634 
1635 template<typename Key, typename T, typename HashCompare, typename A>
1637 { a.swap( b ); }
1638 
1639 #if _MSC_VER && !defined(__INTEL_COMPILER)
1640  #pragma warning( pop )
1641 #endif // warning 4127 is back
1642 
1643 } // namespace tbb
1644 
1646 #undef __TBB_concurrent_hash_map_H_include_area
1647 
1648 #endif /* __TBB_concurrent_hash_map_H */