#ifndef HAZARD_ERAS_IMPL
#error "This is an impl file and must not be included directly!"
#endif

#include <xenium/aligned_object.hpp>
#include <xenium/detail/port.hpp>

#pragma warning(disable: 4324)  // structure was padded due to alignment specifier
#pragma warning(disable: 26495) // member variable is not initialized (code analysis)

namespace xenium { namespace reclamation {
  template <class Traits>
  template <class T, class MarkedPtr>
  hazard_eras<Traits>::guard_ptr<T, MarkedPtr>::guard_ptr(const MarkedPtr& p) :
    base(p)
  {
    if (this->ptr.get() != nullptr)
    {
      auto era = era_clock.load(std::memory_order_relaxed);
      he = local_thread_data().alloc_hazard_era(era);
    }
  }
  template <class Traits>
  template <class T, class MarkedPtr>
  hazard_eras<Traits>::guard_ptr<T, MarkedPtr>::guard_ptr(const guard_ptr& p) :
    base(p.ptr),
    he(p.he)
  {
    if (he != nullptr)
      he->add_guard(); // the copy shares the same hazard era, so bump its guard count
  }
  template <class Traits>
  template <class T, class MarkedPtr>
  hazard_eras<Traits>::guard_ptr<T, MarkedPtr>::guard_ptr(guard_ptr&& p) noexcept :
    base(p.ptr),
    he(p.he)
  {
    // the moved-from guard must no longer reference the hazard era
    p.ptr.reset();
    p.he = nullptr;
  }
  template <class Traits>
  template <class T, class MarkedPtr>
  auto hazard_eras<Traits>::guard_ptr<T, MarkedPtr>::operator=(const guard_ptr& p) noexcept
    -> guard_ptr&
  {
    if (&p == this)
      return *this;
    reset();
    this->ptr = p.ptr;
    he = p.he;
    if (he != nullptr)
      he->add_guard();
    return *this;
  }
  template <class Traits>
  template <class T, class MarkedPtr>
  auto hazard_eras<Traits>::guard_ptr<T, MarkedPtr>::operator=(guard_ptr&& p) noexcept
    -> guard_ptr&
  {
    if (&p == this)
      return *this;
    reset();
    this->ptr = std::move(p.ptr);
    he = p.he;
    p.ptr.reset();
    p.he = nullptr;
    return *this;
  }
  template <class Traits>
  template <class T, class MarkedPtr>
  void hazard_eras<Traits>::guard_ptr<T, MarkedPtr>::acquire(const concurrent_ptr<T>& p,
    std::memory_order order)
  {
    if (order == std::memory_order_relaxed || order == std::memory_order_consume) {
      // at least acquire semantics is required so that the following era_clock load
      // cannot observe a value older than the construction era of the loaded object
      order = std::memory_order_acquire;
    }

    era_t prev_era = he == nullptr ? 0 : he->get_era();
    for (;;)
    {
      auto value = p.load(order);
      auto era = era_clock.load(std::memory_order_relaxed);
      if (era == prev_era) {
        this->ptr = value;
        return;
      }

      if (he != nullptr)
      {
        assert(he->get_era() != era);
        if (he->guards() == 1)
        {
          // this guard is the sole user of the hazard era slot -> just update its era
          he->set_era(era);
          prev_era = era;
          continue;
        }
        // the slot is shared with other guards -> release it and allocate a fresh one
        local_thread_data().release_hazard_era(he);
      }

      assert(he == nullptr);
      he = local_thread_data().alloc_hazard_era(era);
      prev_era = era;
    }
  }
  template <class Traits>
  template <class T, class MarkedPtr>
  bool hazard_eras<Traits>::guard_ptr<T, MarkedPtr>::acquire_if_equal(
    const concurrent_ptr<T>& p,
    const MarkedPtr& expected,
    std::memory_order order)
  {
    if (order == std::memory_order_relaxed || order == std::memory_order_consume) {
      // same rationale as in acquire - we need at least acquire semantics here
      order = std::memory_order_acquire;
    }

    auto p1 = p.load(order);
    if (p1 == nullptr || p1 != expected) {
      reset();
      return p1 == expected;
    }

    const auto era = era_clock.load(std::memory_order_relaxed);
    if (he != nullptr && he->guards() == 1)
      he->set_era(era);
    else
    {
      reset();
      he = local_thread_data().alloc_hazard_era(era);
    }

    // reload the pointer to verify it is still the expected value now that the era
    // has been announced
    this->ptr = p.load(std::memory_order_relaxed);
    if (this->ptr != p1)
    {
      reset();
      return false;
    }
    return true;
  }
  template <class Traits>
  template <class T, class MarkedPtr>
  void hazard_eras<Traits>::guard_ptr<T, MarkedPtr>::reset() noexcept
  {
    local_thread_data().release_hazard_era(he);
    assert(this->he == nullptr);
    this->ptr.reset();
  }
  template <class Traits>
  template <class T, class MarkedPtr>
  void hazard_eras<Traits>::guard_ptr<T, MarkedPtr>::do_swap(guard_ptr& g) noexcept
  {
    std::swap(he, g.he);
  }
  template <class Traits>
  template <class T, class MarkedPtr>
  void hazard_eras<Traits>::guard_ptr<T, MarkedPtr>::reclaim(Deleter d) noexcept
  {
    auto p = this->ptr.get();
    reset();
    p->set_deleter(std::move(d));
    // stamp the node with the era in which it was retired and advance the era clock
    p->retirement_era = era_clock.fetch_add(1, std::memory_order_release);

    if (local_thread_data().add_retired_node(p) >= allocation_strategy::retired_nodes_threshold())
      local_thread_data().scan();
  }
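
  // Illustrative usage sketch (an assumption for exposition, not part of this file):
  // a data structure built on this reclaimer typically protects and retires a node
  // roughly like this, where `node` is a hypothetical type managed via
  // hazard_eras<>::enable_concurrent_ptr and `head` is a concurrent_ptr<node>:
  //
  //   typename concurrent_ptr<node>::guard_ptr guard;
  //   guard.acquire(head, std::memory_order_acquire); // announce a hazard era
  //   /* ... dereference guard safely ... */
  //   guard.reset();                                  // drop protection again
  //   // or, after unlinking the node: guard.reclaim() to retire it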
  template <class Strategy, class Derived>
  struct alignas(64) basic_he_thread_control_block :
    detail::thread_block_list<Derived, detail::deletable_object_with_eras>::entry,
    aligned_object<basic_he_thread_control_block<Strategy, Derived>>
  {
    using era_t = uint64_t;

    ~basic_he_thread_control_block() {
      assert(last_hazard_era != nullptr);
    }
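
    // Each hazard_era slot holds a single tagged word ("value"): while in use it stores
    // the era this thread currently protects (shifted left by one, mark bit = 0); while
    // unused it stores a link to the next free slot (mark bit = 1). set_era/set_link
    // encode these states, try_get_era/get_link/is_link decode them.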
    struct hazard_era
    {
      hazard_era(): value(nullptr), guard_cnt(0) {}

      void set_era(era_t era)
      {
        value.store(marked_ptr(reinterpret_cast<void**>(era << 1)), std::memory_order_release);
        // the seq-cst fence ensures that the announced era becomes visible before the
        // protected pointer is reloaded
        std::atomic_thread_fence(std::memory_order_seq_cst);
      }

      era_t get_era() const
      {
        era_t result;
        bool success = try_get_era(result);
        (void)success;
        assert(success);
        return result;
      }

      uint64_t guards() const { return guard_cnt; }
      uint64_t add_guard() { return ++guard_cnt; }
      uint64_t release_guard() { return --guard_cnt; }

      bool try_get_era(era_t& result) const
      {
        // TSAN does not support the standalone fences, so fall back to an acquire-load there
        constexpr auto memory_order = TSAN_MEMORY_ORDER(std::memory_order_acquire, std::memory_order_relaxed);
        auto v = value.load(memory_order);
        if (v.mark() == 0)
        {
          result = reinterpret_cast<era_t>(v.get()) >> 1;
          return true;
        }
        return false; // the slot currently holds a link, not an era
      }

      void set_link(hazard_era* link)
      {
        assert(guard_cnt == 0);
        value.store(marked_ptr(reinterpret_cast<void**>(link), 1), std::memory_order_release);
      }

      hazard_era* get_link() const
      {
        return reinterpret_cast<hazard_era*>(value.load(std::memory_order_relaxed).get());
      }

      bool is_link() const
      {
        return value.load(std::memory_order_relaxed).mark() != 0;
      }
    private:
      using marked_ptr = xenium::marked_ptr<void*, 1>; // assumed alias; one mark bit distinguishes era vs. link
      std::atomic<marked_ptr> value;
      uint64_t guard_cnt; // accessed only by the owning thread
    };
    using hint = hazard_era*;

    void initialize(hint& hint)
    {
      Strategy::number_of_active_hes.fetch_add(self().number_of_hes(), std::memory_order_relaxed);
      hint = initialize_block(self());
    }

    void abandon()
    {
      Strategy::number_of_active_hes.fetch_sub(self().number_of_hes(), std::memory_order_relaxed);
      detail::thread_block_list<Derived, detail::deletable_object_with_eras>::entry::abandon();
    }
    hazard_era* alloc_hazard_era(hint& hint, era_t era)
    {
      if (last_hazard_era && last_era == era) {
        // fast path - the most recently allocated slot already announces this era
        last_hazard_era->add_guard();
        return last_hazard_era;
      }
      auto result = hint;
      if (result == nullptr)
        result = self().need_more_hes();

      hint = result->get_link();
      result->set_era(era);
      result->add_guard();

      last_hazard_era = result;
      last_era = era;
      return result;
    }
    void release_hazard_era(hazard_era*& he, hint& hint)
    {
      assert(he != nullptr);
      if (he->release_guard() == 0)
      {
        if (he == last_hazard_era)
          last_hazard_era = nullptr;
        // return the now unused slot to the thread-local free list
        he->set_link(hint);
        hint = he;
      }
      he = nullptr;
    }
    Derived& self() { return static_cast<Derived&>(*this); }

    hazard_era* begin() { return &eras[0]; }
    hazard_era* end() { return &eras[Strategy::K]; }
    const hazard_era* begin() const { return &eras[0]; }
    const hazard_era* end() const { return &eras[Strategy::K]; }
    template <typename T>
    static hazard_era* initialize_block(T& block)
    {
      // chain the slots of this block into a free list; the last slot links to the
      // first slot of the next block (if any)
      auto begin = block.begin();
      auto end = block.end() - 1;
      for (auto it = begin; it != end;)
      {
        auto next = it + 1;
        it->set_link(next);
        it = next;
      }
      end->set_link(block.initialize_next_block());
      return begin;
    }
    static void gather_protected_eras(std::vector<era_t>& protected_eras,
      const hazard_era* begin, const hazard_era* end)
    {
      for (auto it = begin; it != end; ++it)
      {
        era_t era;
        if (it->try_get_era(era))
          protected_eras.push_back(era);
      }
    }

    // cache of the most recently allocated slot and its era (see alloc_hazard_era)
    hazard_era* last_hazard_era;
    era_t last_era;
    hazard_era eras[Strategy::K];
  };
  template <class Strategy>
  struct static_he_thread_control_block :
    basic_he_thread_control_block<Strategy, static_he_thread_control_block<Strategy>>
  {
    using base = basic_he_thread_control_block<Strategy, static_he_thread_control_block>;
    using hazard_era = typename base::hazard_era;
    using era_t = typename base::era_t;

    void gather_protected_eras(std::vector<era_t>& protected_eras) const
    {
      base::gather_protected_eras(protected_eras, this->begin(), this->end());
    }

    hazard_era* need_more_hes() { throw bad_hazard_era_alloc("hazard era pool exceeded"); }
    constexpr size_t number_of_hes() const { return Strategy::K; }
    constexpr hazard_era* initialize_next_block() const { return nullptr; }
  };
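
  // Unlike the static control block above, which throws bad_hazard_era_alloc once its
  // fixed pool of Strategy::K slots is exhausted, the dynamic variant below grows on
  // demand by chaining additional hazard_eras_blocks.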
  template <class Strategy>
  struct dynamic_he_thread_control_block :
    basic_he_thread_control_block<Strategy, dynamic_he_thread_control_block<Strategy>>
  {
    using base = basic_he_thread_control_block<Strategy, dynamic_he_thread_control_block>;
    using hazard_era = typename base::hazard_era;
    using era_t = typename base::era_t;

    void gather_protected_eras(std::vector<era_t>& protected_eras) const
    {
      gather_protected_eras(*this, protected_eras);
    }
    struct alignas(64) hazard_eras_block : aligned_object<hazard_eras_block>
    {
      hazard_eras_block(size_t size) : size(size) {
        // the hazard_era slots live in the memory directly following this block header
        for (auto it = begin(); it != end(); ++it) {
          new(it) hazard_era;
        }
      }

      hazard_era* begin() { return reinterpret_cast<hazard_era*>(this + 1); }
      hazard_era* end() { return begin() + size; }

      const hazard_era* begin() const { return reinterpret_cast<const hazard_era*>(this + 1); }
      const hazard_era* end() const { return begin() + size; }

      const hazard_eras_block* next_block() const { return next; }
      hazard_era* initialize_next_block() { return next ? base::initialize_block(*next) : nullptr; }

      hazard_eras_block* next = nullptr;
      const size_t size;
    };
    const hazard_eras_block* next_block() const
    {
      // this acquire-load synchronizes-with the release-store in allocate_new_hazard_eras_block
      return he_block.load(std::memory_order_acquire);
    }

    size_t number_of_hes() const { return total_number_of_hes; }
    hazard_era* need_more_hes() { return allocate_new_hazard_eras_block(); }

    hazard_era* initialize_next_block()
    {
      auto block = he_block.load(std::memory_order_relaxed);
      return block ? base::initialize_block(*block) : nullptr;
    }
    template <typename T>
    static void gather_protected_eras(const T& block, std::vector<era_t>& protected_eras)
    {
      base::gather_protected_eras(protected_eras, block.begin(), block.end());

      // recursively walk the chain of blocks
      auto next = block.next_block();
      if (next)
        gather_protected_eras(*next, protected_eras);
    }
    hazard_era* allocate_new_hazard_eras_block()
    {
      // grow by at least K slots, or by half of the current total, whichever is larger
      size_t hes = std::max(static_cast<size_t>(Strategy::K), total_number_of_hes / 2);
      total_number_of_hes += hes;
      Strategy::number_of_active_hes.fetch_add(hes, std::memory_order_relaxed);

      // allocate the block header and its hazard_era slots in a single buffer
      size_t buffer_size = sizeof(hazard_eras_block) + hes * sizeof(hazard_era);
      void* buffer = hazard_eras_block::operator new(buffer_size);
      auto block = ::new(buffer) hazard_eras_block(hes);
      auto result = this->initialize_block(*block);
      block->next = he_block.load(std::memory_order_relaxed);
      // this release-store synchronizes-with the acquire-load in next_block
      he_block.store(block, std::memory_order_release);
      return result;
    }

    size_t total_number_of_hes = Strategy::K;
    std::atomic<hazard_eras_block*> he_block;
  };
  template <class Traits>
  struct alignas(64) hazard_eras<Traits>::thread_data : aligned_object<thread_data>
  {
    using HE = typename thread_control_block::hazard_era*;

    ~thread_data()
    {
      if (retire_list != nullptr)
      {
        // try to reclaim as much as possible; anything left over is handed to the
        // global list so another thread can adopt it later
        scan();
        if (retire_list != nullptr)
          global_thread_block_list.abandon_retired_nodes(retire_list);
        retire_list = nullptr;
      }

      if (control_block != nullptr) {
        global_thread_block_list.release_entry(control_block);
        control_block = nullptr;
      }
    }
    HE alloc_hazard_era(era_t era)
    {
      ensure_has_control_block();
      return control_block->alloc_hazard_era(hint, era);
    }

    void release_hazard_era(HE& he)
    {
      if (he != nullptr)
      {
        assert(control_block != nullptr);
        control_block->release_hazard_era(he, hint);
      }
    }

    std::size_t add_retired_node(detail::deletable_object_with_eras* p)
    {
      p->next = retire_list;
      retire_list = p;
      return ++number_of_retired_nodes;
    }
    void scan()
    {
      std::vector<era_t> protected_eras;
      protected_eras.reserve(allocation_strategy::number_of_active_hazard_eras());

      // this seq-cst fence enforces a total order with the seq-cst fence in hazard_era::set_era
      std::atomic_thread_fence(std::memory_order_seq_cst);

      auto adopted_nodes = global_thread_block_list.adopt_abandoned_retired_nodes();

      std::for_each(global_thread_block_list.begin(), global_thread_block_list.end(),
        [&protected_eras](const auto& entry)
        {
          // TSAN does not support the standalone fences, so use an acquire-load there instead
          constexpr auto memory_order = TSAN_MEMORY_ORDER(std::memory_order_acquire, std::memory_order_relaxed);
          if (entry.is_active(memory_order))
            entry.gather_protected_eras(protected_eras);
        });

      // this acquire-fence synchronizes-with the release-stores in set_era/set_link
      std::atomic_thread_fence(std::memory_order_acquire);

      std::sort(protected_eras.begin(), protected_eras.end());
      auto last = std::unique(protected_eras.begin(), protected_eras.end());
      protected_eras.erase(last, protected_eras.end());

      auto list = retire_list;
      retire_list = nullptr;
      number_of_retired_nodes = 0;
      reclaim_nodes(list, protected_eras);
      reclaim_nodes(adopted_nodes, protected_eras);
    }
    void ensure_has_control_block()
    {
      if (control_block == nullptr)
      {
        control_block = global_thread_block_list.acquire_inactive_entry();
        control_block->initialize(hint);
        control_block->activate();
      }
    }
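
    // A retired node may only be deleted once no thread announces an era that falls into
    // the node's lifetime interval [construction_era, retirement_era]; reclaim_nodes below
    // checks this against the sorted, de-duplicated list of protected eras gathered by scan().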
    void reclaim_nodes(detail::deletable_object_with_eras* list,
      const std::vector<era_t>& protected_eras)
    {
      while (list != nullptr)
      {
        auto cur = list;
        list = list->next;
        auto era_it = std::lower_bound(protected_eras.begin(), protected_eras.end(), cur->construction_era);
        if (era_it == protected_eras.end() || *era_it > cur->retirement_era)
          cur->delete_self();
        else
          add_retired_node(cur); // still potentially protected - keep it for the next scan
      }
    }
    detail::deletable_object_with_eras* retire_list = nullptr;
    std::size_t number_of_retired_nodes = 0;
    typename thread_control_block::hint hint;

    thread_control_block* control_block = nullptr;

    friend class hazard_eras;
    ALLOCATION_COUNTER(hazard_eras);
  };
  template <class Traits>
  inline typename hazard_eras<Traits>::thread_data& hazard_eras<Traits>::local_thread_data()
  {
    static thread_local thread_data local_thread_data;
    return local_thread_data;
  }
#ifdef TRACK_ALLOCATIONS
  template <class Traits>
  inline void hazard_eras<Traits>::count_allocation()
  { local_thread_data().allocation_counter.count_allocation(); }

  template <class Traits>
  inline void hazard_eras<Traits>::count_reclamation()
  { local_thread_data().allocation_counter.count_reclamation(); }
#endif
}} // namespace reclamation, namespace xenium