Intel(R) Threading Building Blocks Doxygen Documentation
version 4.2.3
Source listing (excerpts) of the queuing_rw_mutex implementation.
// Spin to acquire the tiny internal lock.
while( !try_acquire_internal_lock() )
    __TBB_Pause(1);
// unblock_or_wait_on_internal_lock(flag): wait or release, depending on the flag.
if( flag )
    wait_for_release_of_internal_lock();
else
    release_internal_lock();
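For context, the internal lock that these helpers manipulate is the one-byte my_internal_lock field declared in the symbol list at the end of this page. Below is a minimal sketch of the three helpers, consistent with the declarations on this page but compressed from the real definitions (assumption: the actual TBB code also specifies explicit memory semantics on the compare_and_swap):

bool try_acquire_internal_lock() {
    // Succeeds only if the byte was RELEASED and is now flipped to ACQUIRED.
    return as_atomic(my_internal_lock).compare_and_swap(ACQUIRED, RELEASED) == RELEASED;
}
void release_internal_lock() {
    __TBB_store_with_release(my_internal_lock, RELEASED);
}
void wait_for_release_of_internal_lock() {
    spin_wait_until_eq(my_internal_lock, RELEASED);
}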
#if defined(_MSC_VER) && !defined(__INTEL_COMPILER)
    // Workaround for overzealous compiler warnings about pointer truncation
    #pragma warning (push)
    #pragma warning (disable: 4311 4312)
#endif
template<memory_semantics M>
static T* fetch_and_add( T* volatile * location, word addend ) {
    return reinterpret_cast<T*>( atomic_traits<sizeof(T*),M>::fetch_and_add(location, addend) );
}

template<memory_semantics M>
static T* fetch_and_store( T* volatile * location, T* value ) {
    return reinterpret_cast<T*>( atomic_traits<sizeof(T*),M>::fetch_and_store(location, reinterpret_cast<word>(value)) );
}

template<memory_semantics M>
static T* compare_and_swap( T* volatile * location, T* value, T* comparand ) {
    return reinterpret_cast<T*>( atomic_traits<sizeof(T*),M>::compare_and_swap(location, reinterpret_cast<word>(value),
                                                                               reinterpret_cast<word>(comparand)) );
}

// Bit-twiddling views of the stored pointer value.
T* operator&( word operand2 ) const {
    return reinterpret_cast<T*>( reinterpret_cast<word>(ref) & operand2 );
}
T* operator|( word operand2 ) const {
    return reinterpret_cast<T*>( reinterpret_cast<word>(ref) | operand2 );
}
#if defined(_MSC_VER) && !defined(__INTEL_COMPILER)
    #pragma warning (pop)
#endif
inline uintptr_t get_flag( queuing_rw_mutex::scoped_lock* ptr ) {
    return uintptr_t(ptr) & FLAG;
}
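Everything above depends on one trick: an aligned pointer's low-order bit is always zero, so it can carry an extra "in use" flag. A self-contained illustration of the packing and unpacking (hypothetical Node type and kFlag constant; standard C++ rather than TBB internals):

#include <cassert>
#include <cstdint>

struct Node { long payload; };   // alignment >= 2 guarantees a free low bit

int main() {
    static const std::uintptr_t kFlag = 1;
    Node n;
    Node* p = &n;
    // Pack: fold the flag into the pointer's unused low-order bit.
    std::uintptr_t tagged = reinterpret_cast<std::uintptr_t>(p) | kFlag;
    // Unpack: mask the bit off before dereferencing.
    Node* clean = reinterpret_cast<Node*>(tagged & ~kFlag);
    assert(clean == &n && (tagged & kFlag) == kFlag);
    return 0;
}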
// scoped_lock::acquire( queuing_rw_mutex& m, bool write ): the read-acquire path.
__TBB_ASSERT( !my_mutex, "scoped_lock is already holding a mutex");
// ...
bool sync_prepare_done = false;
// ...
unsigned short pred_state;
// ...
if( uintptr_t(pred) & FLAG ) {
    // the predecessor is an upgrading reader signalling this thread to wait
    // ...
}
// ...
sync_prepare_done = true;
// ...
if( !sync_prepare_done )
    ITT_NOTIFY(sync_prepare, my_mutex);
// scoped_lock::try_acquire( queuing_rw_mutex& m, bool write ): non-blocking attempt.
__TBB_ASSERT( !my_mutex, "scoped_lock is already holding a mutex");
// ...
if( load<relaxed>(m.q_tail) )
    return false;   // the queue is non-empty, so the lock cannot be free
// scoped_lock::release(): the writer path, then the reader path.
if( this == my_mutex->q_tail.compare_and_swap<tbb::release>(NULL, this) ) {
    // this was the only item in the queue, and the queue is now empty
    // ...
}
// ... (hand off to a successor that is waiting for an upgrade)
acquire_internal_lock();
// ...
unblock_or_wait_on_internal_lock( get_flag(tmp) );
// ...
// Reader path: mark my_prev as "in use" while unlinking this node.
tmp = tricky_pointer::compare_and_swap<tbb::release>(&my_prev, pred, tricky_pointer(pred)|FLAG );
if( !(uintptr_t(tmp) & FLAG) ) {
    // ...
}
// ...
acquire_internal_lock();
// ...
tmp = tricky_pointer::fetch_and_store<tbb::release>(&(l_next->my_prev), pred);
// ...
acquire_internal_lock();
// ...
if( this != my_mutex->q_tail.compare_and_swap<tbb::release>(NULL, this) ) {
    // a successor is arriving: wait for it to link itself behind this node
    // ...
}
// ...
tmp = tricky_pointer::fetch_and_store<tbb::release>(&(n->my_prev), NULL);
// ...
unblock_or_wait_on_internal_lock( get_flag(tmp) );
// scoped_lock::downgrade_to_reader(): with no successor, check whether this
// node is still the queue tail; if so, a state change completes the downgrade.
if( this == my_mutex->q_tail.load<full_fence>() ) {
    // ...
}
// scoped_lock::upgrade_to_writer(): requeue self as a writer.
acquire_internal_lock();
// ...
n = tricky_pointer::fetch_and_add<tbb::acquire>(&my_next, FLAG);
unsigned short n_state = n->my_state;
// ...
tmp = tricky_pointer::fetch_and_store<tbb::release>(&(n->my_prev), this);
unblock_or_wait_on_internal_lock( get_flag(tmp) );
// ...
release_internal_lock();
// ...
// Mark my_prev as "in use", then try to lock the predecessor.
pred = tricky_pointer::fetch_and_add<tbb::acquire>(&my_prev, FLAG);
// ...
bool success = pred->try_acquire_internal_lock();
// ...
tmp = tricky_pointer::compare_and_swap<tbb::release>(&my_prev, pred, tricky_pointer(pred)|FLAG );
if( uintptr_t(tmp) & FLAG ) {
    // ...
} else {
    // ...
    pred->release_internal_lock();
}
// ...
pred->release_internal_lock();
// ...
wait_for_release_of_internal_lock();
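The elided portions of acquire, release, and upgrade_to_writer all spin locally on fields of the thread's own queue node (my_going, my_prev, my_state) via the two helpers documented below, which is what makes this a "local-only spinning" mutex. A minimal sketch of their semantics (assumption: the real helpers spin through the exponential-backoff class rather than a bare pause):

template<typename T, typename U>
void spin_wait_until_eq( const volatile T& location, const U value ) {
    while( location != value )
        __TBB_Pause(1);   // stand-in for exponential backoff
}

template<typename T, typename U>
void spin_wait_while_eq( const volatile T& location, U value ) {
    while( location == value )
        __TBB_Pause(1);
}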
Symbols referenced in this listing:

bool upgrade_to_writer() - Upgrade reader to become a writer.
void unblock_or_wait_on_internal_lock(uintptr_t) - A helper function.
#define __TBB_ASSERT(predicate, comment) - No-op version of __TBB_ASSERT.
void __TBB_store_with_release(volatile T &location, V value)
#define ITT_NOTIFY(name, obj)
void acquire(queuing_rw_mutex &m, bool write=true) - Acquire lock on given mutex.
state_t_flags - Flag bits in a state_t that specify information about a locking request.
class tricky_atomic_pointer - A view of a T* with additional functionality for twiddling low-order bits.
unsigned char my_internal_lock - A tiny internal lock.
bool try_acquire(queuing_rw_mutex &m, bool write=true) - Acquire lock on given mutex if free (i.e. non-blocking).
class atomic_backoff - Class that implements exponential backoff.
scoped_lock *__TBB_atomic *__TBB_atomic my_next
static T * fetch_and_add(T *volatile *location, word addend)
T __TBB_load_relaxed(const volatile T &location)
atomic< T > & as_atomic(T &t)
T * operator|(word operand2) const
T __TBB_load_with_acquire(const volatile T &location)
static const tricky_pointer::word FLAG - Mask for low order bit of a pointer.
scoped_lock *__TBB_atomic my_prev - The pointers to the previous and next competitors for a mutex.
void spin_wait_until_eq(const volatile T &location, const U value) - Spin UNTIL the value of the variable is equal to a given value.
class queuing_rw_mutex - Queuing reader-writer mutex with local-only spinning.
#define ITT_SYNC_CREATE(obj, type, name)
void acquire_internal_lock() - Acquire the internal lock.
const unsigned char ACQUIRED
tricky_atomic_pointer< queuing_rw_mutex::scoped_lock > tricky_pointer
atomic< scoped_lock * > q_tail - The last competitor requesting the lock.
static T * fetch_and_store(T *volatile *location, T *value)
void release_internal_lock() - Release the internal lock.
#define __TBB_control_consistency_helper()
atomic< state_t > my_state - State of the request: reader, writer, active reader, other service states.
atomic_selector< sizeof(T *)>::word word
void release() - Release the lock.
#define _T(string_literal) - Standard Windows style macro to mark up string literals.
T * operator&(word operand2) const
void spin_wait_while_eq(const volatile T &location, U value) - Spin WHILE the value of the variable is equal to a given value.
void __TBB_Pause(int32_t)
unsigned char __TBB_atomic my_going - The local spin-wait variable.
bool try_acquire_internal_lock() - Try to acquire the internal lock.
uintptr_t get_flag(queuing_rw_mutex::scoped_lock *ptr)
tricky_atomic_pointer(T *volatile &original)
const unsigned char RELEASED
tricky_atomic_pointer(T *&original)
void __TBB_store_relaxed(volatile T &location, V value)
void __TBB_EXPORTED_METHOD internal_construct()
void wait_for_release_of_internal_lock() - Wait for the internal lock to be released.
class scoped_lock - The scoped locking pattern.
static T * compare_and_swap(T *volatile *location, T *value, T *comparand)
bool downgrade_to_reader() - Downgrade writer to become a reader.
class no_copy - Base class for types that should not be copied or assigned.
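Taken together, the members above implement the familiar scoped locking pattern. A brief usage sketch (the mutex object, shared_value, and update_if_needed are illustrative, not part of TBB; only the tbb/queuing_rw_mutex.h API is assumed):

#include "tbb/queuing_rw_mutex.h"

tbb::queuing_rw_mutex table_mutex;   // illustrative shared mutex
int shared_value;                    // illustrative shared state

void update_if_needed( int fresh ) {
    // Enter as a reader; the lock is released when 'lock' leaves scope.
    tbb::queuing_rw_mutex::scoped_lock lock( table_mutex, /*write=*/false );
    if( shared_value != fresh ) {
        if( !lock.upgrade_to_writer() ) {
            // The upgrade had to release and re-acquire the lock, so another
            // writer may have run in between: re-check the condition.
            if( shared_value == fresh ) return;
        }
        shared_value = fresh;
        lock.downgrade_to_reader();  // continue under a reader lock
    }
}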
Copyright © 2005-2019 Intel Corporation. All Rights Reserved.
Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are registered trademarks or trademarks of Intel Corporation or its subsidiaries in the United States and other countries.
* Other names and brands may be claimed as the property of others.