#if defined(_MSC_VER) && defined(_Wp64)
    #pragma warning (disable: 4244)
#endif
// ...
namespace interface5 {
// Atomically ORs value into operand, returning the value observed before the update
// (compare-and-swap retry loop).
uintptr_t fetch_and_or(atomic<uintptr_t>& operand, uintptr_t value) {
    for (tbb::internal::atomic_backoff b; ; b.pause()) {
        uintptr_t old = operand;
        uintptr_t result = operand.compare_and_swap(old|value, old);
        if (result==old) return result;
    }
}
// Atomically ANDs value into operand, returning the value observed before the update.
uintptr_t fetch_and_and(atomic<uintptr_t>& operand, uintptr_t value) {
    for (tbb::internal::atomic_backoff b; ; b.pause()) {
        uintptr_t old = operand;
        uintptr_t result = operand.compare_and_swap(old&value, old);
        if (result==old) return result;
    }
}
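// --- Illustrative aside (not part of the original file) ---
// A minimal sketch of the same compare-and-swap retry idiom expressed with
// std::atomic for comparison; the function name is hypothetical, <atomic> and
// <cstdint> are assumed to be included, and TBB's exponential backoff is omitted.
inline std::uintptr_t sketch_fetch_and_or(std::atomic<std::uintptr_t>& operand, std::uintptr_t value) {
    std::uintptr_t old = operand.load();
    // On failure compare_exchange_weak reloads 'old', so simply retry until
    // old|value is successfully installed over the value last observed.
    while (!operand.compare_exchange_weak(old, old | value)) { }
    return old;  // the value held before the OR, matching fetch_and_or semantics
}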
//! Spin WHILE the value at the location is greater than or equal to a given value.
template<typename T, typename U>
void spin_wait_while_geq(const volatile T& location, U value) {
    tbb::internal::atomic_backoff backoff;
    while (location >= value) backoff.pause();
}
//! Spin UNTIL (location & value) is true.
template<typename T, typename U>
void spin_wait_until_and(const volatile T& location, U value) {
    tbb::internal::atomic_backoff backoff;
    while (!(location & value)) backoff.pause();
}
// Initializes the lock's queue pointers, reader count, and flags.
void reader_writer_lock::internal_construct() {
    // ...
    rdr_count_and_flags = 0;
#if TBB_USE_THREADING_TOOLS
    // ... (tool notification hooks elided)
#endif /* TBB_USE_THREADING_TOOLS */
}

// Checks that no readers or writers are still queued or active on destruction.
void reader_writer_lock::internal_destroy() {
    __TBB_ASSERT(rdr_count_and_flags==0, "reader_writer_lock destroyed with pending readers/writers.");
    __TBB_ASSERT(reader_head==NULL, "reader_writer_lock destroyed with pending readers.");
    __TBB_ASSERT(writer_tail==NULL, "reader_writer_lock destroyed with pending writers.");
    __TBB_ASSERT(writer_head==NULL, "reader_writer_lock destroyed with pending/active writers.");
}
// Acquires the lock for writing.
void reader_writer_lock::lock() {
    if (is_current_writer()) {
        // ... (a recursive write-lock attempt is rejected)
    }
    else {
        scoped_lock *a_writer_lock = new scoped_lock();
        (void) start_write(a_writer_lock);
    }
}
// Tries to acquire the lock for writing without blocking.
bool reader_writer_lock::try_lock() {
    if (is_current_writer()) { // recursive write-lock attempt
        return false;
    }
    // ...
    scoped_lock *a_writer_lock = new scoped_lock();
    a_writer_lock->status = waiting_nonblocking;  // ask start_write not to block
    return start_write(a_writer_lock);
}
// Queues writer request I and acquires the write lock; an unsatisfiable non-blocking request fails instead.
bool reader_writer_lock::start_write(scoped_lock *I) {
    scoped_lock *pred = NULL;
    if (I->status == waiting_nonblocking) {
        // Non-blocking request: succeeds only if no other writer is queued.
        if ((pred = writer_tail.compare_and_swap(I, NULL)) != NULL) {
            // ... (another writer is already queued; fail without blocking)
        }
    }
    else {
        pred = writer_tail.fetch_and_store(I);  // append I to the writer queue
    }
    // ...
    if (I->status == waiting_nonblocking) {
        set_next_writer(I->next);  // hand the head role to the next writer, if any
        // ...
        writer_head.fetch_and_store(NULL);
        if (I != writer_tail.compare_and_swap(NULL, I)) {
            // An incoming writer is being appended; wait for it to link itself in.
            __TBB_ASSERT(I->next, "There should be a node following the last writer.");
            set_next_writer(I->next);
        }
        // ... (the failed non-blocking request returns false)
    }
    // ... (block until this writer becomes the active one)
    my_current_writer = id;  // the current thread's id, captured earlier (elided)
    return true;
}
// Makes W the head writer and activates it once no readers or writers stand in the way.
void reader_writer_lock::set_next_writer(scoped_lock *W) {
    if (W->status == waiting_nonblocking) {
        // A non-blocking writer is activated only if the lock is completely free.
        if (rdr_count_and_flags.compare_and_swap(WFLAG1+WFLAG2, 0) == 0) {
            // ...
        }
    }
    // ...
}
void reader_writer_lock::lock_read() {
    if (is_current_writer()) {
        // ... (a read request from the current writer is rejected)
    }
    else {
        scoped_lock_read a_reader_lock;  // stack-allocated reader record
        start_read(&a_reader_lock);      // blocks until the read lock is held
    }
}
// Tries to acquire the lock for reading without blocking.
bool reader_writer_lock::try_lock_read() {
    if (is_current_writer()) {
        return false;  // the current writer may not also acquire a read lock
    }
    // ... (optimistically register a reader; if a writer turns out to be present:)
    rdr_count_and_flags -= RC_INCR;  // back out the reader registration and fail
    // ...
}
// Registers reader request I in the reader list and blocks until the read lock is held.
void reader_writer_lock::start_read(scoped_lock_read *I) {
    // ...
    I->next = reader_head.fetch_and_store(I);  // push I onto the reader list
    // ... (the first reader to arrive announces reader interest and may unblock waiters)
    __TBB_ASSERT(I->status == waiting || I->status == active, "Lock requests should be waiting or active before blocking.");
    // ... (block until this request becomes active; then, if I->next is another waiting reader:)
    rdr_count_and_flags += RC_INCR;  // account for that reader
    I->next->status = active;        // and propagate activation to it
    // ...
}
// Activates the readers that queued up while the lock was unavailable.
void reader_writer_lock::unblock_readers() {
    // ...
    if (rdr_count_and_flags & WFLAG1 && !(rdr_count_and_flags & WFLAG2)) {
        // ...
    }
    // Detach the list of waiting readers and activate its head; each reader
    // activates the one linked after it (see start_read).
    scoped_lock_read *head = reader_head.fetch_and_store(NULL);
    // ...
    head->status = active;
}
// Releases the lock, whether held for writing or for reading.
void reader_writer_lock::unlock() {
    // ... (if a writer currently owns the lock:)
    __TBB_ASSERT(is_current_writer(), "caller of reader_writer_lock::unlock() does not own the lock.");
    // ...
    scoped_lock *a_writer_lock = writer_head;
    end_write(a_writer_lock);
    __TBB_ASSERT(a_writer_lock != writer_head, "Internal error: About to turn writer_head into dangling reference.");
    delete a_writer_lock;  // the heap-allocated record created in lock()/try_lock()
    // ... (otherwise the caller holds a read lock and end_read() releases it)
}
// Releases the write lock held by request I and hands the lock to the next waiter.
void reader_writer_lock::end_write(scoped_lock *I) {
    __TBB_ASSERT(I==writer_head, "Internal error: can't unlock a thread that is not holding the lock.");
    // ...
    if (I->next) {  // another writer is already queued behind I
        writer_head = I->next;
        writer_head->status = active;  // hand the lock directly to that writer
    }
    else {  // I appears to be the last queued writer
        // ... (waiting readers, if any, are unblocked)
        writer_head.fetch_and_store(NULL);
        if (I != writer_tail.compare_and_swap(NULL, I)) {
            // An incoming writer is being appended; wait for it to link itself in.
            __TBB_ASSERT(I->next, "There should be a node following the last writer.");
            set_next_writer(I->next);
        }
    }
}
// Releases a read lock by decrementing the reader count.
void reader_writer_lock::end_read() {
    // ...
    __TBB_ASSERT(rdr_count_and_flags >= RC_INCR, "unlock() called but no readers hold the lock.");
    rdr_count_and_flags -= RC_INCR;
}

// Returns true if the calling thread currently holds the write lock.
inline bool reader_writer_lock::is_current_writer() {
    return my_current_writer == this_tbb_thread::get_id();
}
// Binds this scoped_lock to the given lock and acquires it for writing.
void reader_writer_lock::scoped_lock::internal_construct(reader_writer_lock& lock) {
    // ... (mutex is set to &lock and the request is initialized as waiting)
    if (mutex->is_current_writer()) {
        // ... (a recursive write acquisition is rejected)
    }
    else {
        (void) mutex->start_write(this);
    }
}

inline reader_writer_lock::scoped_lock::scoped_lock() : mutex(NULL), next(NULL) {
    // ...
}
void reader_writer_lock::scoped_lock_read::internal_construct(reader_writer_lock& lock) {
    // ... (mutex is set to &lock and the request is initialized as waiting)
    if (mutex->is_current_writer()) {
        // ... (a read request from the current writer is rejected)
    }
    else {
        mutex->start_read(this);  // blocks until the read lock is held
    }
}

inline reader_writer_lock::scoped_lock_read::scoped_lock_read() : mutex(NULL), next(NULL) {
    // ...
}
// Releases the write lock when the scoped_lock goes out of scope.
void reader_writer_lock::scoped_lock::internal_destroy() {
    __TBB_ASSERT(mutex->is_current_writer(), "~scoped_lock() destroyed by thread different than thread that holds lock.");
    mutex->end_write(this);
}

// Releases the read lock when the scoped_lock_read goes out of scope.
void reader_writer_lock::scoped_lock_read::internal_destroy() {
    mutex->end_read();
}

} // namespace interface5
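// --- Illustrative usage sketch (not part of the original file) ---
// A minimal example of how this lock is typically used through the classic
// tbb/reader_writer_lock.h interface. The shared counter and the two helper
// functions are hypothetical; error handling is omitted.
#include "tbb/reader_writer_lock.h"

namespace usage_sketch {

tbb::reader_writer_lock rw_mutex;   // one lock protecting shared_value
int shared_value = 0;

void write_once() {
    // scoped_lock acquires rw_mutex for writing and releases it on destruction.
    tbb::reader_writer_lock::scoped_lock write_guard(rw_mutex);
    ++shared_value;
}

int read_once() {
    // scoped_lock_read acquires rw_mutex for reading; multiple readers may share it.
    tbb::reader_writer_lock::scoped_lock_read read_guard(rw_mutex);
    return shared_value;
}

} // namespace usage_sketch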