Intel(R) Threading Building Blocks Doxygen Documentation, version 4.2.3
reader_writer_lock.cpp
/*
    Copyright (c) 2005-2020 Intel Corporation

    Licensed under the Apache License, Version 2.0 (the "License");
    you may not use this file except in compliance with the License.
    You may obtain a copy of the License at

        http://www.apache.org/licenses/LICENSE-2.0

    Unless required by applicable law or agreed to in writing, software
    distributed under the License is distributed on an "AS IS" BASIS,
    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    See the License for the specific language governing permissions and
    limitations under the License.
*/

#include "tbb/reader_writer_lock.h"
#include "tbb/tbb_machine.h"
#include "tbb/tbb_exception.h"
#include "itt_notify.h"

#if defined(_MSC_VER) && defined(_Wp64)
    // Workaround for overzealous compiler warnings in /Wp64 mode
    #pragma warning (disable: 4244)
#endif

namespace tbb {
namespace interface5 {

const uintptr_t WFLAG1 = 0x1;  // writer interested or active
const uintptr_t WFLAG2 = 0x2;  // writers interested, no entering readers
const uintptr_t RFLAG = 0x4;   // reader interested but not active
const uintptr_t RC_INCR = 0x8; // to adjust reader count

// Perform an atomic bitwise-OR on the operand, and return its previous value.
inline uintptr_t fetch_and_or(atomic<uintptr_t>& operand, uintptr_t value) {
    for (tbb::internal::atomic_backoff b;;b.pause()) {
        uintptr_t old = operand;
        uintptr_t result = operand.compare_and_swap(old|value, old);
        if (result==old) return result;
    }
}

// Perform an atomic bitwise-AND on the operand, and return its previous value.
inline uintptr_t fetch_and_and(atomic<uintptr_t>& operand, uintptr_t value) {
    for (tbb::internal::atomic_backoff b;;b.pause()) {
        uintptr_t old = operand;
        uintptr_t result = operand.compare_and_swap(old&value, old);
        if (result==old) return result;
    }
}
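
Both helpers are the classic compare-and-swap retry loop: snapshot the word, compute the new value, and retry if another thread raced in between. For comparison, here is a minimal stand-alone sketch of the same idiom against C++11 std::atomic; it is illustrative only (std::atomic already offers fetch_or/fetch_and directly).

#include <atomic>
#include <cstdint>

// Illustrative stand-alone equivalent of fetch_and_or (not part of TBB).
inline std::uintptr_t cas_fetch_and_or(std::atomic<std::uintptr_t>& operand,
                                       std::uintptr_t value) {
    std::uintptr_t old = operand.load();
    // On failure compare_exchange_weak reloads `old` with the current value,
    // so each retry recomputes old|value from a fresh snapshot.
    while (!operand.compare_exchange_weak(old, old | value)) { }
    return old; // previous value, matching the fetch_and_or contract
}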

//! Spin WHILE the value at the location is greater than or equal to a given value
/** T and U should be comparable types. */
template<typename T, typename U>
void spin_wait_while_geq( const volatile T& location, U value ) {
    tbb::internal::atomic_backoff backoff;
    while( location>=value ) backoff.pause();
}

//! Spin UNTIL (location & value) is true.
/** T and U should be comparable types. */
template<typename T, typename U>
void spin_wait_until_and( const volatile T& location, U value ) {
    tbb::internal::atomic_backoff backoff;
    while( !(location & value) ) backoff.pause();
}

void reader_writer_lock::internal_construct() {
    reader_head = NULL;
    writer_head = NULL;
    writer_tail = NULL;
    rdr_count_and_flags = 0;
    my_current_writer = tbb_thread::id();
#if TBB_USE_THREADING_TOOLS
    ITT_SYNC_CREATE(this, _T("tbb::reader_writer_lock"), _T(""));
#endif /* TBB_USE_THREADING_TOOLS */
}

void reader_writer_lock::internal_destroy() {
    __TBB_ASSERT(rdr_count_and_flags==0, "reader_writer_lock destroyed with pending readers/writers.");
    __TBB_ASSERT(reader_head==NULL, "reader_writer_lock destroyed with pending readers.");
    __TBB_ASSERT(writer_tail==NULL, "reader_writer_lock destroyed with pending writers.");
    __TBB_ASSERT(writer_head==NULL, "reader_writer_lock destroyed with pending/active writers.");
}

// Acquires the reader_writer_lock for write. If the lock is currently held in write
// mode by another context, the writer will block by spinning on a local variable.
// Throws exception improper_lock if the context tries to acquire a
// reader_writer_lock that it already has write ownership of.
void reader_writer_lock::lock() {
    if (is_current_writer()) { // recursive lock attempt
        // we don't support recursive writer locks; throw exception
        tbb::internal::throw_exception(tbb::internal::eid_improper_lock);
    }
    else {
        scoped_lock *a_writer_lock = new scoped_lock();
        (void) start_write(a_writer_lock);
    }
}
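
A minimal usage sketch of the blocking write path; the lock object and shared counter below are illustrative, not part of this file.

// Illustrative usage, assuming a shared lock and a protected counter:
tbb::reader_writer_lock rw_lock;
int shared_value = 0;

void exclusive_update() {
    rw_lock.lock();    // blocks (spins) until write ownership is granted
    ++shared_value;    // exclusive access: no readers or other writers here
    rw_lock.unlock();  // hands off to the next queued writer, or wakes readers
}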

// Tries to acquire the reader_writer_lock for write. This function does not block.
// Return Value: True or false, depending on whether the lock is acquired or not.
// If the lock is already held by this acquiring context, try_lock() returns false.
bool reader_writer_lock::try_lock() {
    if (is_current_writer()) { // recursive lock attempt
        return false;
    }
    else {
        scoped_lock *a_writer_lock = new scoped_lock();
        a_writer_lock->status = waiting_nonblocking;
        return start_write(a_writer_lock);
    }
}
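
The non-blocking variant either acquires immediately or gives up, and, as the comment above notes, it also returns false on a recursive attempt by the owning writer. A short sketch, continuing the illustrative rw_lock/shared_value from above:

// Illustrative usage: opportunistic write attempt that never blocks.
void try_exclusive_update() {
    if (rw_lock.try_lock()) {  // false if contended or recursively requested
        ++shared_value;
        rw_lock.unlock();
    }
    // else: fall through and do other work instead of waiting
}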

bool reader_writer_lock::start_write(scoped_lock *I) {
    tbb_thread::id id = this_tbb_thread::get_id();
    scoped_lock *pred = NULL;
    if (I->status == waiting_nonblocking) {
        if ((pred = writer_tail.compare_and_swap(I, NULL)) != NULL) {
            delete I;
            return false;
        }
    }
    else {
        ITT_NOTIFY(sync_prepare, this);
        pred = writer_tail.fetch_and_store(I);
    }
    if (pred)
        pred->next = I;
    else {
        set_next_writer(I);
        if (I->status == waiting_nonblocking) {
            if (I->next) { // potentially more writers
                set_next_writer(I->next);
            }
            else { // no more writers
                writer_head.fetch_and_store(NULL);
                if (I != writer_tail.compare_and_swap(NULL, I)) { // an incoming writer is in the process of being added
                    spin_wait_while_eq(I->next, (scoped_lock *)NULL); // wait for new writer to be added
                    __TBB_ASSERT(I->next, "There should be a node following the last writer.");
                    set_next_writer(I->next);
                }
            }
            delete I;
            return false;
        }
    }
    spin_wait_while_eq(I->status, waiting);
    ITT_NOTIFY(sync_acquired, this);
    my_current_writer = id;
    return true;
}

void reader_writer_lock::set_next_writer(scoped_lock *W) {
    writer_head = W;
    if (W->status == waiting_nonblocking) {
        if (rdr_count_and_flags.compare_and_swap(WFLAG1+WFLAG2, 0) == 0) {
            W->status = active;
        }
    }
    else {
        if (fetch_and_or(rdr_count_and_flags, WFLAG1) & RFLAG) { // reader present
            spin_wait_until_and(rdr_count_and_flags, WFLAG2); // block until readers set WFLAG2
        }
        else { // no reader in timing window
            __TBB_AtomicOR(&rdr_count_and_flags, WFLAG2);
        }
        spin_wait_while_geq(rdr_count_and_flags, RC_INCR); // block until readers finish
        W->status = active;
    }
}

// Acquires the reader_writer_lock for read. If the lock is currently held by a writer,
// this reader will block and wait until the writers are done.
// Throws exception improper_lock when the context tries to acquire a reader_writer_lock
// that it already has write ownership of.
void reader_writer_lock::lock_read() {
    if (is_current_writer()) { // recursive lock attempt
        // we don't support writer->reader downgrade; throw exception
        tbb::internal::throw_exception(tbb::internal::eid_improper_lock);
    }
    else {
        scoped_lock_read a_reader_lock;
        start_read(&a_reader_lock);
    }
}

// Tries to acquire the reader_writer_lock for read. This function does not block.
// Return Value: True or false, depending on whether the lock is acquired or not.
bool reader_writer_lock::try_lock_read() {
    if (is_current_writer()) { // recursive lock attempt
        return false;
    }
    else {
        if (rdr_count_and_flags.fetch_and_add(RC_INCR) & (WFLAG1+WFLAG2)) { // writers present
            rdr_count_and_flags -= RC_INCR;
            return false;
        }
        else { // no writers
            ITT_NOTIFY(sync_acquired, this);
            return true;
        }
    }
}
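
Read acquisition mirrors the write interface: lock_read() blocks behind any interested writer, while try_lock_read() backs out by undoing its optimistic RC_INCR when a writer flag is set. A short sketch, again using the illustrative rw_lock/shared_value:

// Illustrative usage: shared read access, blocking and non-blocking.
int blocking_read() {
    rw_lock.lock_read();          // waits while writers are interested/active
    int snapshot = shared_value;  // other readers may hold the lock concurrently
    rw_lock.unlock();
    return snapshot;
}

bool optimistic_read(int& out) {
    if (!rw_lock.try_lock_read()) return false;  // writer present; gave up
    out = shared_value;
    rw_lock.unlock();
    return true;
}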

void reader_writer_lock::start_read(scoped_lock_read *I) {
    ITT_NOTIFY(sync_prepare, this);
    I->next = reader_head.fetch_and_store(I);
    if (!I->next) { // first arriving reader in my group; set RFLAG, test writer flags
        // unblock and/or update statuses of non-blocking readers
        if (!(fetch_and_or(rdr_count_and_flags, RFLAG) & (WFLAG1+WFLAG2))) { // no writers
            unblock_readers();
        }
    }
    __TBB_ASSERT(I->status == waiting || I->status == active, "Lock requests should be waiting or active before blocking.");
    spin_wait_while_eq(I->status, waiting); // block
    if (I->next) {
        __TBB_ASSERT(I->next->status == waiting, NULL);
        rdr_count_and_flags += RC_INCR;
        I->next->status = active; // wake successor
    }
    ITT_NOTIFY(sync_acquired, this);
}

void reader_writer_lock::unblock_readers() {
    // clear rdr interest flag, increment rdr count
    __TBB_ASSERT(rdr_count_and_flags&RFLAG, NULL);
    rdr_count_and_flags += RC_INCR-RFLAG;
    __TBB_ASSERT(rdr_count_and_flags >= RC_INCR, NULL);
    // indicate clear of window
    if (rdr_count_and_flags & WFLAG1 && !(rdr_count_and_flags & WFLAG2)) {
        __TBB_AtomicOR(&rdr_count_and_flags, WFLAG2);
    }
    // unblock waiting readers
    scoped_lock_read *head = reader_head.fetch_and_store(NULL);
    __TBB_ASSERT(head, NULL);
    __TBB_ASSERT(head->status == waiting, NULL);
    head->status = active;
}

// Releases the reader_writer_lock
void reader_writer_lock::unlock() {
    if( my_current_writer!=tbb_thread::id() ) {
        // A writer owns the lock
        __TBB_ASSERT(is_current_writer(), "caller of reader_writer_lock::unlock() does not own the lock.");
        __TBB_ASSERT(writer_head, NULL);
        __TBB_ASSERT(writer_head->status==active, NULL);
        scoped_lock *a_writer_lock = writer_head;
        end_write(a_writer_lock);
        __TBB_ASSERT(a_writer_lock != writer_head, "Internal error: About to turn writer_head into dangling reference.");
        delete a_writer_lock;
    } else {
        end_read();
    }
}

void reader_writer_lock::end_write(scoped_lock *I) {
    __TBB_ASSERT(I==writer_head, "Internal error: can't unlock a thread that is not holding the lock.");
    my_current_writer = tbb_thread::id();
    ITT_NOTIFY(sync_releasing, this);
    if (I->next) { // potentially more writers
        writer_head = I->next;
        writer_head->status = active;
    }
    else { // No more writers; clear writer flag, test reader interest flag
        __TBB_ASSERT(writer_head, NULL);
        if (fetch_and_and(rdr_count_and_flags, ~(WFLAG1+WFLAG2)) & RFLAG) {
            unblock_readers();
        }
        writer_head.fetch_and_store(NULL);
        if (I != writer_tail.compare_and_swap(NULL, I)) { // an incoming writer is in the process of being added
            spin_wait_while_eq(I->next, (scoped_lock *)NULL); // wait for new writer to be added
            __TBB_ASSERT(I->next, "There should be a node following the last writer.");
            set_next_writer(I->next);
        }
    }
}

void reader_writer_lock::end_read() {
    ITT_NOTIFY(sync_releasing, this);
    __TBB_ASSERT(rdr_count_and_flags >= RC_INCR, "unlock() called but no readers hold the lock.");
    rdr_count_and_flags -= RC_INCR;
}

inline bool reader_writer_lock::is_current_writer() {
    return my_current_writer==this_tbb_thread::get_id();
}

// Construct with a blocking attempt to acquire a write lock on the passed reader_writer_lock
void reader_writer_lock::scoped_lock::internal_construct (reader_writer_lock& lock) {
    mutex = &lock;
    next = NULL;
    status = waiting;
    if (mutex->is_current_writer()) { // recursive lock attempt
        // we don't support recursive writer locks; throw exception
        tbb::internal::throw_exception(tbb::internal::eid_improper_lock);
    }
    else { // this thread holds no locks
        (void) mutex->start_write(this);
    }
}

inline reader_writer_lock::scoped_lock::scoped_lock() : mutex(NULL), next(NULL) {
    status = waiting;
}

// Construct with a blocking attempt to acquire a read lock on the passed reader_writer_lock
void reader_writer_lock::scoped_lock_read::internal_construct (reader_writer_lock& lock) {
    mutex = &lock;
    next = NULL;
    status = waiting;
    if (mutex->is_current_writer()) { // recursive lock attempt
        // we don't support writer->reader downgrade; throw exception
        tbb::internal::throw_exception(tbb::internal::eid_improper_lock);
    }
    else { // this thread holds no locks
        mutex->start_read(this);
    }
}

inline reader_writer_lock::scoped_lock_read::scoped_lock_read() : mutex(NULL), next(NULL) {
    status = waiting;
}

void reader_writer_lock::scoped_lock::internal_destroy() {
    if (mutex) {
        __TBB_ASSERT(mutex->is_current_writer(), "~scoped_lock() destroyed by thread different than thread that holds lock.");
        mutex->end_write(this);
    }
    status = invalid;
}

void reader_writer_lock::scoped_lock_read::internal_destroy() {
    if (mutex)
        mutex->end_read();
    status = invalid;
}

} // namespace interface5
} // namespace tbb
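
The scoped guards defined above are the preferred client interface: they acquire in internal_construct() and release in internal_destroy(), so the lock is released on every exit path without explicit unlock() calls. A minimal RAII sketch; the task functions and counter are illustrative, not part of this file.

#include "tbb/reader_writer_lock.h"

// Illustrative RAII usage of the scoped guards:
tbb::reader_writer_lock rw_mutex;
int counter = 0;

void writer_task() {
    tbb::reader_writer_lock::scoped_lock guard(rw_mutex); // blocking write lock
    ++counter;
} // write lock released when guard is destroyed

void reader_task() {
    tbb::reader_writer_lock::scoped_lock_read guard(rw_mutex); // blocking read lock
    int snapshot = counter;
    (void)snapshot;
} // read lock released when guard is destroyed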