Intel(R) Threading Building Blocks Doxygen Documentation, version 4.2.3
queuing_mutex.h
/*
    Copyright (c) 2005-2020 Intel Corporation

    Licensed under the Apache License, Version 2.0 (the "License");
    you may not use this file except in compliance with the License.
    You may obtain a copy of the License at

        http://www.apache.org/licenses/LICENSE-2.0

    Unless required by applicable law or agreed to in writing, software
    distributed under the License is distributed on an "AS IS" BASIS,
    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    See the License for the specific language governing permissions and
    limitations under the License.
*/

#ifndef __TBB_queuing_mutex_H
#define __TBB_queuing_mutex_H

#define __TBB_queuing_mutex_H_include_area
#include "internal/_warning_suppress_enable_notice.h"

#include <cstring>
#include "atomic.h"
#include "tbb_profiling.h"

namespace tbb {

//! Queuing mutex with local-only spinning.
/** @ingroup synchronization */
class queuing_mutex : internal::mutex_copy_deprecated_and_disabled {
public:
    //! Construct unacquired mutex.
    queuing_mutex() {
        q_tail = NULL;
#if TBB_USE_THREADING_TOOLS
        internal_construct();
#endif
    }

    //! The scoped locking pattern
    /** It helps to avoid the common problem of forgetting to release the lock.
        It also nicely provides the "node" for queuing locks. */
    class scoped_lock: internal::no_copy {
        //! Initialize fields to mean "no lock held".
        void initialize() {
            mutex = NULL;
            going = 0;
#if TBB_USE_ASSERT
            internal::poison_pointer(next);
#endif /* TBB_USE_ASSERT */
        }

    public:
        //! Construct lock that has not acquired a mutex.
        /** Equivalent to zero-initialization of *this. */
        scoped_lock() {initialize();}

        //! Acquire lock on given mutex.
        scoped_lock( queuing_mutex& m ) {
            initialize();
            acquire(m);
        }

        //! Release lock (if lock is held).
        ~scoped_lock() {
            if( mutex ) release();
        }

        //! Acquire lock on given mutex.
        void __TBB_EXPORTED_METHOD acquire( queuing_mutex& m );

        //! Acquire lock on given mutex if free (i.e. non-blocking)
        bool __TBB_EXPORTED_METHOD try_acquire( queuing_mutex& m );

        //! Release lock.
        void __TBB_EXPORTED_METHOD release();

    private:
        //! The pointer to the mutex owned, or NULL if not holding a mutex.
        queuing_mutex* mutex;

        //! The pointer to the next competitor for the mutex.
        scoped_lock* next;

        //! The local spin-wait variable.
        /** Inverted (0 - blocked, 1 - acquired the mutex) for the sake of
            zero-initialization. */
        uintptr_t going;
    };

    void __TBB_EXPORTED_METHOD internal_construct();

    // Mutex traits
    static const bool is_rw_mutex = false;
    static const bool is_recursive_mutex = false;
    static const bool is_fair_mutex = true;

private:
    //! The last competitor requesting the lock.
    atomic<scoped_lock*> q_tail;

};

__TBB_DEFINE_PROFILING_SET_NAME(queuing_mutex)

} // namespace tbb

#include "internal/_warning_suppress_disable_notice.h"
#undef __TBB_queuing_mutex_H_include_area

#endif /* __TBB_queuing_mutex_H */
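The header above already shows the whole public surface: scoped_lock is both the RAII guard and the per-thread queue node, its only operations are acquire, try_acquire, and release, and the mutex itself stores just the q_tail pointer. Below is a minimal usage sketch, not part of the header; the counter, thread count, and function names are hypothetical, and it assumes the classic "tbb/queuing_mutex.h" include path of TBB 2020.

#include <cstdio>
#include <thread>
#include <vector>
#include "tbb/queuing_mutex.h"

// Hypothetical shared state guarded by a queuing_mutex.
static tbb::queuing_mutex counter_mutex;
static long counter = 0;

void increment_many(int n) {
    for (int i = 0; i < n; ++i) {
        // The constructor acquires the mutex; the destructor releases it.
        // Each scoped_lock is also the waiter node, so a blocked thread
        // spins only on its own 'going' flag (local-only spinning).
        tbb::queuing_mutex::scoped_lock lock(counter_mutex);
        ++counter;
    }
}

// Non-blocking variant: a default-constructed scoped_lock holds no mutex
// until try_acquire() succeeds; the destructor still releases it if held.
bool try_increment() {
    tbb::queuing_mutex::scoped_lock lock;
    if (!lock.try_acquire(counter_mutex))
        return false;
    ++counter;
    return true;
}

int main() {
    std::vector<std::thread> workers;
    for (int t = 0; t < 4; ++t)
        workers.emplace_back(increment_many, 100000);
    for (auto& w : workers)
        w.join();
    std::printf("counter = %ld\n", counter);  // expect 400000
    return 0;
}

Because waiters are chained through their next pointers off q_tail and each one spins on its own going word, the lock is handed off in arrival order (hence is_fair_mutex is true) and waiting threads do not hammer a shared cache line while they spin.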