Intel(R) Threading Building Blocks Doxygen Documentation, version 4.2.3
_mutex_padding.h
/*
    Copyright (c) 2005-2020 Intel Corporation

    Licensed under the Apache License, Version 2.0 (the "License");
    you may not use this file except in compliance with the License.
    You may obtain a copy of the License at

        http://www.apache.org/licenses/LICENSE-2.0

    Unless required by applicable law or agreed to in writing, software
    distributed under the License is distributed on an "AS IS" BASIS,
    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    See the License for the specific language governing permissions and
    limitations under the License.
*/

#ifndef __TBB_mutex_padding_H
#define __TBB_mutex_padding_H

// Wrapper for padding mutexes so that each is alone on a cache line, without requiring that
// they be allocated from a pool. Because we allow them to be defined anywhere, they must be
// two cache lines in size.

namespace tbb {
namespace interface7 {
namespace internal {

static const size_t cache_line_size = 64;

// Pad a mutex to occupy a number of full cache lines sufficient to avoid false sharing
// with other data; space overhead is up to 2*cache_line_size-1.
template<typename Mutex, bool is_rw> class padded_mutex;

// Specialization for plain (non-read-write) mutexes.
template<typename Mutex>
class padded_mutex<Mutex,false> {
    typedef long pad_type;
    pad_type my_pad[((sizeof(Mutex)+cache_line_size-1)/cache_line_size+1)*cache_line_size/sizeof(pad_type)];

    // Round 'this' up to the next cache-line boundary; my_pad is large enough that a Mutex
    // constructed there still lies entirely inside the object.
    Mutex *impl() { return (Mutex *)((uintptr_t(this)|(cache_line_size-1))+1); }

public:
    static const bool is_rw_mutex = Mutex::is_rw_mutex;
    static const bool is_recursive_mutex = Mutex::is_recursive_mutex;
    static const bool is_fair_mutex = Mutex::is_fair_mutex;

    padded_mutex() { new(impl()) Mutex(); }
    ~padded_mutex() { impl()->~Mutex(); }

    class scoped_lock : tbb::internal::no_copy {
        typename Mutex::scoped_lock my_scoped_lock;
    public:
        scoped_lock() : my_scoped_lock() {}
        scoped_lock( padded_mutex& m ) : my_scoped_lock(*m.impl()) { }

        void acquire( padded_mutex& m ) { my_scoped_lock.acquire(*m.impl()); }
        bool try_acquire( padded_mutex& m ) { return my_scoped_lock.try_acquire(*m.impl()); }
        void release() { my_scoped_lock.release(); }
    };
};

// Specialization for read-write mutexes.
template<typename Mutex>
class padded_mutex<Mutex,true> {
    typedef long pad_type;
    pad_type my_pad[((sizeof(Mutex)+cache_line_size-1)/cache_line_size+1)*cache_line_size/sizeof(pad_type)];

    // Round 'this' up to the next cache-line boundary, as in the non-read-write case.
    Mutex *impl() { return (Mutex *)((uintptr_t(this)|(cache_line_size-1))+1); }

public:
    static const bool is_rw_mutex = Mutex::is_rw_mutex;
    static const bool is_recursive_mutex = Mutex::is_recursive_mutex;
    static const bool is_fair_mutex = Mutex::is_fair_mutex;

    padded_mutex() { new(impl()) Mutex(); }
    ~padded_mutex() { impl()->~Mutex(); }

    class scoped_lock : tbb::internal::no_copy {
        typename Mutex::scoped_lock my_scoped_lock;
    public:
        scoped_lock() : my_scoped_lock() {}
        scoped_lock( padded_mutex& m, bool write = true ) : my_scoped_lock(*m.impl(),write) { }

        void acquire( padded_mutex& m, bool write = true ) { my_scoped_lock.acquire(*m.impl(),write); }
        bool try_acquire( padded_mutex& m, bool write = true ) { return my_scoped_lock.try_acquire(*m.impl(),write); }
        bool upgrade_to_writer() { return my_scoped_lock.upgrade_to_writer(); }
        bool downgrade_to_reader() { return my_scoped_lock.downgrade_to_reader(); }
        void release() { my_scoped_lock.release(); }
    };
};

} // namespace internal
} // namespace interface7
} // namespace tbb

#endif /* __TBB_mutex_padding_H */
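
The wrapper forwards the wrapped mutex's interface: padded_mutex<M,false> exposes scoped_lock, acquire, try_acquire, and release, while padded_mutex<M,true> adds the read-write operations. The sketch below is a minimal standalone illustration of the same padding and alignment arithmetic used by impl(), written against std::mutex so it compiles without TBB's internal headers; the demo_* names are illustrative only and not part of TBB.

// Standalone sketch of the padding technique (assumed names: demo_padded_mutex, demo_cache_line).
#include <cstddef>
#include <cstdint>
#include <mutex>
#include <new>
#include <cassert>

static const std::size_t demo_cache_line = 64;

template<typename Mutex>
class demo_padded_mutex {
    typedef long pad_type;
    // Storage for the mutex rounded up to whole cache lines, plus one extra line, so that a
    // cache-line-aligned Mutex fits no matter where the object itself happens to start.
    pad_type my_pad[((sizeof(Mutex)+demo_cache_line-1)/demo_cache_line+1)*demo_cache_line/sizeof(pad_type)];

    // Same rounding trick as impl() above: OR-ing with (line size - 1) and adding 1 yields
    // the smallest multiple of the cache-line size strictly greater than 'this'.
    Mutex *impl() { return reinterpret_cast<Mutex*>((reinterpret_cast<std::uintptr_t>(this)|(demo_cache_line-1))+1); }

public:
    demo_padded_mutex() { new(impl()) Mutex(); }
    ~demo_padded_mutex() { impl()->~Mutex(); }

    void lock()   { impl()->lock(); }
    void unlock() { impl()->unlock(); }
};

int main() {
    demo_padded_mutex<std::mutex> m;
    m.lock();                                  // the underlying std::mutex sits on its own cache line
    m.unlock();
    assert(sizeof(m) >= 2*demo_cache_line);    // the space overhead noted in the header comment
    return 0;
}

For a 40-byte std::mutex and 64-byte lines, my_pad works out to (1+1)*64 = 128 bytes: up to 63 bytes may be skipped to reach the next cache-line boundary, and the remaining space holds the mutex with the rest of its line unshared, which is exactly the "up to 2*cache_line_size-1" overhead described in the header.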