Intel(R) Threading Building Blocks Doxygen Documentation, version 4.2.3
msvc_armv7.h
1 /*
2  Copyright (c) 2005-2020 Intel Corporation
3 
4  Licensed under the Apache License, Version 2.0 (the "License");
5  you may not use this file except in compliance with the License.
6  You may obtain a copy of the License at
7 
8  http://www.apache.org/licenses/LICENSE-2.0
9 
10  Unless required by applicable law or agreed to in writing, software
11  distributed under the License is distributed on an "AS IS" BASIS,
12  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  See the License for the specific language governing permissions and
14  limitations under the License.
15 */
16 
17 #if !defined(__TBB_machine_H) || defined(__TBB_msvc_armv7_H)
18 #error Do not #include this internal file directly; use public TBB headers instead.
19 #endif
20 
21 #define __TBB_msvc_armv7_H
22 
23 #include <intrin.h>
24 #include <float.h>
25 
26 #define __TBB_WORDSIZE 4
27 
28 #define __TBB_ENDIANNESS __TBB_ENDIAN_UNSUPPORTED
29 
30 #if defined(TBB_WIN32_USE_CL_BUILTINS)
31 // We can test this on _M_IX86
32 #pragma intrinsic(_ReadWriteBarrier)
33 #pragma intrinsic(_mm_mfence)
34 #define __TBB_compiler_fence() _ReadWriteBarrier()
35 #define __TBB_full_memory_fence() _mm_mfence()
36 #define __TBB_control_consistency_helper() __TBB_compiler_fence()
37 #define __TBB_acquire_consistency_helper() __TBB_compiler_fence()
38 #define __TBB_release_consistency_helper() __TBB_compiler_fence()
39 #else
40 //Now __dmb(_ARM_BARRIER_SY) is used for both compiler and memory fences
41 //This might be changed later after testing
42 #define __TBB_compiler_fence() __dmb(_ARM_BARRIER_SY)
43 #define __TBB_full_memory_fence() __dmb(_ARM_BARRIER_SY)
44 #define __TBB_control_consistency_helper() __TBB_compiler_fence()
45 #define __TBB_acquire_consistency_helper() __TBB_full_memory_fence()
46 #define __TBB_release_consistency_helper() __TBB_full_memory_fence()
47 #endif
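The block above is the whole memory-ordering story for this port: either the cl.exe builtins (_ReadWriteBarrier as a compiler-only fence, _mm_mfence as a full fence) or a full ARM data memory barrier via __dmb(_ARM_BARRIER_SY) for every role. As a rough illustration of what the full fence buys, here is a minimal flag-publication sketch written directly against the same intrinsic, assuming an MSVC build targeting ARMv7; the variable and function names are illustrative, not TBB code.

#include <intrin.h>

// Illustrative only: one thread publishes data, another polls a flag.
// The barriers play the role of the release/acquire helpers defined above.
static volatile long g_payload = 0;   // hypothetical shared data
static volatile long g_ready   = 0;   // hypothetical "published" flag

void publish_sketch(long value) {
    g_payload = value;
    __dmb(_ARM_BARRIER_SY);   // make the payload visible before the flag
    g_ready = 1;
}

long consume_sketch() {
    while (!g_ready) { }      // spin until the flag is observed
    __dmb(_ARM_BARRIER_SY);   // keep the payload read from floating above the flag read
    return g_payload;
}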
48 
49 //--------------------------------------------------
50 // Compare and swap
51 //--------------------------------------------------
52 
61 #define __TBB_MACHINE_DEFINE_ATOMICS_CMPSWP(S,T,F) \
62 inline T __TBB_machine_cmpswp##S( volatile void *ptr, T value, T comparand ) { \
63  return _InterlockedCompareExchange##F(reinterpret_cast<volatile T *>(ptr),value,comparand); \
64 } \
65 
66 #define __TBB_MACHINE_DEFINE_ATOMICS_FETCHADD(S,T,F) \
67 inline T __TBB_machine_fetchadd##S( volatile void *ptr, T value ) { \
68  return _InterlockedExchangeAdd##F(reinterpret_cast<volatile T *>(ptr),value); \
69 } \
70 
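Each expansion of these macros wraps a single MSVC Interlocked intrinsic; an instantiation such as __TBB_MACHINE_DEFINE_ATOMICS_CMPSWP(4,long,) produces a __TBB_machine_cmpswp4 that forwards to _InterlockedCompareExchange and returns the value previously in memory. Below is a sketch of the standard compare-and-swap retry loop built on that same intrinsic; the helper name is illustrative, not a TBB API.

#include <intrin.h>

// Sketch: atomic bounded increment built from a CAS retry loop, using the
// same _InterlockedCompareExchange intrinsic the macro above forwards to.
inline long bounded_increment(volatile long* counter, long limit) {
    long seen = *counter;
    for (;;) {
        if (seen >= limit)
            return seen;                       // already at the cap, nothing to do
        long prior = _InterlockedCompareExchange(counter, seen + 1, seen);
        if (prior == seen)
            return seen + 1;                   // our CAS won
        seen = prior;                          // lost the race; retry with the fresh value
    }
}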
76 #if defined(TBB_WIN32_USE_CL_BUILTINS)
77 // No _InterlockedExchangeAdd64 intrinsic on _M_IX86
78 #define __TBB_64BIT_ATOMICS 0
79 #else
81 #endif
82 
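__TBB_64BIT_ATOMICS tells the rest of the library whether 8-byte atomic operations are natively available; on the _M_IX86 build that uses the cl.exe builtins there is no _InterlockedExchangeAdd64 intrinsic, so they are switched off. Code layered on top can branch on the flag, roughly as in this sketch (the comments stand in for real implementations):

#if __TBB_64BIT_ATOMICS
    // 8-byte compare-and-swap / fetch-and-add can be used directly here.
#else
    // No native 64-bit atomics: guard 64-bit shared state with a lock,
    // or avoid 64-bit atomic counters in this configuration.
#endif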
83 inline void __TBB_machine_pause (int32_t delay )
84 {
85  while(delay>0)
86  {
87  __TBB_compiler_fence();
88  delay--;
89  }
90 }
91 
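__TBB_machine_pause burns a short, bounded amount of time rather than blocking; it is the building block for spin loops with backoff. A minimal sketch of that pattern, assuming this header is reached through the usual tbb_machine.h include; the lock word and function name are illustrative, not TBB APIs.

#include <intrin.h>

// Sketch: test-and-test-and-set spin lock acquire with geometric backoff,
// the kind of loop a pause primitive like the one above exists for.
inline void spin_acquire_sketch(volatile long& lock_word) {
    int delay = 1;
    for (;;) {
        if (lock_word == 0 &&
            _InterlockedCompareExchange(&lock_word, 1, 0) == 0)
            return;                        // acquired
        __TBB_machine_pause(delay);        // wait a little before trying again
        if (delay < 16) delay *= 2;        // back off geometrically, capped
    }
}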
92 // API to retrieve/update FPU control setting
93 #define __TBB_CPU_CTL_ENV_PRESENT 1
94 
95 namespace tbb {
96 namespace internal {
97 
98 template <typename T, size_t S>
99 struct machine_load_store_relaxed {
100  static inline T load ( const volatile T& location ) {
101  const T value = location;
102 
103  /*
104  * An extra memory barrier is required for errata #761319
105  * Please see http://infocenter.arm.com/help/topic/com.arm.doc.uan0004a
106  */
107  __TBB_acquire_consistency_helper();
108  return value;
109  }
110 
111  static inline void store ( volatile T& location, T value ) {
112  location = value;
113  }
114 };
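machine_load_store_relaxed provides plain, unordered loads and stores; the only twist is the extra barrier on the load path, which the comment attributes to ARM errata #761319. These helpers presumably back TBB's relaxed load/store machinery; a direct-use sketch, assuming the header is included via tbb_machine.h (the variable and function names are illustrative):

#include <cstdint>

// Sketch: storing and reloading a 32-bit value through the relaxed helpers
// defined above. No ordering with other memory accesses is implied; the load
// additionally issues the errata-workaround barrier.
volatile int32_t g_counter = 0;   // hypothetical shared variable

void relaxed_roundtrip_sketch() {
    typedef tbb::internal::machine_load_store_relaxed<int32_t, sizeof(int32_t)> lsr;
    lsr::store(g_counter, 42);                // plain store
    int32_t observed = lsr::load(g_counter);  // plain load + barrier
    (void)observed;
}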
115 
116 class cpu_ctl_env {
117 private:
118  unsigned int my_ctl;
119 public:
120  bool operator!=( const cpu_ctl_env& ctl ) const { return my_ctl != ctl.my_ctl; }
121  void get_env() { my_ctl = _control87(0, 0); }
122  void set_env() const { _control87( my_ctl, ~0U ); }
123 };
124 
125 } // namespace internal
126 } // namespace tbb
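cpu_ctl_env captures the current FPU control settings with _control87(0, 0) and reinstates a saved value with _control87(my_ctl, ~0U), so a floating-point environment can be recorded, compared, and reapplied later. The same save/change/restore idiom in isolation, as a sketch; the function name and the temporary rounding-mode change are illustrative.

#include <float.h>

// Sketch: save, alter, and restore the FPU control settings using the same
// _control87 calls as get_env()/set_env() above. Illustrative only.
void rounded_up_section_sketch() {
    unsigned int saved = _control87(0, 0);   // read current settings, change nothing
    _control87(_RC_UP, _MCW_RC);             // e.g. switch rounding to +infinity
    // ... floating-point work that wants the altered mode ...
    _control87(saved, ~0u);                  // restore every field that may have changed
}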
127 
128 // Machine specific atomic operations
129 #define __TBB_CompareAndSwap4(P,V,C) __TBB_machine_cmpswp4(P,V,C)
130 #define __TBB_CompareAndSwap8(P,V,C) __TBB_machine_cmpswp8(P,V,C)
131 #define __TBB_Pause(V) __TBB_machine_pause(V)
132 
133 // Use generics for some things
134 #define __TBB_USE_FETCHSTORE_AS_FULL_FENCED_STORE 1
135 #define __TBB_USE_GENERIC_HALF_FENCED_LOAD_STORE 1
136 #define __TBB_USE_GENERIC_PART_WORD_FETCH_ADD 1
137 #define __TBB_USE_GENERIC_PART_WORD_FETCH_STORE 1
138 #define __TBB_USE_GENERIC_FETCH_STORE 1
139 #define __TBB_USE_GENERIC_DWORD_LOAD_STORE 1
140 #define __TBB_USE_GENERIC_SEQUENTIAL_CONSISTENCY_LOAD_STORE 1
141 
142 #if defined(TBB_WIN32_USE_CL_BUILTINS)
143 #if !__TBB_WIN8UI_SUPPORT
144 extern "C" __declspec(dllimport) int __stdcall SwitchToThread( void );
145 #define __TBB_Yield() SwitchToThread()
146 #else
147 #include<thread>
148 #define __TBB_Yield() std::this_thread::yield()
149 #endif
150 #else
151 #define __TBB_Yield() __yield()
152 #endif
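Depending on the configuration, __TBB_Yield() ends up as the Win32 SwitchToThread() call, std::this_thread::yield() on Windows 8 UI builds, or the ARM __yield() hint intrinsic. Below is a sketch of the usual "spin briefly, then yield" wait loop that combines it with the pause primitive defined earlier; the flag, the threshold, and the function name are illustrative, not TBB code.

// Sketch: bounded spinning that falls back to the scheduler.
inline void wait_until_set_sketch(const volatile long& flag) {
    int spins = 0;
    while (!flag) {
        if (++spins < 100)
            __TBB_Pause(4);    // short busy delay (__TBB_machine_pause)
        else
            __TBB_Yield();     // hand the core back to the OS / other threads
    }
}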
153 
154 // Machine specific atomic operations
155 #define __TBB_AtomicOR(P,V) __TBB_machine_OR(P,V)
156 #define __TBB_AtomicAND(P,V) __TBB_machine_AND(P,V)
157 
158 template <typename T1,typename T2>
159 inline void __TBB_machine_OR( T1 *operand, T2 addend ) {
160  _InterlockedOr((long volatile *)operand, (long)addend);
161 }
162 
163 template <typename T1,typename T2>
164 inline void __TBB_machine_AND( T1 *operand, T2 addend ) {
165  _InterlockedAnd((long volatile *)operand, (long)addend);
166 }
167 
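__TBB_AtomicOR and __TBB_AtomicAND wrap _InterlockedOr/_InterlockedAnd on a 32-bit word, which is enough to set and clear flag bits atomically. A small usage sketch; the status word and the bit mask are illustrative values, not TBB code.

// Sketch: setting and clearing bits in a shared status word with the
// wrappers defined above.
volatile long g_status = 0;

const long STATUS_ERROR_BIT = 0x1;   // hypothetical flag bit

void flag_error_sketch()  { __TBB_AtomicOR(&g_status, STATUS_ERROR_BIT); }
void clear_error_sketch() { __TBB_AtomicAND(&g_status, ~STATUS_ERROR_BIT); }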
