/Users/buildslave/jenkins/workspace/coverage/llvm-project/libcxx/src/atomic.cpp
Line | Count | Source |
1 | | //===----------------------------------------------------------------------===// |
2 | | // |
3 | | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. |
4 | | // See https://llvm.org/LICENSE.txt for license information. |
5 | | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception |
6 | | // |
7 | | //===----------------------------------------------------------------------===// |
8 | | |
9 | | #include <__config> |
10 | | #ifndef _LIBCPP_HAS_NO_THREADS |
11 | | |
12 | | #include <atomic> |
13 | | #include <climits> |
14 | | #include <functional> |
15 | | #include <thread> |
16 | | |
17 | | #ifdef __linux__ |
18 | | |
19 | | #include <unistd.h> |
20 | | #include <linux/futex.h> |
21 | | #include <sys/syscall.h> |
22 | | |
23 | | // libc++ uses SYS_futex as a universal syscall name. However, on 32 bit architectures |
24 | | // with a 64 bit time_t, we need to specify SYS_futex_time64. |
25 | | #if !defined(SYS_futex) && defined(SYS_futex_time64) |
26 | | # define SYS_futex SYS_futex_time64 |
27 | | #endif |
28 | | |
29 | | #else // <- Add other operating systems here |
30 | | |
31 | | // Baseline needs no new headers |
32 | | |
33 | | #endif |
34 | | |
35 | | _LIBCPP_BEGIN_NAMESPACE_STD |
36 | | |
37 | | #ifdef __linux__ |
38 | | |
39 | | static void __libcpp_platform_wait_on_address(__cxx_atomic_contention_t const volatile* __ptr, |
40 | | __cxx_contention_t __val) |
41 | | { |
42 | | static constexpr timespec __timeout = { 2, 0 }; |
43 | | syscall(SYS_futex, __ptr, FUTEX_WAIT_PRIVATE, __val, &__timeout, 0, 0); |
44 | | } |
45 | | |
46 | | static void __libcpp_platform_wake_by_address(__cxx_atomic_contention_t const volatile* __ptr, |
47 | | bool __notify_one) |
48 | | { |
49 | | syscall(SYS_futex, __ptr, FUTEX_WAKE_PRIVATE, __notify_one ? 1 : INT_MAX, 0, 0, 0); |
50 | | } |
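A note on the pair of wrappers above: FUTEX_WAIT_PRIVATE puts the thread to sleep only if the futex word still equals __val when the kernel inspects it, and the call can return early on a signal, on an unrelated wake, or when the two-second __timeout expires. Callers therefore have to re-check the monitored value and loop; a minimal sketch of such a loop, using only helpers this file already relies on (the function name is made up for illustration), might be:

    static void __example_wait_until_changed(__cxx_atomic_contention_t const volatile* __ptr,
                                              __cxx_contention_t __old) {
        // Keep sleeping while the monitored word still holds __old; each call to the
        // wrapper above may return spuriously (signal, 2-second timeout, unrelated wake).
        while (__cxx_nonatomic_compare_equal(__cxx_atomic_load(__ptr, memory_order_acquire), __old))
            __libcpp_platform_wait_on_address(__ptr, __old);
    }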
51 | | |
52 | | #elif defined(__APPLE__) && defined(_LIBCPP_USE_ULOCK) |
53 | | |
54 | | extern "C" int __ulock_wait(uint32_t operation, void *addr, uint64_t value, |
55 | | uint32_t timeout); /* timeout is specified in microseconds */ |
56 | | extern "C" int __ulock_wake(uint32_t operation, void *addr, uint64_t wake_value); |
57 | | |
58 | | #define UL_COMPARE_AND_WAIT 1 |
59 | | #define ULF_WAKE_ALL 0x00000100 |
60 | | |
61 | | static void __libcpp_platform_wait_on_address(__cxx_atomic_contention_t const volatile* __ptr, |
62 | | __cxx_contention_t __val) |
63 | | { |
64 | | __ulock_wait(UL_COMPARE_AND_WAIT, |
65 | | const_cast<__cxx_atomic_contention_t*>(__ptr), __val, 0); |
66 | | } |
67 | | |
68 | | static void __libcpp_platform_wake_by_address(__cxx_atomic_contention_t const volatile* __ptr, |
69 | | bool __notify_one) |
70 | | { |
71 | | __ulock_wake(UL_COMPARE_AND_WAIT | (__notify_one ? 0 : ULF_WAKE_ALL), |
72 | | const_cast<__cxx_atomic_contention_t*>(__ptr), 0); |
73 | | } |
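The Darwin path mirrors the futex one: __ulock_wait blocks only while the value at addr still matches the expected value, with the timeout given in microseconds as noted above and 0 meaning no timeout, and __ulock_wake releases a single waiter unless ULF_WAKE_ALL is set. These are private Apple interfaces, which is why the prototypes and the two constants are declared locally here; the caller-side re-check loop sketched after the Linux wrappers applies unchanged.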
74 | | |
75 | | #else // <- Add other operating systems here |
76 | | |
77 | | // Baseline is just a timed backoff |
78 | | |
79 | | static void __libcpp_platform_wait_on_address(__cxx_atomic_contention_t const volatile* __ptr, |
80 | | __cxx_contention_t __val) |
81 | 0 | { |
82 | 0 | __libcpp_thread_poll_with_backoff([=]() -> bool { |
83 | 0 | return !__cxx_nonatomic_compare_equal(__cxx_atomic_load(__ptr, memory_order_relaxed), __val); |
84 | 0 | }, __libcpp_timed_backoff_policy()); |
85 | 0 | } |
86 | | |
87 | 0 | static void __libcpp_platform_wake_by_address(__cxx_atomic_contention_t const volatile*, bool) { } |
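Without a kernel-level wait primitive, the baseline simply polls: the lambda above returns true as soon as the monitored value no longer equals __val, and __libcpp_thread_poll_with_backoff keeps invoking it under the timed backoff policy until then, so the matching wake function legitimately does nothing. As a rough illustration of the poll-with-backoff idea only (this is not the real __libcpp_timed_backoff_policy, and the thresholds are invented), such a loop can be structured like this, using <thread> which this file already includes:

    template <class _Fn>
    static void __example_poll_with_backoff(_Fn __done) {
        for (int __attempt = 0; !__done(); ++__attempt) {
            if (__attempt < 64)
                std::this_thread::yield();                                    // spin briefly first
            else
                std::this_thread::sleep_for(std::chrono::microseconds(500));  // then sleep between polls
        }
    }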
88 | | |
89 | | #endif // __linux__ |
90 | | |
91 | | static constexpr size_t __libcpp_contention_table_size = (1 << 8); /* < there's no magic in this number */ |
92 | | |
93 | | struct alignas(64) /* aim to avoid false sharing */ __libcpp_contention_table_entry |
94 | | { |
95 | | __cxx_atomic_contention_t __contention_state; |
96 | | __cxx_atomic_contention_t __platform_state; |
97 | | inline constexpr __libcpp_contention_table_entry() : |
98 | 0 | __contention_state(0), __platform_state(0) { } |
99 | | }; |
100 | | |
101 | | static __libcpp_contention_table_entry __libcpp_contention_table[ __libcpp_contention_table_size ]; |
102 | | |
103 | | static hash<void const volatile*> __libcpp_contention_hasher; |
104 | | |
105 | | static __libcpp_contention_table_entry* __libcpp_contention_state(void const volatile * p) |
106 | 0 | { |
107 | 0 | return &__libcpp_contention_table[__libcpp_contention_hasher(p) & (__libcpp_contention_table_size - 1)]; |
108 | 0 | } |
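Every waited-on address is folded into one of the 256 entries above, and because the table size is a power of two, masking with size - 1 is the same as reducing the hash modulo the size. Distinct atomics that happen to collide merely share contention and wake-up state; that can produce extra wake-ups, but never lost ones, because every woken thread re-checks its own value. If one wanted to make the power-of-two assumption explicit (this assert is not in the original source), it could sit right next to the mask:

    static_assert((__libcpp_contention_table_size & (__libcpp_contention_table_size - 1)) == 0,
                  "__libcpp_contention_table_size must be a power of two for the '& (size - 1)' mask");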
109 | | |
110 | | /* Given an atomic to track contention and an atomic to actually wait on, which may be |
111 | | the same atomic, we try to detect contention to avoid spuriously calling the platform. */ |
112 | | |
113 | | static void __libcpp_contention_notify(__cxx_atomic_contention_t volatile* __contention_state, |
114 | | __cxx_atomic_contention_t const volatile* __platform_state, |
115 | | bool __notify_one) |
116 | 0 | { |
117 | 0 | if(0 != __cxx_atomic_load(__contention_state, memory_order_seq_cst)) |
118 | | // We only call 'wake' if we consumed a contention bit here. |
119 | 0 | __libcpp_platform_wake_by_address(__platform_state, __notify_one); |
120 | 0 | } |
121 | | static __cxx_contention_t __libcpp_contention_monitor_for_wait(__cxx_atomic_contention_t volatile* __contention_state, |
122 | | __cxx_atomic_contention_t const volatile* __platform_state) |
123 | 0 | { |
124 | | // We will monitor this value. |
125 | 0 | return __cxx_atomic_load(__platform_state, memory_order_acquire); |
126 | 0 | } |
127 | | static void __libcpp_contention_wait(__cxx_atomic_contention_t volatile* __contention_state, |
128 | | __cxx_atomic_contention_t const volatile* __platform_state, |
129 | | __cxx_contention_t __old_value) |
130 | 0 | { |
131 | 0 | __cxx_atomic_fetch_add(__contention_state, __cxx_contention_t(1), memory_order_seq_cst); |
132 | | // We sleep as long as the monitored value hasn't changed. |
133 | 0 | __libcpp_platform_wait_on_address(__platform_state, __old_value); |
134 | 0 | __cxx_atomic_fetch_sub(__contention_state, __cxx_contention_t(1), memory_order_release); |
135 | 0 | } |
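Taken together, the three helpers above form the contention handshake: a waiter announces itself by incrementing __contention_state with seq_cst ordering before sleeping and retracts the count afterwards, while a notifier, which runs only after the monitored value has already been changed, loads __contention_state with seq_cst ordering and pays for a platform wake only when it observes at least one announced waiter. The design intent is that the seq_cst operations order the two sides: if a notifier reads a count of zero just before a waiter announces itself, that waiter's platform wait still re-checks the monitored value, finds it already changed, and returns without sleeping, so the skipped wake costs nothing.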
136 | | |
137 | | /* When the incoming atomic is the wrong size for the platform wait size, we need to |
138 | | launder the value sequence through an atomic from our table. */ |
139 | | |
140 | | static void __libcpp_atomic_notify(void const volatile* __location) |
141 | 0 | { |
142 | 0 | auto const __entry = __libcpp_contention_state(__location); |
143 | | // The value sequence laundering happens on the next line below. |
144 | 0 | __cxx_atomic_fetch_add(&__entry->__platform_state, __cxx_contention_t(1), memory_order_release); |
145 | 0 | __libcpp_contention_notify(&__entry->__contention_state, |
146 | 0 | &__entry->__platform_state, |
147 | 0 | false /* when laundering, we can't handle notify_one */); |
148 | 0 | } |
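The fetch_add above is the laundering itself: every notification advances a shared counter in the table entry, so any thread sleeping on __platform_state sees the value change and wakes. Since unrelated atomics can hash to the same entry, waking exactly one sleeper could wake a thread that is waiting on a different address while the intended one stays asleep; broadcasting and letting each woken thread re-evaluate its own condition is the only safe choice, which is what the "can't handle notify_one" comment refers to.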
149 | | _LIBCPP_EXPORTED_FROM_ABI |
150 | | void __cxx_atomic_notify_one(void const volatile* __location) |
151 | 0 | { __libcpp_atomic_notify(__location); } |
152 | | _LIBCPP_EXPORTED_FROM_ABI |
153 | | void __cxx_atomic_notify_all(void const volatile* __location) |
154 | 0 | { __libcpp_atomic_notify(__location); } |
155 | | _LIBCPP_EXPORTED_FROM_ABI |
156 | | __cxx_contention_t __libcpp_atomic_monitor(void const volatile* __location) |
157 | 0 | { |
158 | 0 | auto const __entry = __libcpp_contention_state(__location); |
159 | 0 | return __libcpp_contention_monitor_for_wait(&__entry->__contention_state, &__entry->__platform_state); |
160 | 0 | } |
161 | | _LIBCPP_EXPORTED_FROM_ABI |
162 | | void __libcpp_atomic_wait(void const volatile* __location, __cxx_contention_t __old_value) |
163 | 0 | { |
164 | 0 | auto const __entry = __libcpp_contention_state(__location); |
165 | 0 | __libcpp_contention_wait(&__entry->__contention_state, &__entry->__platform_state, __old_value); |
166 | 0 | } |
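These three entry points are intended to be used as a monitor/wait pair: snapshot the laundered counter, re-check the caller's own condition, and only then block on the snapshot, looping until the condition holds. A hypothetical caller-side loop (the real one lives in libc++'s <atomic> headers, not in this file) could look like:

    template <class _Pred>
    static void __example_wait_until(void const volatile* __location, _Pred __done) {
        while (!__done()) {
            // Take the snapshot before re-checking: any notification issued after this
            // point advances the counter, so the wait below cannot sleep through it.
            __cxx_contention_t __monitor = __libcpp_atomic_monitor(__location);
            if (__done())
                break;
            __libcpp_atomic_wait(__location, __monitor);
        }
    }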
167 | | |
168 | | /* When the incoming atomic happens to be the platform wait size, we still need to use the |
169 | | table for the contention detection, but we can use the atomic directly for the wait. */ |
170 | | |
171 | | _LIBCPP_EXPORTED_FROM_ABI |
172 | | void __cxx_atomic_notify_one(__cxx_atomic_contention_t const volatile* __location) |
173 | 0 | { |
174 | 0 | __libcpp_contention_notify(&__libcpp_contention_state(__location)->__contention_state, __location, true); |
175 | 0 | } |
176 | | _LIBCPP_EXPORTED_FROM_ABI |
177 | | void __cxx_atomic_notify_all(__cxx_atomic_contention_t const volatile* __location) |
178 | 0 | { |
179 | 0 | __libcpp_contention_notify(&__libcpp_contention_state(__location)->__contention_state, __location, false); |
180 | 0 | } |
181 | | _LIBCPP_EXPORTED_FROM_ABI |
182 | | __cxx_contention_t __libcpp_atomic_monitor(__cxx_atomic_contention_t const volatile* __location) |
183 | 0 | { |
184 | 0 | return __libcpp_contention_monitor_for_wait(&__libcpp_contention_state(__location)->__contention_state, __location); |
185 | 0 | } |
186 | | _LIBCPP_EXPORTED_FROM_ABI |
187 | | void __libcpp_atomic_wait(__cxx_atomic_contention_t const volatile* __location, __cxx_contention_t __old_value) |
188 | 0 | { |
189 | 0 | __libcpp_contention_wait(&__libcpp_contention_state(__location)->__contention_state, __location, __old_value); |
190 | 0 | } |
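These overloads differ from the type-erased ones above only in what gets slept on: the caller's atomic already has the platform wait size, so threads block on the user's object itself and __old_value is the caller's expected value rather than a laundered counter, with the hash table consulted purely for contention counting. That is also why notify_one can be honored here: every thread sleeping on this address is waiting for this very atomic. Roughly speaking, the <atomic> headers route a C++20 a.wait(old) / a.notify_one() call to this family when the atomic's representation matches __cxx_atomic_contention_t, and through the type-erased, laundered family otherwise.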
191 | | |
192 | | _LIBCPP_END_NAMESPACE_STD |
193 | | |
194 | | #endif //_LIBCPP_HAS_NO_THREADS |