// libcxx/src/memory.cpp

//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include <__config>
#ifdef _LIBCPP_DEPRECATED_ABI_LEGACY_LIBRARY_DEFINITIONS_FOR_INLINE_FUNCTIONS
# define _LIBCPP_SHARED_PTR_DEFINE_LEGACY_INLINE_FUNCTIONS
#endif

#include <memory>

#ifndef _LIBCPP_HAS_NO_THREADS
# include <mutex>
# include <thread>
# if defined(__ELF__) && defined(_LIBCPP_LINK_PTHREAD_LIB)
#   pragma comment(lib, "pthread")
# endif
#endif

#include "include/atomic_support.h"

_LIBCPP_BEGIN_NAMESPACE_STD

const allocator_arg_t allocator_arg = allocator_arg_t();

bad_weak_ptr::~bad_weak_ptr() noexcept {}

const char*
bad_weak_ptr::what() const noexcept
{
    return "bad_weak_ptr";
}
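
// Illustrative only (not part of this file): the usual way to reach
// bad_weak_ptr::what() is constructing a shared_ptr from an expired weak_ptr.
//
//     std::shared_ptr<int> sp = std::make_shared<int>(42);
//     std::weak_ptr<int>   wp = sp;
//     sp.reset();                        // destroys the managed int
//     try {
//         std::shared_ptr<int> sp2(wp); // throws std::bad_weak_ptr
//     } catch (const std::bad_weak_ptr& e) {
//         // e.what() returns "bad_weak_ptr"
//     }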

__shared_count::~__shared_count()
{
}

__shared_weak_count::~__shared_weak_count()
{
}

#if defined(_LIBCPP_SHARED_PTR_DEFINE_LEGACY_INLINE_FUNCTIONS)
void
__shared_count::__add_shared() noexcept
{
    __libcpp_atomic_refcount_increment(__shared_owners_);
}

bool
__shared_count::__release_shared() noexcept
{
    if (__libcpp_atomic_refcount_decrement(__shared_owners_) == -1)
    {
        __on_zero_shared();
        return true;
    }
    return false;
}
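
// Note on the convention above: __shared_owners_ stores the owner count
// minus one (use_count() is __shared_owners_ + 1), so a decrement that
// yields -1 means the last shared owner just went away. A sketch of the
// counter's life, assuming that convention:
//
//     make_shared<T>()        -> __shared_owners_ == 0   (use_count == 1)
//     copy the shared_ptr     -> __shared_owners_ == 1   (use_count == 2)
//     destroy the copy        -> __shared_owners_ == 0
//     destroy the original    -> decrement returns -1 -> __on_zero_shared()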

void
__shared_weak_count::__add_shared() noexcept
{
    __shared_count::__add_shared();
}

void
__shared_weak_count::__add_weak() noexcept
{
    __libcpp_atomic_refcount_increment(__shared_weak_owners_);
}

void
__shared_weak_count::__release_shared() noexcept
{
    if (__shared_count::__release_shared())
        __release_weak();
}

#endif // _LIBCPP_SHARED_PTR_DEFINE_LEGACY_INLINE_FUNCTIONS

void
__shared_weak_count::__release_weak() noexcept
{
    // NOTE: The acquire load here is an optimization of the very
    // common case where a shared pointer is being destructed while
    // having no other contended references.
    //
    // BENEFIT: We avoid expensive atomic stores like XADD and STREX
    // in a common case. Those instructions are slow and do nasty
    // things to caches.
    //
    // IS THIS SAFE? Yes. During weak destruction, if we see that we
    // are the last reference, we know that no-one else is accessing
    // us. If someone were accessing us, then they would be doing so
    // while the last shared / weak_ptr was being destructed, and
    // that's undefined anyway.
    //
    // If we see anything other than a 0, then we have possible
    // contention, and need to use an atomicrmw primitive.
    // The same arguments don't apply for increment, where it is legal
    // (though inadvisable) to share shared_ptr references between
    // threads, and have them all get copied at once. The argument
    // also doesn't apply for __release_shared, because an outstanding
    // weak_ptr::lock() could read / modify the shared count.
    if (__libcpp_atomic_load(&__shared_weak_owners_, _AO_Acquire) == 0)
    {
        // no need to do this store, because we are about
        // to destroy everything.
        //__libcpp_atomic_store(&__shared_weak_owners_, -1, _AO_Release);
        __on_zero_shared_weak();
    }
    else if (__libcpp_atomic_refcount_decrement(__shared_weak_owners_) == -1)
        __on_zero_shared_weak();
}
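
// Illustrative only: the acquire-load fast path above is what fires in the
// common single-owner case.
//
//     {
//         std::shared_ptr<int> sp = std::make_shared<int>(1);
//     }   // ~shared_ptr -> __release_shared() -> __release_weak():
//         // no weak_ptr exists, so __shared_weak_owners_ is 0; the plain
//         // acquire load sees 0 and we destroy without an atomic RMW.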

__shared_weak_count*
__shared_weak_count::lock() noexcept
{
    long object_owners = __libcpp_atomic_load(&__shared_owners_);
    while (object_owners != -1)
    {
        if (__libcpp_atomic_compare_exchange(&__shared_owners_,
                                             &object_owners,
                                             object_owners+1))
            return this;
    }
    return nullptr;
}
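
// Illustrative only: this CAS loop is the engine behind weak_ptr::lock().
//
//     std::weak_ptr<int> wp = /* some shared_ptr */;
//     if (std::shared_ptr<int> sp = wp.lock()) {
//         // lock() CAS-incremented the owner count from a value other
//         // than -1, so the object stays alive while sp exists.
//     } else {
//         // the count was already -1 (object destroyed); we got nullptr.
//     }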

const void*
__shared_weak_count::__get_deleter(const type_info&) const noexcept
{
    return nullptr;
}

#if !defined(_LIBCPP_HAS_NO_THREADS)

static constexpr std::size_t __sp_mut_count = 16;
static constinit __libcpp_mutex_t mut_back[__sp_mut_count] =
{
    _LIBCPP_MUTEX_INITIALIZER, _LIBCPP_MUTEX_INITIALIZER, _LIBCPP_MUTEX_INITIALIZER, _LIBCPP_MUTEX_INITIALIZER,
    _LIBCPP_MUTEX_INITIALIZER, _LIBCPP_MUTEX_INITIALIZER, _LIBCPP_MUTEX_INITIALIZER, _LIBCPP_MUTEX_INITIALIZER,
    _LIBCPP_MUTEX_INITIALIZER, _LIBCPP_MUTEX_INITIALIZER, _LIBCPP_MUTEX_INITIALIZER, _LIBCPP_MUTEX_INITIALIZER,
    _LIBCPP_MUTEX_INITIALIZER, _LIBCPP_MUTEX_INITIALIZER, _LIBCPP_MUTEX_INITIALIZER, _LIBCPP_MUTEX_INITIALIZER
};
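
// The 16 mutexes above back __sp_mut, which the shared_ptr atomic access
// functions (atomic_load, atomic_store, atomic_compare_exchange_* on
// shared_ptr) lock around their reads and writes. Striping by pointer hash
// keeps unrelated shared_ptr objects from contending on a single lock.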

_LIBCPP_CONSTEXPR __sp_mut::__sp_mut(void* p) noexcept
    : __lx(p)
{
}

void
__sp_mut::lock() noexcept
{
    auto m = static_cast<__libcpp_mutex_t*>(__lx);
    unsigned count = 0;
    while (!__libcpp_mutex_trylock(m))
    {
        if (++count > 16)
        {
            __libcpp_mutex_lock(m);
            break;
        }
        this_thread::yield();
    }
}
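
// Note: lock() above spins on trylock, yielding between attempts, and only
// falls back to a blocking __libcpp_mutex_lock() after 16 failed tries; the
// critical sections guarded by __sp_mut are tiny, so a short spin usually
// avoids a trip through the kernel.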

void
__sp_mut::unlock() noexcept
{
    __libcpp_mutex_unlock(static_cast<__libcpp_mutex_t*>(__lx));
}

__sp_mut&
__get_sp_mut(const void* p)
{
    static constinit __sp_mut muts[__sp_mut_count] = {
        &mut_back[ 0], &mut_back[ 1], &mut_back[ 2], &mut_back[ 3],
        &mut_back[ 4], &mut_back[ 5], &mut_back[ 6], &mut_back[ 7],
        &mut_back[ 8], &mut_back[ 9], &mut_back[10], &mut_back[11],
        &mut_back[12], &mut_back[13], &mut_back[14], &mut_back[15]
    };
    return muts[hash<const void*>()(p) & (__sp_mut_count-1)];
}
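
// A minimal sketch (hypothetical caller, mirroring how the shared_ptr atomic
// access functions use this) of selecting and using a stripe lock:
//
//     std::shared_ptr<int> g;
//     __sp_mut& m = std::__get_sp_mut(&g); // hash the address into [0, 16)
//     m.lock();
//     std::shared_ptr<int> r = g;          // read under the stripe lock
//     m.unlock();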

#endif // !defined(_LIBCPP_HAS_NO_THREADS)

void*
align(size_t alignment, size_t size, void*& ptr, size_t& space)
{
    void* r = nullptr;
    if (size <= space)
    {
        char* p1 = static_cast<char*>(ptr);
        // Round p1 up to the next multiple of alignment (alignment must be
        // a power of two for the mask below to be correct).
        char* p2 = reinterpret_cast<char*>(reinterpret_cast<size_t>(p1 + (alignment - 1)) & -alignment);
        // d is the padding skipped; succeed only if size still fits after it.
        size_t d = static_cast<size_t>(p2 - p1);
        if (d <= space - size)
        {
            r = p2;
            ptr = r;
            space -= d;
        }
    }
    return r;
}
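
// Illustrative only: std::align bumps ptr to the next alignment boundary and
// shrinks space by the padding it skipped, or returns nullptr (leaving ptr
// and space untouched) if the aligned object no longer fits.
//
//     alignas(16) char buf[64];
//     void*  p     = buf + 1;              // deliberately misaligned
//     size_t space = sizeof(buf) - 1;
//     if (void* aligned = std::align(16, 32, p, space)) {
//         // p == aligned, now 16-byte aligned; space shrank by 15
//     }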

_LIBCPP_END_NAMESPACE_STD