/Users/buildslave/jenkins/sharedspace/clang-stage2-coverage-R@2/llvm/lib/Support/FoldingSet.cpp
Line | Count | Source (jump to first uncovered line) |
//===-- Support/FoldingSet.cpp - Uniquing Hash Set --------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements a hash set that can be used to remove duplication of
// nodes in a graph.
//
//===----------------------------------------------------------------------===//
14 | | |
15 | | #include "llvm/ADT/FoldingSet.h" |
16 | | #include "llvm/ADT/Hashing.h" |
17 | | #include "llvm/Support/Allocator.h" |
18 | | #include "llvm/Support/ErrorHandling.h" |
19 | | #include "llvm/Support/Host.h" |
20 | | #include "llvm/Support/MathExtras.h" |
21 | | #include <cassert> |
22 | | #include <cstring> |
23 | | using namespace llvm; |
24 | | |
//===----------------------------------------------------------------------===//
// FoldingSetNodeIDRef Implementation
27 | | |
28 | | /// ComputeHash - Compute a strong hash value for this FoldingSetNodeIDRef, |
29 | | /// used to lookup the node in the FoldingSetBase. |
30 | 564M | unsigned FoldingSetNodeIDRef::ComputeHash() const { |
31 | 564M | return static_cast<unsigned>(hash_combine_range(Data, Data+Size)); |
32 | 564M | } |
33 | | |
34 | 553M | bool FoldingSetNodeIDRef::operator==(FoldingSetNodeIDRef RHS) const { |
35 | 553M | if (Size != RHS.Size553M ) return false148M ; |
36 | 404M | return memcmp(Data, RHS.Data, Size*sizeof(*Data)) == 0; |
37 | 404M | } |
38 | | |
39 | | /// Used to compare the "ordering" of two nodes as defined by the |
40 | | /// profiled bits and their ordering defined by memcmp(). |
41 | 0 | bool FoldingSetNodeIDRef::operator<(FoldingSetNodeIDRef RHS) const { |
42 | 0 | if (Size != RHS.Size) |
43 | 0 | return Size < RHS.Size; |
44 | 0 | return memcmp(Data, RHS.Data, Size*sizeof(*Data)) < 0; |
45 | 0 | } |
46 | | |
//===----------------------------------------------------------------------===//
// FoldingSetNodeID Implementation
49 | | |
50 | | /// Add* - Add various data types to Bit data. |
51 | | /// |
52 | 2.08G | void FoldingSetNodeID::AddPointer(const void *Ptr) { |
53 | 2.08G | // Note: this adds pointers to the hash using sizes and endianness that |
54 | 2.08G | // depend on the host. It doesn't matter, however, because hashing on |
55 | 2.08G | // pointer values is inherently unstable. Nothing should depend on the |
56 | 2.08G | // ordering of nodes in the folding set. |
57 | 2.08G | static_assert(sizeof(uintptr_t) <= sizeof(unsigned long long), |
58 | 2.08G | "unexpected pointer size"); |
59 | 2.08G | AddInteger(reinterpret_cast<uintptr_t>(Ptr)); |
60 | 2.08G | } |
61 | 277M | void FoldingSetNodeID::AddInteger(signed I) { |
62 | 277M | Bits.push_back(I); |
63 | 277M | } |
64 | 5.56G | void FoldingSetNodeID::AddInteger(unsigned I) { |
65 | 5.56G | Bits.push_back(I); |
66 | 5.56G | } |
67 | 97.6M | void FoldingSetNodeID::AddInteger(long I) { |
68 | 97.6M | AddInteger((unsigned long)I); |
69 | 97.6M | } |
70 | 2.29G | void FoldingSetNodeID::AddInteger(unsigned long I) { |
71 | 2.29G | if (sizeof(long) == sizeof(int)) |
72 | 0 | AddInteger(unsigned(I)); |
73 | 2.29G | else if (2.29G sizeof(long) == sizeof(long long)2.29G ) { |
74 | 2.29G | AddInteger((unsigned long long)I); |
75 | 2.29G | } else { |
76 | 18.4E | llvm_unreachable("unexpected sizeof(long)"); |
77 | 2.29G | } |
78 | 2.29G | } |
79 | 8.70M | void FoldingSetNodeID::AddInteger(long long I) { |
80 | 8.70M | AddInteger((unsigned long long)I); |
81 | 8.70M | } |
82 | 2.30G | void FoldingSetNodeID::AddInteger(unsigned long long I) { |
83 | 2.30G | AddInteger(unsigned(I)); |
84 | 2.30G | AddInteger(unsigned(I >> 32)); |
85 | 2.30G | } |
86 | | |
87 | 48.5M | void FoldingSetNodeID::AddString(StringRef String) { |
88 | 48.5M | unsigned Size = String.size(); |
89 | 48.5M | Bits.push_back(Size); |
90 | 48.5M | if (!Size48.5M ) return12.4k ; |
91 | 48.4M | |
92 | 48.4M | unsigned Units = Size / 4; |
93 | 48.4M | unsigned Pos = 0; |
94 | 48.4M | const unsigned *Base = (const unsigned*) String.data(); |
95 | 48.4M | |
96 | 48.4M | // If the string is aligned do a bulk transfer. |
97 | 48.4M | if (!((intptr_t)Base & 3)48.4M ) { |
98 | 7.62M | Bits.append(Base, Base + Units); |
99 | 7.62M | Pos = (Units + 1) * 4; |
100 | 48.4M | } else { |
101 | 40.8M | // Otherwise do it the hard way. |
102 | 40.8M | // To be compatible with above bulk transfer, we need to take endianness |
103 | 40.8M | // into account. |
104 | 40.8M | static_assert(sys::IsBigEndianHost || sys::IsLittleEndianHost, |
105 | 40.8M | "Unexpected host endianness"); |
106 | 40.8M | if (sys::IsBigEndianHost40.8M ) { |
107 | 0 | for (Pos += 4; Pos <= Size0 ; Pos += 40 ) { |
108 | 0 | unsigned V = ((unsigned char)String[Pos - 4] << 24) | |
109 | 0 | ((unsigned char)String[Pos - 3] << 16) | |
110 | 0 | ((unsigned char)String[Pos - 2] << 8) | |
111 | 0 | (unsigned char)String[Pos - 1]; |
112 | 0 | Bits.push_back(V); |
113 | 0 | } |
114 | 40.8M | } else { // Little-endian host |
115 | 125M | for (Pos += 4; Pos <= Size125M ; Pos += 484.5M ) { |
116 | 84.5M | unsigned V = ((unsigned char)String[Pos - 1] << 24) | |
117 | 84.5M | ((unsigned char)String[Pos - 2] << 16) | |
118 | 84.5M | ((unsigned char)String[Pos - 3] << 8) | |
119 | 84.5M | (unsigned char)String[Pos - 4]; |
120 | 84.5M | Bits.push_back(V); |
121 | 84.5M | } |
122 | 40.8M | } |
123 | 40.8M | } |
124 | 48.4M | |
125 | 48.4M | // With the leftover bits. |
126 | 48.4M | unsigned V = 0; |
127 | 48.4M | // Pos will have overshot size by 4 - #bytes left over. |
128 | 48.4M | // No need to take endianness into account here - this is always executed. |
129 | 48.4M | switch (Pos - Size) { |
130 | 9.83M | case 1: V = (V << 8) | (unsigned char)String[Size - 3]; 9.83M LLVM_FALLTHROUGH9.83M ; |
131 | 21.1M | case 2: V = (V << 8) | (unsigned char)String[Size - 2]; 21.1M LLVM_FALLTHROUGH21.1M ; |
132 | 45.2M | case 3: V = (V << 8) | (unsigned char)String[Size - 1]; break; |
133 | 3.22M | default: return; // Nothing left. |
134 | 45.2M | } |
135 | 45.2M | |
136 | 45.2M | Bits.push_back(V); |
137 | 45.2M | } |
138 | | |
139 | | // AddNodeID - Adds the Bit data of another ID to *this. |
140 | 231k | void FoldingSetNodeID::AddNodeID(const FoldingSetNodeID &ID) { |
141 | 231k | Bits.append(ID.Bits.begin(), ID.Bits.end()); |
142 | 231k | } |
143 | | |
144 | | /// ComputeHash - Compute a strong hash value for this FoldingSetNodeID, used to |
145 | | /// lookup the node in the FoldingSetBase. |
146 | 542M | unsigned FoldingSetNodeID::ComputeHash() const { |
147 | 542M | return FoldingSetNodeIDRef(Bits.data(), Bits.size()).ComputeHash(); |
148 | 542M | } |
149 | | |
150 | | /// operator== - Used to compare two nodes to each other. |
151 | | /// |
152 | 227M | bool FoldingSetNodeID::operator==(const FoldingSetNodeID &RHS) const { |
153 | 227M | return *this == FoldingSetNodeIDRef(RHS.Bits.data(), RHS.Bits.size()); |
154 | 227M | } |
155 | | |
156 | | /// operator== - Used to compare two nodes to each other. |
157 | | /// |
158 | 553M | bool FoldingSetNodeID::operator==(FoldingSetNodeIDRef RHS) const { |
159 | 553M | return FoldingSetNodeIDRef(Bits.data(), Bits.size()) == RHS; |
160 | 553M | } |
161 | | |
162 | | /// Used to compare the "ordering" of two nodes as defined by the |
163 | | /// profiled bits and their ordering defined by memcmp(). |
164 | 0 | bool FoldingSetNodeID::operator<(const FoldingSetNodeID &RHS) const { |
165 | 0 | return *this < FoldingSetNodeIDRef(RHS.Bits.data(), RHS.Bits.size()); |
166 | 0 | } |
167 | | |
168 | 0 | bool FoldingSetNodeID::operator<(FoldingSetNodeIDRef RHS) const { |
169 | 0 | return FoldingSetNodeIDRef(Bits.data(), Bits.size()) < RHS; |
170 | 0 | } |
171 | | |
172 | | /// Intern - Copy this node's data to a memory region allocated from the |
173 | | /// given allocator and return a FoldingSetNodeIDRef describing the |
174 | | /// interned data. |
175 | | FoldingSetNodeIDRef |
176 | 33.2M | FoldingSetNodeID::Intern(BumpPtrAllocator &Allocator) const { |
177 | 33.2M | unsigned *New = Allocator.Allocate<unsigned>(Bits.size()); |
178 | 33.2M | std::uninitialized_copy(Bits.begin(), Bits.end(), New); |
179 | 33.2M | return FoldingSetNodeIDRef(New, Bits.size()); |
180 | 33.2M | } |
181 | | |
//===----------------------------------------------------------------------===//
/// Helper functions for FoldingSetBase.
184 | | |
185 | | /// GetNextPtr - In order to save space, each bucket is a |
186 | | /// singly-linked-list. In order to make deletion more efficient, we make |
187 | | /// the list circular, so we can delete a node without computing its hash. |
188 | | /// The problem with this is that the start of the hash buckets are not |
189 | | /// Nodes. If NextInBucketPtr is a bucket pointer, this method returns null: |
190 | | /// use GetBucketPtr when this happens. |
191 | 919M | static FoldingSetBase::Node *GetNextPtr(void *NextInBucketPtr) { |
192 | 919M | // The low bit is set if this is the pointer back to the bucket. |
193 | 919M | if (reinterpret_cast<intptr_t>(NextInBucketPtr) & 1) |
194 | 168M | return nullptr; |
195 | 751M | |
196 | 751M | return static_cast<FoldingSetBase::Node*>(NextInBucketPtr); |
197 | 751M | } |
198 | | |
199 | | |
/// GetBucketPtr - Provides a casting of a bucket pointer for isNode
/// testing.
static void **GetBucketPtr(void *NextInBucketPtr) {
  intptr_t Ptr = reinterpret_cast<intptr_t>(NextInBucketPtr);
  assert((Ptr & 1) && "Not a bucket pointer");
  // Strip the low tag bit to recover the real bucket address.
  return reinterpret_cast<void**>(Ptr & ~intptr_t(1));
}
206 | | |
/// GetBucketFor - Hash the specified node ID and return the hash bucket for
/// the specified ID.
static void **GetBucketFor(unsigned Hash, void **Buckets, unsigned NumBuckets) {
  // NumBuckets is always a power of 2, so masking is equivalent to modulo.
  unsigned BucketNum = Hash & (NumBuckets-1);
  return Buckets + BucketNum;
}
214 | | |
215 | | /// AllocateBuckets - Allocated initialized bucket memory. |
216 | 13.1M | static void **AllocateBuckets(unsigned NumBuckets) { |
217 | 13.1M | void **Buckets = static_cast<void**>(calloc(NumBuckets+1, sizeof(void*))); |
218 | 13.1M | |
219 | 13.1M | if (Buckets == nullptr) |
220 | 0 | report_bad_alloc_error("Allocation of Buckets failed."); |
221 | 13.1M | |
222 | 13.1M | // Set the very last bucket to be a non-null "pointer". |
223 | 13.1M | Buckets[NumBuckets] = reinterpret_cast<void*>(-1); |
224 | 13.1M | return Buckets; |
225 | 13.1M | } |
226 | | |
//===----------------------------------------------------------------------===//
// FoldingSetBase Implementation
229 | | |
230 | 0 | void FoldingSetBase::anchor() {} |
231 | | |
232 | 12.9M | FoldingSetBase::FoldingSetBase(unsigned Log2InitSize) { |
233 | 12.9M | assert(5 < Log2InitSize && Log2InitSize < 32 && |
234 | 12.9M | "Initial hash table size out of range"); |
235 | 12.9M | NumBuckets = 1 << Log2InitSize; |
236 | 12.9M | Buckets = AllocateBuckets(NumBuckets); |
237 | 12.9M | NumNodes = 0; |
238 | 12.9M | } |
239 | | |
240 | | FoldingSetBase::FoldingSetBase(FoldingSetBase &&Arg) |
241 | 2.26k | : Buckets(Arg.Buckets), NumBuckets(Arg.NumBuckets), NumNodes(Arg.NumNodes) { |
242 | 2.26k | Arg.Buckets = nullptr; |
243 | 2.26k | Arg.NumBuckets = 0; |
244 | 2.26k | Arg.NumNodes = 0; |
245 | 2.26k | } |
246 | | |
247 | 0 | FoldingSetBase &FoldingSetBase::operator=(FoldingSetBase &&RHS) { |
248 | 0 | free(Buckets); // This may be null if the set is in a moved-from state. |
249 | 0 | Buckets = RHS.Buckets; |
250 | 0 | NumBuckets = RHS.NumBuckets; |
251 | 0 | NumNodes = RHS.NumNodes; |
252 | 0 | RHS.Buckets = nullptr; |
253 | 0 | RHS.NumBuckets = 0; |
254 | 0 | RHS.NumNodes = 0; |
255 | 0 | return *this; |
256 | 0 | } |
257 | | |
258 | 12.4M | FoldingSetBase::~FoldingSetBase() { |
259 | 12.4M | free(Buckets); |
260 | 12.4M | } |
261 | | |
262 | 3.42M | void FoldingSetBase::clear() { |
263 | 3.42M | // Set all but the last bucket to null pointers. |
264 | 3.42M | memset(Buckets, 0, NumBuckets*sizeof(void*)); |
265 | 3.42M | |
266 | 3.42M | // Set the very last bucket to be a non-null "pointer". |
267 | 3.42M | Buckets[NumBuckets] = reinterpret_cast<void*>(-1); |
268 | 3.42M | |
269 | 3.42M | // Reset the node count to zero. |
270 | 3.42M | NumNodes = 0; |
271 | 3.42M | } |
272 | | |
273 | 131k | void FoldingSetBase::GrowBucketCount(unsigned NewBucketCount) { |
274 | 131k | assert((NewBucketCount > NumBuckets) && "Can't shrink a folding set with GrowBucketCount"); |
275 | 131k | assert(isPowerOf2_32(NewBucketCount) && "Bad bucket count!"); |
276 | 131k | void **OldBuckets = Buckets; |
277 | 131k | unsigned OldNumBuckets = NumBuckets; |
278 | 131k | |
279 | 131k | // Clear out new buckets. |
280 | 131k | Buckets = AllocateBuckets(NewBucketCount); |
281 | 131k | // Set NumBuckets only if allocation of new buckets was succesful |
282 | 131k | NumBuckets = NewBucketCount; |
283 | 131k | NumNodes = 0; |
284 | 131k | |
285 | 131k | // Walk the old buckets, rehashing nodes into their new place. |
286 | 131k | FoldingSetNodeID TempID; |
287 | 21.0M | for (unsigned i = 0; i != OldNumBuckets21.0M ; ++i20.9M ) { |
288 | 20.9M | void *Probe = OldBuckets[i]; |
289 | 20.9M | if (!Probe20.9M ) continue3.10M ; |
290 | 58.9M | while (Node *17.8M NodeInBucket58.9M = GetNextPtr(Probe)) { |
291 | 41.0M | // Figure out the next link, remove NodeInBucket from the old link. |
292 | 41.0M | Probe = NodeInBucket->getNextInBucket(); |
293 | 41.0M | NodeInBucket->SetNextInBucket(nullptr); |
294 | 41.0M | |
295 | 41.0M | // Insert the node into the new bucket, after recomputing the hash. |
296 | 41.0M | InsertNode(NodeInBucket, |
297 | 41.0M | GetBucketFor(ComputeNodeHash(NodeInBucket, TempID), |
298 | 41.0M | Buckets, NumBuckets)); |
299 | 41.0M | TempID.clear(); |
300 | 41.0M | } |
301 | 20.9M | } |
302 | 131k | |
303 | 131k | free(OldBuckets); |
304 | 131k | } |
305 | | |
306 | | /// GrowHashTable - Double the size of the hash table and rehash everything. |
307 | | /// |
308 | 124k | void FoldingSetBase::GrowHashTable() { |
309 | 124k | GrowBucketCount(NumBuckets * 2); |
310 | 124k | } |
311 | | |
312 | 6.88k | void FoldingSetBase::reserve(unsigned EltCount) { |
313 | 6.88k | // This will give us somewhere between EltCount / 2 and |
314 | 6.88k | // EltCount buckets. This puts us in the load factor |
315 | 6.88k | // range of 1.0 - 2.0. |
316 | 6.88k | if(EltCount < capacity()) |
317 | 8 | return; |
318 | 6.87k | GrowBucketCount(PowerOf2Floor(EltCount)); |
319 | 6.87k | } |
320 | | |
321 | | /// FindNodeOrInsertPos - Look up the node specified by ID. If it exists, |
322 | | /// return it. If not, return the insertion token that will make insertion |
323 | | /// faster. |
324 | | FoldingSetBase::Node * |
325 | | FoldingSetBase::FindNodeOrInsertPos(const FoldingSetNodeID &ID, |
326 | 518M | void *&InsertPos) { |
327 | 518M | unsigned IDHash = ID.ComputeHash(); |
328 | 518M | void **Bucket = GetBucketFor(IDHash, Buckets, NumBuckets); |
329 | 518M | void *Probe = *Bucket; |
330 | 518M | |
331 | 518M | InsertPos = nullptr; |
332 | 518M | |
333 | 518M | FoldingSetNodeID TempID; |
334 | 753M | while (Node *NodeInBucket753M = GetNextPtr(Probe)) { |
335 | 554M | if (NodeEquals(NodeInBucket, ID, IDHash, TempID)) |
336 | 319M | return NodeInBucket; |
337 | 234M | TempID.clear(); |
338 | 234M | |
339 | 234M | Probe = NodeInBucket->getNextInBucket(); |
340 | 234M | } |
341 | 518M | |
342 | 518M | // Didn't find the node, return null with the bucket as the InsertPos. |
343 | 198M | InsertPos = Bucket; |
344 | 198M | return nullptr; |
345 | 518M | } |
346 | | |
347 | | /// InsertNode - Insert the specified node into the folding set, knowing that it |
348 | | /// is not already in the map. InsertPos must be obtained from |
349 | | /// FindNodeOrInsertPos. |
350 | 229M | void FoldingSetBase::InsertNode(Node *N, void *InsertPos) { |
351 | 229M | assert(!N->getNextInBucket()); |
352 | 229M | // Do we need to grow the hashtable? |
353 | 229M | if (NumNodes+1 > capacity()229M ) { |
354 | 124k | GrowHashTable(); |
355 | 124k | FoldingSetNodeID TempID; |
356 | 124k | InsertPos = GetBucketFor(ComputeNodeHash(N, TempID), Buckets, NumBuckets); |
357 | 124k | } |
358 | 229M | |
359 | 229M | ++NumNodes; |
360 | 229M | |
361 | 229M | /// The insert position is actually a bucket pointer. |
362 | 229M | void **Bucket = static_cast<void**>(InsertPos); |
363 | 229M | |
364 | 229M | void *Next = *Bucket; |
365 | 229M | |
366 | 229M | // If this is the first insertion into this bucket, its next pointer will be |
367 | 229M | // null. Pretend as if it pointed to itself, setting the low bit to indicate |
368 | 229M | // that it is a pointer to the bucket. |
369 | 229M | if (!Next) |
370 | 145M | Next = reinterpret_cast<void*>(reinterpret_cast<intptr_t>(Bucket)|1); |
371 | 229M | |
372 | 229M | // Set the node's next pointer, and make the bucket point to the node. |
373 | 229M | N->SetNextInBucket(Next); |
374 | 229M | *Bucket = N; |
375 | 229M | } |
376 | | |
377 | | /// RemoveNode - Remove a node from the folding set, returning true if one was |
378 | | /// removed or false if the node was not in the folding set. |
379 | 88.7M | bool FoldingSetBase::RemoveNode(Node *N) { |
380 | 88.7M | // Because each bucket is a circular list, we don't need to compute N's hash |
381 | 88.7M | // to remove it. |
382 | 88.7M | void *Ptr = N->getNextInBucket(); |
383 | 88.7M | if (!Ptr88.7M ) return false13.6M ; // Not in folding set. |
384 | 75.1M | |
385 | 75.1M | --NumNodes; |
386 | 75.1M | N->SetNextInBucket(nullptr); |
387 | 75.1M | |
388 | 75.1M | // Remember what N originally pointed to, either a bucket or another node. |
389 | 75.1M | void *NodeNextPtr = Ptr; |
390 | 75.1M | |
391 | 75.1M | // Chase around the list until we find the node (or bucket) which points to N. |
392 | 106M | while (true106M ) { |
393 | 106M | if (Node *NodeInBucket106M = GetNextPtr(Ptr)) { |
394 | 31.3M | // Advance pointer. |
395 | 31.3M | Ptr = NodeInBucket->getNextInBucket(); |
396 | 31.3M | |
397 | 31.3M | // We found a node that points to N, change it to point to N's next node, |
398 | 31.3M | // removing N from the list. |
399 | 31.3M | if (Ptr == N31.3M ) { |
400 | 9.40M | NodeInBucket->SetNextInBucket(NodeNextPtr); |
401 | 9.40M | return true; |
402 | 9.40M | } |
403 | 75.1M | } else { |
404 | 75.1M | void **Bucket = GetBucketPtr(Ptr); |
405 | 75.1M | Ptr = *Bucket; |
406 | 75.1M | |
407 | 75.1M | // If we found that the bucket points to N, update the bucket to point to |
408 | 75.1M | // whatever is next. |
409 | 75.1M | if (Ptr == N75.1M ) { |
410 | 65.7M | *Bucket = NodeNextPtr; |
411 | 65.7M | return true; |
412 | 65.7M | } |
413 | 75.1M | } |
414 | 106M | } |
415 | 88.7M | } |
416 | | |
417 | | /// GetOrInsertNode - If there is an existing simple Node exactly |
418 | | /// equal to the specified node, return it. Otherwise, insert 'N' and it |
419 | | /// instead. |
420 | 16.3M | FoldingSetBase::Node *FoldingSetBase::GetOrInsertNode(FoldingSetBase::Node *N) { |
421 | 16.3M | FoldingSetNodeID ID; |
422 | 16.3M | GetNodeProfile(N, ID); |
423 | 16.3M | void *IP; |
424 | 16.3M | if (Node *E = FindNodeOrInsertPos(ID, IP)) |
425 | 65.8k | return E; |
426 | 16.2M | InsertNode(N, IP); |
427 | 16.2M | return N; |
428 | 16.2M | } |
429 | | |
//===----------------------------------------------------------------------===//
// FoldingSetIteratorImpl Implementation
432 | | |
433 | 262k | FoldingSetIteratorImpl::FoldingSetIteratorImpl(void **Bucket) { |
434 | 262k | // Skip to the first non-null non-self-cycle bucket. |
435 | 5.56M | while (*Bucket != reinterpret_cast<void*>(-1) && |
436 | 5.56M | (!*Bucket || 5.39M !GetNextPtr(*Bucket)95.0k )) |
437 | 5.30M | ++Bucket; |
438 | 262k | |
439 | 262k | NodePtr = static_cast<FoldingSetNode*>(*Bucket); |
440 | 262k | } |
441 | | |
442 | 616k | void FoldingSetIteratorImpl::advance() { |
443 | 616k | // If there is another link within this bucket, go to it. |
444 | 616k | void *Probe = NodePtr->getNextInBucket(); |
445 | 616k | |
446 | 616k | if (FoldingSetNode *NextNodeInBucket = GetNextPtr(Probe)) |
447 | 73.4k | NodePtr = NextNodeInBucket; |
448 | 542k | else { |
449 | 542k | // Otherwise, this is the last link in this bucket. |
450 | 542k | void **Bucket = GetBucketPtr(Probe); |
451 | 542k | |
452 | 542k | // Skip to the next non-null non-self-cycle bucket. |
453 | 42.9M | do { |
454 | 42.9M | ++Bucket; |
455 | 542k | } while (*Bucket != reinterpret_cast<void*>(-1) && |
456 | 42.9M | (!*Bucket || 42.8M !GetNextPtr(*Bucket)447k )); |
457 | 542k | |
458 | 542k | NodePtr = static_cast<FoldingSetNode*>(*Bucket); |
459 | 542k | } |
460 | 616k | } |
461 | | |
//===----------------------------------------------------------------------===//
// FoldingSetBucketIteratorImpl Implementation
464 | | |
465 | 0 | FoldingSetBucketIteratorImpl::FoldingSetBucketIteratorImpl(void **Bucket) { |
466 | 0 | Ptr = (!*Bucket || !GetNextPtr(*Bucket)0 ) ? (void*) Bucket0 : *Bucket0 ; |
467 | 0 | } |