/Users/buildslave/jenkins/workspace/clang-stage2-coverage-R/llvm/lib/Target/Hexagon/RDFGraph.cpp
Line | Count | Source (jump to first uncovered line) |
1 | | //===- RDFGraph.cpp -------------------------------------------------------===// |
2 | | // |
3 | | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. |
4 | | // See https://llvm.org/LICENSE.txt for license information. |
5 | | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception |
6 | | // |
7 | | //===----------------------------------------------------------------------===// |
8 | | // |
9 | | // Target-independent, SSA-based data flow graph for register data flow (RDF). |
10 | | // |
11 | | #include "RDFGraph.h" |
12 | | #include "RDFRegisters.h" |
13 | | #include "llvm/ADT/BitVector.h" |
14 | | #include "llvm/ADT/STLExtras.h" |
15 | | #include "llvm/ADT/SetVector.h" |
16 | | #include "llvm/CodeGen/MachineBasicBlock.h" |
17 | | #include "llvm/CodeGen/MachineDominanceFrontier.h" |
18 | | #include "llvm/CodeGen/MachineDominators.h" |
19 | | #include "llvm/CodeGen/MachineFunction.h" |
20 | | #include "llvm/CodeGen/MachineInstr.h" |
21 | | #include "llvm/CodeGen/MachineOperand.h" |
22 | | #include "llvm/CodeGen/MachineRegisterInfo.h" |
23 | | #include "llvm/CodeGen/TargetInstrInfo.h" |
24 | | #include "llvm/CodeGen/TargetLowering.h" |
25 | | #include "llvm/CodeGen/TargetRegisterInfo.h" |
26 | | #include "llvm/CodeGen/TargetSubtargetInfo.h" |
27 | | #include "llvm/IR/Function.h" |
28 | | #include "llvm/MC/LaneBitmask.h" |
29 | | #include "llvm/MC/MCInstrDesc.h" |
30 | | #include "llvm/MC/MCRegisterInfo.h" |
31 | | #include "llvm/Support/Debug.h" |
32 | | #include "llvm/Support/ErrorHandling.h" |
33 | | #include "llvm/Support/raw_ostream.h" |
34 | | #include <algorithm> |
35 | | #include <cassert> |
36 | | #include <cstdint> |
37 | | #include <cstring> |
38 | | #include <iterator> |
39 | | #include <set> |
40 | | #include <utility> |
41 | | #include <vector> |
42 | | |
43 | | using namespace llvm; |
44 | | using namespace rdf; |
45 | | |
// Printing functions. Have them here first, so that the rest of the code
// can use them.
namespace llvm {
namespace rdf {

// Print the ":<mask>" qualifier, but only when the mask does not cover
// all lanes.
raw_ostream &operator<< (raw_ostream &OS, const PrintLaneMaskOpt &P) {
  if (!P.Mask.all())
    OS << ':' << PrintLaneMask(P.Mask);
  return OS;
}

// Print a register reference: register name when the id is in the physical
// register range, "#<id>" otherwise, followed by an optional lane mask.
raw_ostream &operator<< (raw_ostream &OS, const Print<RegisterRef> &P) {
  auto &TRI = P.G.getTRI();
  if (P.Obj.Reg > 0 && P.Obj.Reg < TRI.getNumRegs())
    OS << TRI.getName(P.Obj.Reg);
  else
    OS << '#' << P.Obj.Reg;
  OS << PrintLaneMaskOpt(P.Obj.Mask);
  return OS;
}

// Print a node id preceded by a one-letter type/kind tag (f/b/s/p for code
// nodes, u/d/b for refs), flag markers (/ undef, \ dead, + preserving,
// ~ clobbering), and followed by '"' for shadow refs.
raw_ostream &operator<< (raw_ostream &OS, const Print<NodeId> &P) {
  auto NA = P.G.addr<NodeBase*>(P.Obj);
  uint16_t Attrs = NA.Addr->getAttrs();
  uint16_t Kind = NodeAttrs::kind(Attrs);
  uint16_t Flags = NodeAttrs::flags(Attrs);
  switch (NodeAttrs::type(Attrs)) {
    case NodeAttrs::Code:
      switch (Kind) {
        case NodeAttrs::Func:   OS << 'f'; break;
        case NodeAttrs::Block:  OS << 'b'; break;
        case NodeAttrs::Stmt:   OS << 's'; break;
        case NodeAttrs::Phi:    OS << 'p'; break;
        default:                OS << "c?"; break;
      }
      break;
    case NodeAttrs::Ref:
      if (Flags & NodeAttrs::Undef)
        OS << '/';
      if (Flags & NodeAttrs::Dead)
        OS << '\\';
      if (Flags & NodeAttrs::Preserving)
        OS << '+';
      if (Flags & NodeAttrs::Clobbering)
        OS << '~';
      switch (Kind) {
        case NodeAttrs::Use:    OS << 'u'; break;
        case NodeAttrs::Def:    OS << 'd'; break;
        case NodeAttrs::Block:  OS << 'b'; break;
        default:                OS << "r?"; break;
      }
      break;
    default:
      OS << '?';
      break;
  }
  OS << P.Obj;
  if (Flags & NodeAttrs::Shadow)
    OS << '"';
  return OS;
}

// Common prefix for def/use printing: "<id><<reg>>", with '!' appended for
// fixed-register references.
static void printRefHeader(raw_ostream &OS, const NodeAddr<RefNode*> RA,
                const DataFlowGraph &G) {
  OS << Print<NodeId>(RA.Id, G) << '<'
     << Print<RegisterRef>(RA.Addr->getRegRef(G), G) << '>';
  if (RA.Addr->getFlags() & NodeAttrs::Fixed)
    OS << '!';
}

// Def node: header, then "(reaching-def,reached-def,reached-use):sibling".
// Empty positions (id 0) are printed as nothing.
raw_ostream &operator<< (raw_ostream &OS, const Print<NodeAddr<DefNode*>> &P) {
  printRefHeader(OS, P.Obj, P.G);
  OS << '(';
  if (NodeId N = P.Obj.Addr->getReachingDef())
    OS << Print<NodeId>(N, P.G);
  OS << ',';
  if (NodeId N = P.Obj.Addr->getReachedDef())
    OS << Print<NodeId>(N, P.G);
  OS << ',';
  if (NodeId N = P.Obj.Addr->getReachedUse())
    OS << Print<NodeId>(N, P.G);
  OS << "):";
  if (NodeId N = P.Obj.Addr->getSibling())
    OS << Print<NodeId>(N, P.G);
  return OS;
}

// Use node: header, then "(reaching-def):sibling".
raw_ostream &operator<< (raw_ostream &OS, const Print<NodeAddr<UseNode*>> &P) {
  printRefHeader(OS, P.Obj, P.G);
  OS << '(';
  if (NodeId N = P.Obj.Addr->getReachingDef())
    OS << Print<NodeId>(N, P.G);
  OS << "):";
  if (NodeId N = P.Obj.Addr->getSibling())
    OS << Print<NodeId>(N, P.G);
  return OS;
}

// Phi use: header, then "(reaching-def,predecessor-block):sibling".
raw_ostream &operator<< (raw_ostream &OS,
      const Print<NodeAddr<PhiUseNode*>> &P) {
  printRefHeader(OS, P.Obj, P.G);
  OS << '(';
  if (NodeId N = P.Obj.Addr->getReachingDef())
    OS << Print<NodeId>(N, P.G);
  OS << ',';
  if (NodeId N = P.Obj.Addr->getPredecessor())
    OS << Print<NodeId>(N, P.G);
  OS << "):";
  if (NodeId N = P.Obj.Addr->getSibling())
    OS << Print<NodeId>(N, P.G);
  return OS;
}

// Generic ref printer: dispatch on the ref kind (def, use, or phi use).
raw_ostream &operator<< (raw_ostream &OS, const Print<NodeAddr<RefNode*>> &P) {
  switch (P.Obj.Addr->getKind()) {
    case NodeAttrs::Def:
      OS << PrintNode<DefNode*>(P.Obj, P.G);
      break;
    case NodeAttrs::Use:
      if (P.Obj.Addr->getFlags() & NodeAttrs::PhiRef)
        OS << PrintNode<PhiUseNode*>(P.Obj, P.G);
      else
        OS << PrintNode<UseNode*>(P.Obj, P.G);
      break;
  }
  return OS;
}

// Space-separated list of node ids.
raw_ostream &operator<< (raw_ostream &OS, const Print<NodeList> &P) {
  unsigned N = P.Obj.size();
  for (auto I : P.Obj) {
    OS << Print<NodeId>(I.Id, P.G);
    if (--N)
      OS << ' ';
  }
  return OS;
}

// Space-separated set of node ids.
raw_ostream &operator<< (raw_ostream &OS, const Print<NodeSet> &P) {
  unsigned N = P.Obj.size();
  for (auto I : P.Obj) {
    OS << Print<NodeId>(I, P.G);
    if (--N)
      OS << ' ';
  }
  return OS;
}

namespace {

  // Helper for printing a node list as full nodes (not just ids),
  // comma-separated.
  template <typename T>
  struct PrintListV {
    PrintListV(const NodeList &L, const DataFlowGraph &G) : List(L), G(G) {}

    using Type = T;
    const NodeList &List;
    const DataFlowGraph &G;
  };

  template <typename T>
  raw_ostream &operator<< (raw_ostream &OS, const PrintListV<T> &P) {
    unsigned N = P.List.size();
    for (NodeAddr<T> A : P.List) {
      OS << PrintNode<T>(A, P.G);
      if (--N)
        OS << ", ";
    }
    return OS;
  }

} // end anonymous namespace

// Phi node: "<id>: phi [<members>]".
raw_ostream &operator<< (raw_ostream &OS, const Print<NodeAddr<PhiNode*>> &P) {
  OS << Print<NodeId>(P.Obj.Id, P.G) << ": phi ["
     << PrintListV<RefNode*>(P.Obj.Addr->members(P.G), P.G) << ']';
  return OS;
}

// Statement node: "<id>: <opcode> [<members>]", with the branch/call target
// printed after the opcode for readability.
raw_ostream &operator<<(raw_ostream &OS, const Print<NodeAddr<StmtNode *>> &P) {
  const MachineInstr &MI = *P.Obj.Addr->getCode();
  unsigned Opc = MI.getOpcode();
  OS << Print<NodeId>(P.Obj.Id, P.G) << ": " << P.G.getTII().getName(Opc);
  // Print the target for calls and branches (for readability).
  if (MI.isCall() || MI.isBranch()) {
    MachineInstr::const_mop_iterator T =
        llvm::find_if(MI.operands(),
                      [] (const MachineOperand &Op) -> bool {
                        return Op.isMBB() || Op.isGlobal() || Op.isSymbol();
                      });
    if (T != MI.operands_end()) {
      OS << ' ';
      if (T->isMBB())
        OS << printMBBReference(*T->getMBB());
      else if (T->isGlobal())
        OS << T->getGlobal()->getName();
      else if (T->isSymbol())
        OS << T->getSymbolName();
    }
  }
  OS << " [" << PrintListV<RefNode*>(P.Obj.Addr->members(P.G), P.G) << ']';
  return OS;
}

// Generic instruction printer: dispatch to phi or statement printing.
raw_ostream &operator<< (raw_ostream &OS,
      const Print<NodeAddr<InstrNode*>> &P) {
  switch (P.Obj.Addr->getKind()) {
    case NodeAttrs::Phi:
      OS << PrintNode<PhiNode*>(P.Obj, P.G);
      break;
    case NodeAttrs::Stmt:
      OS << PrintNode<StmtNode*>(P.Obj, P.G);
      break;
    default:
      OS << "instr? " << Print<NodeId>(P.Obj.Id, P.G);
      break;
  }
  return OS;
}

// Block node: header line with predecessor/successor MBB numbers, then one
// line per member instruction.
raw_ostream &operator<< (raw_ostream &OS,
      const Print<NodeAddr<BlockNode*>> &P) {
  MachineBasicBlock *BB = P.Obj.Addr->getCode();
  unsigned NP = BB->pred_size();
  std::vector<int> Ns;
  auto PrintBBs = [&OS] (std::vector<int> Ns) -> void {
    unsigned N = Ns.size();
    for (int I : Ns) {
      OS << "%bb." << I;
      if (--N)
        OS << ", ";
    }
  };

  OS << Print<NodeId>(P.Obj.Id, P.G) << ": --- " << printMBBReference(*BB)
     << " --- preds(" << NP << "): ";
  for (MachineBasicBlock *B : BB->predecessors())
    Ns.push_back(B->getNumber());
  PrintBBs(Ns);

  unsigned NS = BB->succ_size();
  OS << " succs(" << NS << "): ";
  Ns.clear();
  for (MachineBasicBlock *B : BB->successors())
    Ns.push_back(B->getNumber());
  PrintBBs(Ns);
  OS << '\n';

  for (auto I : P.Obj.Addr->members(P.G))
    OS << PrintNode<InstrNode*>(I, P.G) << '\n';
  return OS;
}

// Function node: full dump of the graph, one block at a time.
raw_ostream &operator<<(raw_ostream &OS, const Print<NodeAddr<FuncNode *>> &P) {
  OS << "DFG dump:[\n" << Print<NodeId>(P.Obj.Id, P.G) << ": Function: "
     << P.Obj.Addr->getCode()->getName() << '\n';
  for (auto I : P.Obj.Addr->members(P.G))
    OS << PrintNode<BlockNode*>(I, P.G) << '\n';
  OS << "]\n";
  return OS;
}

// Register set: "{ R1 R2 ... }".
raw_ostream &operator<< (raw_ostream &OS, const Print<RegisterSet> &P) {
  OS << '{';
  for (auto I : P.Obj)
    OS << ' ' << Print<RegisterRef>(I, P.G);
  OS << " }";
  return OS;
}

// Register aggregate: delegate to its own print method.
raw_ostream &operator<< (raw_ostream &OS, const Print<RegisterAggr> &P) {
  P.Obj.print(OS);
  return OS;
}

// Definition stack: defs from top to bottom, space-separated, skipping the
// block delimiters.
raw_ostream &operator<< (raw_ostream &OS,
      const Print<DataFlowGraph::DefStack> &P) {
  for (auto I = P.Obj.top(), E = P.Obj.bottom(); I != E; ) {
    OS << Print<NodeId>(I->Id, P.G)
       << '<' << Print<RegisterRef>(I->Addr->getRegRef(P.G), P.G) << '>';
    I.down();
    if (I != E)
      OS << ' ';
  }
  return OS;
}

} // end namespace rdf
} // end namespace llvm
334 | | |
335 | | // Node allocation functions. |
336 | | // |
337 | | // Node allocator is like a slab memory allocator: it allocates blocks of |
338 | | // memory in sizes that are multiples of the size of a node. Each block has |
339 | | // the same size. Nodes are allocated from the currently active block, and |
340 | | // when it becomes full, a new one is created. |
341 | | // There is a mapping scheme between node id and its location in a block, |
342 | | // and within that block is described in the header file. |
343 | | // |
344 | 6.71k | void NodeAllocator::startNewBlock() { |
345 | 6.71k | void *T = MemPool.Allocate(NodesPerBlock*NodeMemSize, NodeMemSize); |
346 | 6.71k | char *P = static_cast<char*>(T); |
347 | 6.71k | Blocks.push_back(P); |
348 | 6.71k | // Check if the block index is still within the allowed range, i.e. less |
349 | 6.71k | // than 2^N, where N is the number of bits in NodeId for the block index. |
350 | 6.71k | // BitsPerIndex is the number of bits per node index. |
351 | 6.71k | assert((Blocks.size() < ((size_t)1 << (8*sizeof(NodeId)-BitsPerIndex))) && |
352 | 6.71k | "Out of bits for block index"); |
353 | 6.71k | ActiveEnd = P; |
354 | 6.71k | } |
355 | | |
356 | 309k | bool NodeAllocator::needNewBlock() { |
357 | 309k | if (Blocks.empty()) |
358 | 6.70k | return true; |
359 | 302k | |
360 | 302k | char *ActiveBegin = Blocks.back(); |
361 | 302k | uint32_t Index = (ActiveEnd-ActiveBegin)/NodeMemSize; |
362 | 302k | return Index >= NodesPerBlock; |
363 | 302k | } |
364 | | |
365 | 309k | NodeAddr<NodeBase*> NodeAllocator::New() { |
366 | 309k | if (needNewBlock()) |
367 | 6.71k | startNewBlock(); |
368 | 309k | |
369 | 309k | uint32_t ActiveB = Blocks.size()-1; |
370 | 309k | uint32_t Index = (ActiveEnd - Blocks[ActiveB])/NodeMemSize; |
371 | 309k | NodeAddr<NodeBase*> NA = { reinterpret_cast<NodeBase*>(ActiveEnd), |
372 | 309k | makeId(ActiveB, Index) }; |
373 | 309k | ActiveEnd += NodeMemSize; |
374 | 309k | return NA; |
375 | 309k | } |
376 | | |
377 | 100k | NodeId NodeAllocator::id(const NodeBase *P) const { |
378 | 100k | uintptr_t A = reinterpret_cast<uintptr_t>(P); |
379 | 101k | for (unsigned i = 0, n = Blocks.size(); i != n; ++i986 ) { |
380 | 101k | uintptr_t B = reinterpret_cast<uintptr_t>(Blocks[i]); |
381 | 101k | if (A < B || A >= B + NodesPerBlock*NodeMemSize101k ) |
382 | 986 | continue; |
383 | 100k | uint32_t Idx = (A-B)/NodeMemSize; |
384 | 100k | return makeId(i, Idx); |
385 | 100k | } |
386 | 100k | llvm_unreachable0 ("Invalid node address"); |
387 | 100k | } |
388 | | |
389 | 6.70k | void NodeAllocator::clear() { |
390 | 6.70k | MemPool.Reset(); |
391 | 6.70k | Blocks.clear(); |
392 | 6.70k | ActiveEnd = nullptr; |
393 | 6.70k | } |
394 | | |
395 | | // Insert node NA after "this" in the circular chain. |
396 | 194k | void NodeBase::append(NodeAddr<NodeBase*> NA) { |
397 | 194k | NodeId Nx = Next; |
398 | 194k | // If NA is already "next", do nothing. |
399 | 194k | if (Next != NA.Id) { |
400 | 194k | Next = NA.Id; |
401 | 194k | NA.Addr->Next = Nx; |
402 | 194k | } |
403 | 194k | } |
404 | | |
405 | | // Fundamental node manipulator functions. |
406 | | |
407 | | // Obtain the register reference from a reference node. |
408 | 2.97M | RegisterRef RefNode::getRegRef(const DataFlowGraph &G) const { |
409 | 2.97M | assert(NodeAttrs::type(Attrs) == NodeAttrs::Ref); |
410 | 2.97M | if (NodeAttrs::flags(Attrs) & NodeAttrs::PhiRef) |
411 | 743k | return G.unpack(Ref.PR); |
412 | 2.23M | assert(Ref.Op != nullptr); |
413 | 2.23M | return G.makeRegRef(*Ref.Op); |
414 | 2.23M | } |
415 | | |
416 | | // Set the register reference in the reference node directly (for references |
417 | | // in phi nodes). |
418 | 48.9k | void RefNode::setRegRef(RegisterRef RR, DataFlowGraph &G) { |
419 | 48.9k | assert(NodeAttrs::type(Attrs) == NodeAttrs::Ref); |
420 | 48.9k | assert(NodeAttrs::flags(Attrs) & NodeAttrs::PhiRef); |
421 | 48.9k | Ref.PR = G.pack(RR); |
422 | 48.9k | } |
423 | | |
424 | | // Set the register reference in the reference node based on a machine |
425 | | // operand (for references in statement nodes). |
426 | 144k | void RefNode::setRegRef(MachineOperand *Op, DataFlowGraph &G) { |
427 | 144k | assert(NodeAttrs::type(Attrs) == NodeAttrs::Ref); |
428 | 144k | assert(!(NodeAttrs::flags(Attrs) & NodeAttrs::PhiRef)); |
429 | 144k | (void)G; |
430 | 144k | Ref.Op = Op; |
431 | 144k | } |
432 | | |
433 | | // Get the owner of a given reference node. |
434 | 481k | NodeAddr<NodeBase*> RefNode::getOwner(const DataFlowGraph &G) { |
435 | 481k | NodeAddr<NodeBase*> NA = G.addr<NodeBase*>(getNext()); |
436 | 481k | |
437 | 1.81M | while (NA.Addr != this) { |
438 | 1.81M | if (NA.Addr->getType() == NodeAttrs::Code) |
439 | 481k | return NA; |
440 | 1.32M | NA = G.addr<NodeBase*>(NA.Addr->getNext()); |
441 | 1.32M | } |
442 | 481k | llvm_unreachable0 ("No owner in circular list"); |
443 | 481k | } |
444 | | |
445 | | // Connect the def node to the reaching def node. |
446 | 51.0k | void DefNode::linkToDef(NodeId Self, NodeAddr<DefNode*> DA) { |
447 | 51.0k | Ref.RD = DA.Id; |
448 | 51.0k | Ref.Sib = DA.Addr->getReachedDef(); |
449 | 51.0k | DA.Addr->setReachedDef(Self); |
450 | 51.0k | } |
451 | | |
452 | | // Connect the use node to the reaching def node. |
453 | 101k | void UseNode::linkToDef(NodeId Self, NodeAddr<DefNode*> DA) { |
454 | 101k | Ref.RD = DA.Id; |
455 | 101k | Ref.Sib = DA.Addr->getReachedUse(); |
456 | 101k | DA.Addr->setReachedUse(Self); |
457 | 101k | } |
458 | | |
459 | | // Get the first member of the code node. |
460 | 1.04M | NodeAddr<NodeBase*> CodeNode::getFirstMember(const DataFlowGraph &G) const { |
461 | 1.04M | if (Code.FirstM == 0) |
462 | 5.27k | return NodeAddr<NodeBase*>(); |
463 | 1.03M | return G.addr<NodeBase*>(Code.FirstM); |
464 | 1.03M | } |
465 | | |
466 | | // Get the last member of the code node. |
467 | 264k | NodeAddr<NodeBase*> CodeNode::getLastMember(const DataFlowGraph &G) const { |
468 | 264k | if (Code.LastM == 0) |
469 | 100k | return NodeAddr<NodeBase*>(); |
470 | 163k | return G.addr<NodeBase*>(Code.LastM); |
471 | 163k | } |
472 | | |
473 | | // Add node NA at the end of the member list of the given code node. |
474 | 264k | void CodeNode::addMember(NodeAddr<NodeBase*> NA, const DataFlowGraph &G) { |
475 | 264k | NodeAddr<NodeBase*> ML = getLastMember(G); |
476 | 264k | if (ML.Id != 0) { |
477 | 163k | ML.Addr->append(NA); |
478 | 163k | } else { |
479 | 100k | Code.FirstM = NA.Id; |
480 | 100k | NodeId Self = G.id(this); |
481 | 100k | NA.Addr->setNext(Self); |
482 | 100k | } |
483 | 264k | Code.LastM = NA.Id; |
484 | 264k | } |
485 | | |
486 | | // Add node NA after member node MA in the given code node. |
487 | | void CodeNode::addMemberAfter(NodeAddr<NodeBase*> MA, NodeAddr<NodeBase*> NA, |
488 | 31.0k | const DataFlowGraph &G) { |
489 | 31.0k | MA.Addr->append(NA); |
490 | 31.0k | if (Code.LastM == MA.Id) |
491 | 3.84k | Code.LastM = NA.Id; |
492 | 31.0k | } |
493 | | |
// Remove member node NA from the given code node. The members form a
// circular singly-linked list, so removal requires locating NA's
// predecessor and keeping the FirstM/LastM markers consistent.
void CodeNode::removeMember(NodeAddr<NodeBase*> NA, const DataFlowGraph &G) {
  NodeAddr<NodeBase*> MA = getFirstMember(G);
  assert(MA.Id != 0);

  // Special handling if the member to remove is the first member.
  if (MA.Id == NA.Id) {
    if (Code.LastM == MA.Id) {
      // If it is the only member, set both first and last to 0.
      Code.FirstM = Code.LastM = 0;
    } else {
      // Otherwise, advance the first member.
      Code.FirstM = MA.Addr->getNext();
    }
    return;
  }

  // General case: scan for the node whose "next" is NA and unlink NA.
  while (MA.Addr != this) {
    NodeId MX = MA.Addr->getNext();
    if (MX == NA.Id) {
      MA.Addr->setNext(NA.Addr->getNext());
      // If the member to remove happens to be the last one, update the
      // LastM indicator.
      if (Code.LastM == NA.Id)
        Code.LastM = MA.Id;
      return;
    }
    MA = G.addr<NodeBase*>(MX);
  }
  llvm_unreachable("No such member");
}
525 | | |
526 | | // Return the list of all members of the code node. |
527 | 241k | NodeList CodeNode::members(const DataFlowGraph &G) const { |
528 | 935k | static auto True = [] (NodeAddr<NodeBase*>) -> bool { return true; }; |
529 | 241k | return members_if(True, G); |
530 | 241k | } |
531 | | |
532 | | // Return the owner of the given instr node. |
533 | 29.3k | NodeAddr<NodeBase*> InstrNode::getOwner(const DataFlowGraph &G) { |
534 | 29.3k | NodeAddr<NodeBase*> NA = G.addr<NodeBase*>(getNext()); |
535 | 29.3k | |
536 | 368k | while (NA.Addr != this) { |
537 | 368k | assert(NA.Addr->getType() == NodeAttrs::Code); |
538 | 368k | if (NA.Addr->getKind() == NodeAttrs::Block) |
539 | 29.3k | return NA; |
540 | 338k | NA = G.addr<NodeBase*>(NA.Addr->getNext()); |
541 | 338k | } |
542 | 29.3k | llvm_unreachable0 ("No owner in circular list"); |
543 | 29.3k | } |
544 | | |
// Add the phi node PA to the given block node. Phis are kept as a prefix of
// the block's member list, so PA is inserted after the last existing phi
// (or at the very front if the block starts with a statement).
void BlockNode::addPhi(NodeAddr<PhiNode*> PA, const DataFlowGraph &G) {
  NodeAddr<NodeBase*> M = getFirstMember(G);
  if (M.Id == 0) {
    // Empty block: PA becomes the only member.
    addMember(PA, G);
    return;
  }

  assert(M.Addr->getType() == NodeAttrs::Code);
  if (M.Addr->getKind() == NodeAttrs::Stmt) {
    // If the first member of the block is a statement, insert the phi as
    // the first member.
    Code.FirstM = PA.Id;
    PA.Addr->setNext(M.Id);
  } else {
    // If the first member is a phi, find the last phi, and append PA to it.
    assert(M.Addr->getKind() == NodeAttrs::Phi);
    NodeAddr<NodeBase*> MN = M;
    do {
      M = MN;
      MN = G.addr<NodeBase*>(M.Addr->getNext());
      assert(MN.Addr->getType() == NodeAttrs::Code);
    } while (MN.Addr->getKind() == NodeAttrs::Phi);

    // M is the last phi.
    addMemberAfter(M, PA, G);
  }
}
573 | | |
574 | | // Find the block node corresponding to the machine basic block BB in the |
575 | | // given func node. |
576 | | NodeAddr<BlockNode*> FuncNode::findBlock(const MachineBasicBlock *BB, |
577 | 9.02k | const DataFlowGraph &G) const { |
578 | 41.2k | auto EqBB = [BB] (NodeAddr<NodeBase*> NA) -> bool { |
579 | 41.2k | return NodeAddr<BlockNode*>(NA).Addr->getCode() == BB; |
580 | 41.2k | }; |
581 | 9.02k | NodeList Ms = members_if(EqBB, G); |
582 | 9.02k | if (!Ms.empty()) |
583 | 9.02k | return Ms[0]; |
584 | 0 | return NodeAddr<BlockNode*>(); |
585 | 0 | } |
586 | | |
587 | | // Get the block node for the entry block in the given function. |
588 | 6.70k | NodeAddr<BlockNode*> FuncNode::getEntryBlock(const DataFlowGraph &G) { |
589 | 6.70k | MachineBasicBlock *EntryB = &getCode()->front(); |
590 | 6.70k | return findBlock(EntryB, G); |
591 | 6.70k | } |
592 | | |
// Target operand information.
//

// For a given instruction, check if there are any bits of RR that can remain
// unchanged across this def. A predicated instruction may leave the
// destination unmodified when its predicate is false.
bool TargetOperandInfo::isPreserving(const MachineInstr &In, unsigned OpNum)
      const {
  return TII.isPredicated(In);
}
602 | | |
603 | | // Check if the definition of RR produces an unspecified value. |
604 | | bool TargetOperandInfo::isClobbering(const MachineInstr &In, unsigned OpNum) |
605 | 63.2k | const { |
606 | 63.2k | const MachineOperand &Op = In.getOperand(OpNum); |
607 | 63.2k | if (Op.isRegMask()) |
608 | 0 | return true; |
609 | 63.2k | assert(Op.isReg()); |
610 | 63.2k | if (In.isCall()) |
611 | 4.73k | if (Op.isDef() && Op.isDead()) |
612 | 2.77k | return true; |
613 | 60.5k | return false; |
614 | 60.5k | } |
615 | | |
// Check if the given instruction specifically requires a fixed register in
// operand OpNum, i.e. one that cannot be renamed: operands of calls,
// returns, inline asm, tail calls, and registers listed as implicit
// uses/defs in the instruction descriptor.
bool TargetOperandInfo::isFixedReg(const MachineInstr &In, unsigned OpNum)
      const {
  if (In.isCall() || In.isReturn() || In.isInlineAsm())
    return true;
  // Check for a tail call.
  if (In.isBranch())
    for (const MachineOperand &O : In.operands())
      if (O.isGlobal() || O.isSymbol())
        return true;

  const MCInstrDesc &D = In.getDesc();
  if (!D.getImplicitDefs() && !D.getImplicitUses())
    return false;
  const MachineOperand &Op = In.getOperand(OpNum);
  // If there is a sub-register, treat the operand as non-fixed. Currently,
  // fixed registers are those that are listed in the descriptor as implicit
  // uses or defs, and those lists do not allow sub-registers.
  if (Op.getSubReg() != 0)
    return false;
  RegisterId Reg = Op.getReg();
  // Scan the zero-terminated implicit-def or implicit-use list for Reg.
  const MCPhysReg *ImpR = Op.isDef() ? D.getImplicitDefs()
                                     : D.getImplicitUses();
  if (!ImpR)
    return false;
  while (*ImpR)
    if (*ImpR++ == Reg)
      return true;
  return false;
}
646 | | |
//
// The data flow graph construction.
//

// Constructor: capture the machine function, the target hooks, and the
// dominance analyses by reference. PRI and LiveIns are initialized here;
// the graph contents are created separately.
DataFlowGraph::DataFlowGraph(MachineFunction &mf, const TargetInstrInfo &tii,
      const TargetRegisterInfo &tri, const MachineDominatorTree &mdt,
      const MachineDominanceFrontier &mdf, const TargetOperandInfo &toi)
    : MF(mf), TII(tii), TRI(tri), PRI(tri, mf), MDT(mdt), MDF(mdf), TOI(toi),
      LiveIns(PRI) {
}
657 | | |
// The implementation of the definition stack.
// Each register reference has its own definition stack. In particular,
// for a register references "Reg" and "Reg:subreg" will each have their
// own definition stacks.

// Construct a stack iterator, positioned either at the bottom (Pos = 0) or
// at the top-most non-delimiter entry.
DataFlowGraph::DefStack::Iterator::Iterator(const DataFlowGraph::DefStack &S,
      bool Top) : DS(S) {
  if (!Top) {
    // Initialize to bottom.
    Pos = 0;
    return;
  }
  // Initialize to the top, i.e. top-most non-delimiter (or 0, if empty).
  Pos = DS.Stack.size();
  while (Pos > 0 && DS.isDelimiter(DS.Stack[Pos-1]))
    Pos--;
}
676 | | |
677 | | // Return the size of the stack, including block delimiters. |
678 | 0 | unsigned DataFlowGraph::DefStack::size() const { |
679 | 0 | unsigned S = 0; |
680 | 0 | for (auto I = top(), E = bottom(); I != E; I.down()) |
681 | 0 | S++; |
682 | 0 | return S; |
683 | 0 | } |
684 | | |
685 | | // Remove the top entry from the stack. Remove all intervening delimiters |
686 | | // so that after this, the stack is either empty, or the top of the stack |
687 | | // is a non-delimiter. |
688 | 0 | void DataFlowGraph::DefStack::pop() { |
689 | 0 | assert(!empty()); |
690 | 0 | unsigned P = nextDown(Stack.size()); |
691 | 0 | Stack.resize(P); |
692 | 0 | } |
693 | | |
// Push a delimiter for block node N on the stack. A delimiter is encoded as
// an entry with a null address and the block's node id.
void DataFlowGraph::DefStack::start_block(NodeId N) {
  assert(N != 0);
  Stack.push_back(NodeAddr<DefNode*>(nullptr, N));
}
699 | | |
// Remove all nodes from the top of the stack, until the delimiter for
// block node N is encountered. Remove the delimiter as well. In effect,
// this will remove from the stack all definitions from block N.
void DataFlowGraph::DefStack::clear_block(NodeId N) {
  assert(N != 0);
  unsigned P = Stack.size();
  // Scan downward for the delimiter of block N.
  while (P > 0) {
    bool Found = isDelimiter(Stack[P-1], N);
    P--;
    if (Found)
      break;
  }
  // This will also remove the delimiter, if found.
  Stack.resize(P);
}
715 | | |
// Move the stack iterator up by one.
unsigned DataFlowGraph::DefStack::nextUp(unsigned P) const {
  // Get the next valid position after P (skipping all delimiters).
  // The input position P does not have to point to a non-delimiter.
  unsigned SS = Stack.size();
  bool IsDelim;
  assert(P < SS);
  do {
    P++;
    IsDelim = isDelimiter(Stack[P-1]);
  } while (P < SS && IsDelim);
  // The caller guarantees a non-delimiter exists above P, so the loop must
  // have stopped on one.
  assert(!IsDelim);
  return P;
}
730 | | |
// Move the stack iterator down by one.
unsigned DataFlowGraph::DefStack::nextDown(unsigned P) const {
  // Get the preceding valid position before P (skipping all delimiters).
  // The input position P does not have to point to a non-delimiter.
  assert(P > 0 && P <= Stack.size());
  bool IsDelim = isDelimiter(Stack[P-1]);
  do {
    if (--P == 0)
      break;
    IsDelim = isDelimiter(Stack[P-1]);
  } while (P > 0 && IsDelim);
  // Either we reached the bottom (P == 0) or stopped on a non-delimiter.
  assert(!IsDelim);
  return P;
}
745 | | |
746 | | // Register information. |
747 | | |
748 | 17.8k | RegisterSet DataFlowGraph::getLandingPadLiveIns() const { |
749 | 17.8k | RegisterSet LR; |
750 | 17.8k | const Function &F = MF.getFunction(); |
751 | 17.8k | const Constant *PF = F.hasPersonalityFn() ? F.getPersonalityFn()134 |
752 | 17.8k | : nullptr17.6k ; |
753 | 17.8k | const TargetLowering &TLI = *MF.getSubtarget().getTargetLowering(); |
754 | 17.8k | if (RegisterId R = TLI.getExceptionPointerRegister(PF)) |
755 | 17.8k | LR.insert(RegisterRef(R)); |
756 | 17.8k | if (RegisterId R = TLI.getExceptionSelectorRegister(PF)) |
757 | 17.8k | LR.insert(RegisterRef(R)); |
758 | 17.8k | return LR; |
759 | 17.8k | } |
760 | | |
761 | | // Node management functions. |
762 | | |
763 | | // Get the pointer to the node with the id N. |
764 | 8.46M | NodeBase *DataFlowGraph::ptr(NodeId N) const { |
765 | 8.46M | if (N == 0) |
766 | 0 | return nullptr; |
767 | 8.46M | return Memory.ptr(N); |
768 | 8.46M | } |
769 | | |
770 | | // Get the id of the node at the address P. |
771 | 100k | NodeId DataFlowGraph::id(const NodeBase *P) const { |
772 | 100k | if (P == nullptr) |
773 | 0 | return 0; |
774 | 100k | return Memory.id(P); |
775 | 100k | } |
776 | | |
777 | | // Allocate a new node and set the attributes to Attrs. |
778 | 309k | NodeAddr<NodeBase*> DataFlowGraph::newNode(uint16_t Attrs) { |
779 | 309k | NodeAddr<NodeBase*> P = Memory.New(); |
780 | 309k | P.Addr->init(); |
781 | 309k | P.Addr->setAttrs(Attrs); |
782 | 309k | return P; |
783 | 309k | } |
784 | | |
// Make a copy of the given node B, except for the data-flow links, which
// are set to 0.
NodeAddr<NodeBase*> DataFlowGraph::cloneNode(const NodeAddr<NodeBase*> B) {
  NodeAddr<NodeBase*> NA = newNode(0);
  // Bitwise copy of the node payload, including its attributes.
  memcpy(NA.Addr, B.Addr, sizeof(NodeBase));
  // Ref nodes need to have the data-flow links reset.
  if (NA.Addr->getType() == NodeAttrs::Ref) {
    NodeAddr<RefNode*> RA = NA;
    RA.Addr->setReachingDef(0);
    RA.Addr->setSibling(0);
    // Defs additionally carry reached-def/reached-use links.
    if (NA.Addr->getKind() == NodeAttrs::Def) {
      NodeAddr<DefNode*> DA = NA;
      DA.Addr->setReachedDef(0);
      DA.Addr->setReachedUse(0);
    }
  }
  return NA;
}
803 | | |
804 | | // Allocation routines for specific node types/kinds. |
805 | | |
806 | | NodeAddr<UseNode*> DataFlowGraph::newUse(NodeAddr<InstrNode*> Owner, |
807 | 82.4k | MachineOperand &Op, uint16_t Flags) { |
808 | 82.4k | NodeAddr<UseNode*> UA = newNode(NodeAttrs::Ref | NodeAttrs::Use | Flags); |
809 | 82.4k | UA.Addr->setRegRef(&Op, *this); |
810 | 82.4k | return UA; |
811 | 82.4k | } |
812 | | |
813 | | NodeAddr<PhiUseNode*> DataFlowGraph::newPhiUse(NodeAddr<PhiNode*> Owner, |
814 | 25.3k | RegisterRef RR, NodeAddr<BlockNode*> PredB, uint16_t Flags) { |
815 | 25.3k | NodeAddr<PhiUseNode*> PUA = newNode(NodeAttrs::Ref | NodeAttrs::Use | Flags); |
816 | 25.3k | assert(Flags & NodeAttrs::PhiRef); |
817 | 25.3k | PUA.Addr->setRegRef(RR, *this); |
818 | 25.3k | PUA.Addr->setPredecessor(PredB.Id); |
819 | 25.3k | return PUA; |
820 | 25.3k | } |
821 | | |
822 | | NodeAddr<DefNode*> DataFlowGraph::newDef(NodeAddr<InstrNode*> Owner, |
823 | 61.9k | MachineOperand &Op, uint16_t Flags) { |
824 | 61.9k | NodeAddr<DefNode*> DA = newNode(NodeAttrs::Ref | NodeAttrs::Def | Flags); |
825 | 61.9k | DA.Addr->setRegRef(&Op, *this); |
826 | 61.9k | return DA; |
827 | 61.9k | } |
828 | | |
829 | | NodeAddr<DefNode*> DataFlowGraph::newDef(NodeAddr<InstrNode*> Owner, |
830 | 23.6k | RegisterRef RR, uint16_t Flags) { |
831 | 23.6k | NodeAddr<DefNode*> DA = newNode(NodeAttrs::Ref | NodeAttrs::Def | Flags); |
832 | 23.6k | assert(Flags & NodeAttrs::PhiRef); |
833 | 23.6k | DA.Addr->setRegRef(RR, *this); |
834 | 23.6k | return DA; |
835 | 23.6k | } |
836 | | |
837 | 23.6k | NodeAddr<PhiNode*> DataFlowGraph::newPhi(NodeAddr<BlockNode*> Owner) { |
838 | 23.6k | NodeAddr<PhiNode*> PA = newNode(NodeAttrs::Code | NodeAttrs::Phi); |
839 | 23.6k | Owner.Addr->addPhi(PA, *this); |
840 | 23.6k | return PA; |
841 | 23.6k | } |
842 | | |
843 | | NodeAddr<StmtNode*> DataFlowGraph::newStmt(NodeAddr<BlockNode*> Owner, |
844 | 59.5k | MachineInstr *MI) { |
845 | 59.5k | NodeAddr<StmtNode*> SA = newNode(NodeAttrs::Code | NodeAttrs::Stmt); |
846 | 59.5k | SA.Addr->setCode(MI); |
847 | 59.5k | Owner.Addr->addMember(SA, *this); |
848 | 59.5k | return SA; |
849 | 59.5k | } |
850 | | |
851 | | NodeAddr<BlockNode*> DataFlowGraph::newBlock(NodeAddr<FuncNode*> Owner, |
852 | 11.1k | MachineBasicBlock *BB) { |
853 | 11.1k | NodeAddr<BlockNode*> BA = newNode(NodeAttrs::Code | NodeAttrs::Block); |
854 | 11.1k | BA.Addr->setCode(BB); |
855 | 11.1k | Owner.Addr->addMember(BA, *this); |
856 | 11.1k | return BA; |
857 | 11.1k | } |
858 | | |
859 | 6.70k | NodeAddr<FuncNode*> DataFlowGraph::newFunc(MachineFunction *MF) { |
860 | 6.70k | NodeAddr<FuncNode*> FA = newNode(NodeAttrs::Code | NodeAttrs::Func); |
861 | 6.70k | FA.Addr->setCode(MF); |
862 | 6.70k | return FA; |
863 | 6.70k | } |
864 | | |
865 | | // Build the data flow graph. |
866 | 6.70k | void DataFlowGraph::build(unsigned Options) { |
867 | 6.70k | reset(); |
868 | 6.70k | Func = newFunc(&MF); |
869 | 6.70k | |
870 | 6.70k | if (MF.empty()) |
871 | 0 | return; |
872 | 6.70k | |
873 | 11.1k | for (MachineBasicBlock &B : MF)6.70k { |
874 | 11.1k | NodeAddr<BlockNode*> BA = newBlock(Func, &B); |
875 | 11.1k | BlockNodes.insert(std::make_pair(&B, BA)); |
876 | 59.5k | for (MachineInstr &I : B) { |
877 | 59.5k | if (I.isDebugInstr()) |
878 | 42 | continue; |
879 | 59.5k | buildStmt(BA, I); |
880 | 59.5k | } |
881 | 11.1k | } |
882 | 6.70k | |
883 | 6.70k | NodeAddr<BlockNode*> EA = Func.Addr->getEntryBlock(*this); |
884 | 6.70k | NodeList Blocks = Func.Addr->members(*this); |
885 | 6.70k | |
886 | 6.70k | // Collect information about block references. |
887 | 6.70k | RegisterSet AllRefs; |
888 | 6.70k | for (NodeAddr<BlockNode*> BA : Blocks) |
889 | 11.1k | for (NodeAddr<InstrNode*> IA : BA.Addr->members(*this)) |
890 | 59.5k | for (NodeAddr<RefNode*> RA : IA.Addr->members(*this)) |
891 | 144k | AllRefs.insert(RA.Addr->getRegRef(*this)); |
892 | 6.70k | |
893 | 6.70k | // Collect function live-ins and entry block live-ins. |
894 | 6.70k | MachineRegisterInfo &MRI = MF.getRegInfo(); |
895 | 6.70k | MachineBasicBlock &EntryB = *EA.Addr->getCode(); |
896 | 6.70k | assert(EntryB.pred_empty() && "Function entry block has predecessors"); |
897 | 6.70k | for (std::pair<unsigned,unsigned> P : MRI.liveins()) |
898 | 9.64k | LiveIns.insert(RegisterRef(P.first)); |
899 | 6.70k | if (MRI.tracksLiveness()) { |
900 | 6.70k | for (auto I : EntryB.liveins()) |
901 | 9.75k | LiveIns.insert(RegisterRef(I.PhysReg, I.LaneMask)); |
902 | 6.70k | } |
903 | 6.70k | |
904 | 6.70k | // Add function-entry phi nodes for the live-in registers. |
905 | 6.70k | //for (std::pair<RegisterId,LaneBitmask> P : LiveIns) { |
906 | 18.5k | for (auto I = LiveIns.rr_begin(), E = LiveIns.rr_end(); I != E; ++I11.8k ) { |
907 | 11.8k | RegisterRef RR = *I; |
908 | 11.8k | NodeAddr<PhiNode*> PA = newPhi(EA); |
909 | 11.8k | uint16_t PhiFlags = NodeAttrs::PhiRef | NodeAttrs::Preserving; |
910 | 11.8k | NodeAddr<DefNode*> DA = newDef(PA, RR, PhiFlags); |
911 | 11.8k | PA.Addr->addMember(DA, *this); |
912 | 11.8k | } |
913 | 6.70k | |
914 | 6.70k | // Add phis for landing pads. |
915 | 6.70k | // Landing pads, unlike usual backs blocks, are not entered through |
916 | 6.70k | // branches in the program, or fall-throughs from other blocks. They |
917 | 6.70k | // are entered from the exception handling runtime and target's ABI |
918 | 6.70k | // may define certain registers as defined on entry to such a block. |
919 | 6.70k | RegisterSet EHRegs = getLandingPadLiveIns(); |
920 | 6.70k | if (!EHRegs.empty()) { |
921 | 11.1k | for (NodeAddr<BlockNode*> BA : Blocks) { |
922 | 11.1k | const MachineBasicBlock &B = *BA.Addr->getCode(); |
923 | 11.1k | if (!B.isEHPad()) |
924 | 11.0k | continue; |
925 | 22 | |
926 | 22 | // Prepare a list of NodeIds of the block's predecessors. |
927 | 22 | NodeList Preds; |
928 | 22 | for (MachineBasicBlock *PB : B.predecessors()) |
929 | 22 | Preds.push_back(findBlock(PB)); |
930 | 22 | |
931 | 22 | // Build phi nodes for each live-in. |
932 | 44 | for (RegisterRef RR : EHRegs) { |
933 | 44 | NodeAddr<PhiNode*> PA = newPhi(BA); |
934 | 44 | uint16_t PhiFlags = NodeAttrs::PhiRef | NodeAttrs::Preserving; |
935 | 44 | // Add def: |
936 | 44 | NodeAddr<DefNode*> DA = newDef(PA, RR, PhiFlags); |
937 | 44 | PA.Addr->addMember(DA, *this); |
938 | 44 | // Add uses (no reaching defs for phi uses): |
939 | 44 | for (NodeAddr<BlockNode*> PBA : Preds) { |
940 | 44 | NodeAddr<PhiUseNode*> PUA = newPhiUse(PA, RR, PBA); |
941 | 44 | PA.Addr->addMember(PUA, *this); |
942 | 44 | } |
943 | 44 | } |
944 | 22 | } |
945 | 6.70k | } |
946 | 6.70k | |
947 | 6.70k | // Build a map "PhiM" which will contain, for each block, the set |
948 | 6.70k | // of references that will require phi definitions in that block. |
949 | 6.70k | BlockRefsMap PhiM; |
950 | 6.70k | for (NodeAddr<BlockNode*> BA : Blocks) |
951 | 11.1k | recordDefsForDF(PhiM, BA); |
952 | 6.70k | for (NodeAddr<BlockNode*> BA : Blocks) |
953 | 11.1k | buildPhis(PhiM, AllRefs, BA); |
954 | 6.70k | |
955 | 6.70k | // Link all the refs. This will recursively traverse the dominator tree. |
956 | 6.70k | DefStackMap DM; |
957 | 6.70k | linkBlockRefs(DM, EA); |
958 | 6.70k | |
959 | 6.70k | // Finally, remove all unused phi nodes. |
960 | 6.70k | if (!(Options & BuildOptions::KeepDeadPhis)) |
961 | 9 | removeUnusedPhis(); |
962 | 6.70k | } |
963 | | |
964 | 2.13M | RegisterRef DataFlowGraph::makeRegRef(unsigned Reg, unsigned Sub) const { |
965 | 2.13M | assert(PhysicalRegisterInfo::isRegMaskId(Reg) || |
966 | 2.13M | TargetRegisterInfo::isPhysicalRegister(Reg)); |
967 | 2.13M | assert(Reg != 0); |
968 | 2.13M | if (Sub != 0) |
969 | 2 | Reg = TRI.getSubReg(Reg, Sub); |
970 | 2.13M | return RegisterRef(Reg); |
971 | 2.13M | } |
972 | | |
973 | 2.26M | RegisterRef DataFlowGraph::makeRegRef(const MachineOperand &Op) const { |
974 | 2.26M | assert(Op.isReg() || Op.isRegMask()); |
975 | 2.26M | if (Op.isReg()) |
976 | 2.13M | return makeRegRef(Op.getReg(), Op.getSubReg()); |
977 | 131k | return RegisterRef(PRI.getRegMaskId(Op.getRegMask()), LaneBitmask::getAll()); |
978 | 131k | } |
979 | | |
980 | 0 | RegisterRef DataFlowGraph::restrictRef(RegisterRef AR, RegisterRef BR) const { |
981 | 0 | if (AR.Reg == BR.Reg) { |
982 | 0 | LaneBitmask M = AR.Mask & BR.Mask; |
983 | 0 | return M.any() ? RegisterRef(AR.Reg, M) : RegisterRef(); |
984 | 0 | } |
985 | | #ifndef NDEBUG |
986 | | // RegisterRef NAR = PRI.normalize(AR); |
987 | | // RegisterRef NBR = PRI.normalize(BR); |
988 | | // assert(NAR.Reg != NBR.Reg); |
989 | | #endif |
990 | | // This isn't strictly correct, because the overlap may happen in the |
991 | 0 | // part masked out. |
992 | 0 | if (PRI.alias(AR, BR)) |
993 | 0 | return AR; |
994 | 0 | return RegisterRef(); |
995 | 0 | } |
996 | | |
997 | | // For each stack in the map DefM, push the delimiter for block B on it. |
998 | 11.1k | void DataFlowGraph::markBlock(NodeId B, DefStackMap &DefM) { |
999 | 11.1k | // Push block delimiters. |
1000 | 250k | for (auto I = DefM.begin(), E = DefM.end(); I != E; ++I239k ) |
1001 | 239k | I->second.start_block(B); |
1002 | 11.1k | } |
1003 | | |
1004 | | // Remove all definitions coming from block B from each stack in DefM. |
1005 | 11.1k | void DataFlowGraph::releaseBlock(NodeId B, DefStackMap &DefM) { |
1006 | 11.1k | // Pop all defs from this block from the definition stack. Defs that were |
1007 | 11.1k | // added to the map during the traversal of instructions will not have a |
1008 | 11.1k | // delimiter, but for those, the whole stack will be emptied. |
1009 | 436k | for (auto I = DefM.begin(), E = DefM.end(); I != E; ++I425k ) |
1010 | 425k | I->second.clear_block(B); |
1011 | 11.1k | |
1012 | 11.1k | // Finally, remove empty stacks from the map. |
1013 | 436k | for (auto I = DefM.begin(), E = DefM.end(), NextI = I; I != E; I = NextI425k ) { |
1014 | 425k | NextI = std::next(I); |
1015 | 425k | // This preserves the validity of iterators other than I. |
1016 | 425k | if (I->second.empty()) |
1017 | 186k | DefM.erase(I); |
1018 | 425k | } |
1019 | 11.1k | } |
1020 | | |
1021 | | // Push all definitions from the instruction node IA to an appropriate |
1022 | | // stack in DefM. |
1023 | 0 | void DataFlowGraph::pushAllDefs(NodeAddr<InstrNode*> IA, DefStackMap &DefM) { |
1024 | 0 | pushClobbers(IA, DefM); |
1025 | 0 | pushDefs(IA, DefM); |
1026 | 0 | } |
1027 | | |
1028 | | // Push all definitions from the instruction node IA to an appropriate |
1029 | | // stack in DefM. |
1030 | 83.1k | void DataFlowGraph::pushClobbers(NodeAddr<InstrNode*> IA, DefStackMap &DefM) { |
1031 | 83.1k | NodeSet Visited; |
1032 | 83.1k | std::set<RegisterId> Defined; |
1033 | 83.1k | |
1034 | 83.1k | // The important objectives of this function are: |
1035 | 83.1k | // - to be able to handle instructions both while the graph is being |
1036 | 83.1k | // constructed, and after the graph has been constructed, and |
1037 | 83.1k | // - maintain proper ordering of definitions on the stack for each |
1038 | 83.1k | // register reference: |
1039 | 83.1k | // - if there are two or more related defs in IA (i.e. coming from |
1040 | 83.1k | // the same machine operand), then only push one def on the stack, |
1041 | 83.1k | // - if there are multiple unrelated defs of non-overlapping |
1042 | 83.1k | // subregisters of S, then the stack for S will have both (in an |
1043 | 83.1k | // unspecified order), but the order does not matter from the data- |
1044 | 83.1k | // -flow perspective. |
1045 | 83.1k | |
1046 | 91.3k | for (NodeAddr<DefNode*> DA : IA.Addr->members_if(IsDef, *this)) { |
1047 | 91.3k | if (Visited.count(DA.Id)) |
1048 | 5.73k | continue; |
1049 | 85.6k | if (!(DA.Addr->getFlags() & NodeAttrs::Clobbering)) |
1050 | 84.1k | continue; |
1051 | 1.43k | |
1052 | 1.43k | NodeList Rel = getRelatedRefs(IA, DA); |
1053 | 1.43k | NodeAddr<DefNode*> PDA = Rel.front(); |
1054 | 1.43k | RegisterRef RR = PDA.Addr->getRegRef(*this); |
1055 | 1.43k | |
1056 | 1.43k | // Push the definition on the stack for the register and all aliases. |
1057 | 1.43k | // The def stack traversal in linkNodeUp will check the exact aliasing. |
1058 | 1.43k | DefM[RR.Reg].push(DA); |
1059 | 1.43k | Defined.insert(RR.Reg); |
1060 | 255k | for (RegisterId A : PRI.getAliasSet(RR.Reg)) { |
1061 | 255k | // Check that we don't push the same def twice. |
1062 | 255k | assert(A != RR.Reg); |
1063 | 255k | if (!Defined.count(A)) |
1064 | 255k | DefM[A].push(DA); |
1065 | 255k | } |
1066 | 1.43k | // Mark all the related defs as visited. |
1067 | 1.43k | for (NodeAddr<NodeBase*> T : Rel) |
1068 | 7.16k | Visited.insert(T.Id); |
1069 | 1.43k | } |
1070 | 83.1k | } |
1071 | | |
1072 | | // Push all definitions from the instruction node IA to an appropriate |
1073 | | // stack in DefM. |
1074 | 83.1k | void DataFlowGraph::pushDefs(NodeAddr<InstrNode*> IA, DefStackMap &DefM) { |
1075 | 83.1k | NodeSet Visited; |
1076 | | #ifndef NDEBUG |
1077 | | std::set<RegisterId> Defined; |
1078 | | #endif |
1079 | | |
1080 | 83.1k | // The important objectives of this function are: |
1081 | 83.1k | // - to be able to handle instructions both while the graph is being |
1082 | 83.1k | // constructed, and after the graph has been constructed, and |
1083 | 83.1k | // - maintain proper ordering of definitions on the stack for each |
1084 | 83.1k | // register reference: |
1085 | 83.1k | // - if there are two or more related defs in IA (i.e. coming from |
1086 | 83.1k | // the same machine operand), then only push one def on the stack, |
1087 | 83.1k | // - if there are multiple unrelated defs of non-overlapping |
1088 | 83.1k | // subregisters of S, then the stack for S will have both (in an |
1089 | 83.1k | // unspecified order), but the order does not matter from the data- |
1090 | 83.1k | // -flow perspective. |
1091 | 83.1k | |
1092 | 93.2k | for (NodeAddr<DefNode*> DA : IA.Addr->members_if(IsDef, *this)) { |
1093 | 93.2k | if (Visited.count(DA.Id)) |
1094 | 1.86k | continue; |
1095 | 91.3k | if (DA.Addr->getFlags() & NodeAttrs::Clobbering) |
1096 | 7.16k | continue; |
1097 | 84.1k | |
1098 | 84.1k | NodeList Rel = getRelatedRefs(IA, DA); |
1099 | 84.1k | NodeAddr<DefNode*> PDA = Rel.front(); |
1100 | 84.1k | RegisterRef RR = PDA.Addr->getRegRef(*this); |
1101 | | #ifndef NDEBUG |
1102 | | // Assert if the register is defined in two or more unrelated defs. |
1103 | | // This could happen if there are two or more def operands defining it. |
1104 | | if (!Defined.insert(RR.Reg).second) { |
1105 | | MachineInstr *MI = NodeAddr<StmtNode*>(IA).Addr->getCode(); |
1106 | | dbgs() << "Multiple definitions of register: " |
1107 | | << Print<RegisterRef>(RR, *this) << " in\n " << *MI << "in " |
1108 | | << printMBBReference(*MI->getParent()) << '\n'; |
1109 | | llvm_unreachable(nullptr); |
1110 | | } |
1111 | | #endif |
1112 | | // Push the definition on the stack for the register and all aliases. |
1113 | 84.1k | // The def stack traversal in linkNodeUp will check the exact aliasing. |
1114 | 84.1k | DefM[RR.Reg].push(DA); |
1115 | 275k | for (RegisterId A : PRI.getAliasSet(RR.Reg)) { |
1116 | 275k | // Check that we don't push the same def twice. |
1117 | 275k | assert(A != RR.Reg); |
1118 | 275k | DefM[A].push(DA); |
1119 | 275k | } |
1120 | 84.1k | // Mark all the related defs as visited. |
1121 | 84.1k | for (NodeAddr<NodeBase*> T : Rel) |
1122 | 86.0k | Visited.insert(T.Id); |
1123 | 84.1k | } |
1124 | 83.1k | } |
1125 | | |
1126 | | // Return the list of all reference nodes related to RA, including RA itself. |
1127 | | // See "getNextRelated" for the meaning of a "related reference". |
1128 | | NodeList DataFlowGraph::getRelatedRefs(NodeAddr<InstrNode*> IA, |
1129 | 388k | NodeAddr<RefNode*> RA) const { |
1130 | 388k | assert(IA.Id != 0 && RA.Id != 0); |
1131 | 388k | |
1132 | 388k | NodeList Refs; |
1133 | 388k | NodeId Start = RA.Id; |
1134 | 495k | do { |
1135 | 495k | Refs.push_back(RA); |
1136 | 495k | RA = getNextRelated(IA, RA); |
1137 | 495k | } while (RA.Id != 0 && RA.Id != Start106k ); |
1138 | 388k | return Refs; |
1139 | 388k | } |
1140 | | |
1141 | | // Clear all information in the graph. |
1142 | 6.70k | void DataFlowGraph::reset() { |
1143 | 6.70k | Memory.clear(); |
1144 | 6.70k | BlockNodes.clear(); |
1145 | 6.70k | Func = NodeAddr<FuncNode*>(); |
1146 | 6.70k | } |
1147 | | |
1148 | | // Return the next reference node in the instruction node IA that is related |
1149 | | // to RA. Conceptually, two reference nodes are related if they refer to the |
1150 | | // same instance of a register access, but differ in flags or other minor |
1151 | | // characteristics. Specific examples of related nodes are shadow reference |
1152 | | // nodes. |
1153 | | // Return the equivalent of nullptr if there are no more related references. |
1154 | | NodeAddr<RefNode*> DataFlowGraph::getNextRelated(NodeAddr<InstrNode*> IA, |
1155 | 509k | NodeAddr<RefNode*> RA) const { |
1156 | 509k | assert(IA.Id != 0 && RA.Id != 0); |
1157 | 509k | |
1158 | 509k | auto Related = [this,RA](NodeAddr<RefNode*> TA) -> bool { |
1159 | 213k | if (TA.Addr->getKind() != RA.Addr->getKind()) |
1160 | 81.6k | return false; |
1161 | 131k | if (TA.Addr->getRegRef(*this) != RA.Addr->getRegRef(*this)) |
1162 | 0 | return false; |
1163 | 131k | return true; |
1164 | 131k | }; |
1165 | 509k | auto RelatedStmt = [&Related,RA](NodeAddr<RefNode*> TA) -> bool { |
1166 | 71.9k | return Related(TA) && |
1167 | 71.9k | &RA.Addr->getOp() == &TA.Addr->getOp()29.4k ; |
1168 | 71.9k | }; |
1169 | 509k | auto RelatedPhi = [&Related,RA](NodeAddr<RefNode*> TA) -> bool { |
1170 | 141k | if (!Related(TA)) |
1171 | 39.1k | return false; |
1172 | 102k | if (TA.Addr->getKind() != NodeAttrs::Use) |
1173 | 0 | return true; |
1174 | 102k | // For phi uses, compare predecessor blocks. |
1175 | 102k | const NodeAddr<const PhiUseNode*> TUA = TA; |
1176 | 102k | const NodeAddr<const PhiUseNode*> RUA = RA; |
1177 | 102k | return TUA.Addr->getPredecessor() == RUA.Addr->getPredecessor(); |
1178 | 102k | }; |
1179 | 509k | |
1180 | 509k | RegisterRef RR = RA.Addr->getRegRef(*this); |
1181 | 509k | if (IA.Addr->getKind() == NodeAttrs::Stmt) |
1182 | 351k | return RA.Addr->getNextRef(RR, RelatedStmt, true, *this); |
1183 | 158k | return RA.Addr->getNextRef(RR, RelatedPhi, true, *this); |
1184 | 158k | } |
1185 | | |
1186 | | // Find the next node related to RA in IA that satisfies condition P. |
1187 | | // If such a node was found, return a pair where the second element is the |
1188 | | // located node. If such a node does not exist, return a pair where the |
1189 | | // first element is the element after which such a node should be inserted, |
1190 | | // and the second element is a null-address. |
1191 | | template <typename Predicate> |
1192 | | std::pair<NodeAddr<RefNode*>,NodeAddr<RefNode*>> |
1193 | | DataFlowGraph::locateNextRef(NodeAddr<InstrNode*> IA, NodeAddr<RefNode*> RA, |
1194 | 14.8k | Predicate P) const { |
1195 | 14.8k | assert(IA.Id != 0 && RA.Id != 0); |
1196 | 14.8k | |
1197 | 14.8k | NodeAddr<RefNode*> NA; |
1198 | 14.8k | NodeId Start = RA.Id; |
1199 | 14.8k | while (true) { |
1200 | 14.8k | NA = getNextRelated(IA, RA); |
1201 | 14.8k | if (NA.Id == 0 || NA.Id == Start16 ) |
1202 | 14.8k | break; |
1203 | 16 | if (P(NA)) |
1204 | 16 | break; |
1205 | 0 | RA = NA; |
1206 | 0 | } |
1207 | 14.8k | |
1208 | 14.8k | if (NA.Id != 0 && NA.Id != Start16 ) |
1209 | 16 | return std::make_pair(RA, NA); |
1210 | 14.8k | return std::make_pair(RA, NodeAddr<RefNode*>()); |
1211 | 14.8k | } RDFGraph.cpp:std::__1::pair<llvm::rdf::NodeAddr<llvm::rdf::RefNode*>, llvm::rdf::NodeAddr<llvm::rdf::RefNode*> > llvm::rdf::DataFlowGraph::locateNextRef<llvm::rdf::DataFlowGraph::getNextShadow(llvm::rdf::NodeAddr<llvm::rdf::InstrNode*>, llvm::rdf::NodeAddr<llvm::rdf::RefNode*>, bool)::$_7>(llvm::rdf::NodeAddr<llvm::rdf::InstrNode*>, llvm::rdf::NodeAddr<llvm::rdf::RefNode*>, llvm::rdf::DataFlowGraph::getNextShadow(llvm::rdf::NodeAddr<llvm::rdf::InstrNode*>, llvm::rdf::NodeAddr<llvm::rdf::RefNode*>, bool)::$_7) const Line | Count | Source | 1194 | 14.8k | Predicate P) const { | 1195 | 14.8k | assert(IA.Id != 0 && RA.Id != 0); | 1196 | 14.8k | | 1197 | 14.8k | NodeAddr<RefNode*> NA; | 1198 | 14.8k | NodeId Start = RA.Id; | 1199 | 14.8k | while (true) { | 1200 | 14.8k | NA = getNextRelated(IA, RA); | 1201 | 14.8k | if (NA.Id == 0 || NA.Id == Start16 ) | 1202 | 14.8k | break; | 1203 | 16 | if (P(NA)) | 1204 | 16 | break; | 1205 | 0 | RA = NA; | 1206 | 0 | } | 1207 | 14.8k | | 1208 | 14.8k | if (NA.Id != 0 && NA.Id != Start16 ) | 1209 | 16 | return std::make_pair(RA, NA); | 1210 | 14.8k | return std::make_pair(RA, NodeAddr<RefNode*>()); | 1211 | 14.8k | } |
Unexecuted instantiation: RDFGraph.cpp:std::__1::pair<llvm::rdf::NodeAddr<llvm::rdf::RefNode*>, llvm::rdf::NodeAddr<llvm::rdf::RefNode*> > llvm::rdf::DataFlowGraph::locateNextRef<llvm::rdf::DataFlowGraph::getNextShadow(llvm::rdf::NodeAddr<llvm::rdf::InstrNode*>, llvm::rdf::NodeAddr<llvm::rdf::RefNode*>) const::$_8>(llvm::rdf::NodeAddr<llvm::rdf::InstrNode*>, llvm::rdf::NodeAddr<llvm::rdf::RefNode*>, llvm::rdf::DataFlowGraph::getNextShadow(llvm::rdf::NodeAddr<llvm::rdf::InstrNode*>, llvm::rdf::NodeAddr<llvm::rdf::RefNode*>) const::$_8) const |
1212 | | |
1213 | | // Get the next shadow node in IA corresponding to RA, and optionally create |
1214 | | // such a node if it does not exist. |
1215 | | NodeAddr<RefNode*> DataFlowGraph::getNextShadow(NodeAddr<InstrNode*> IA, |
1216 | 14.8k | NodeAddr<RefNode*> RA, bool Create) { |
1217 | 14.8k | assert(IA.Id != 0 && RA.Id != 0); |
1218 | 14.8k | |
1219 | 14.8k | uint16_t Flags = RA.Addr->getFlags() | NodeAttrs::Shadow; |
1220 | 14.8k | auto IsShadow = [Flags] (NodeAddr<RefNode*> TA) -> bool { |
1221 | 16 | return TA.Addr->getFlags() == Flags; |
1222 | 16 | }; |
1223 | 14.8k | auto Loc = locateNextRef(IA, RA, IsShadow); |
1224 | 14.8k | if (Loc.second.Id != 0 || !Create14.8k ) |
1225 | 16 | return Loc.second; |
1226 | 14.8k | |
1227 | 14.8k | // Create a copy of RA and mark is as shadow. |
1228 | 14.8k | NodeAddr<RefNode*> NA = cloneNode(RA); |
1229 | 14.8k | NA.Addr->setFlags(Flags | NodeAttrs::Shadow); |
1230 | 14.8k | IA.Addr->addMemberAfter(Loc.first, NA, *this); |
1231 | 14.8k | return NA; |
1232 | 14.8k | } |
1233 | | |
1234 | | // Get the next shadow node in IA corresponding to RA. Return null-address |
1235 | | // if such a node does not exist. |
1236 | | NodeAddr<RefNode*> DataFlowGraph::getNextShadow(NodeAddr<InstrNode*> IA, |
1237 | 0 | NodeAddr<RefNode*> RA) const { |
1238 | 0 | assert(IA.Id != 0 && RA.Id != 0); |
1239 | 0 | uint16_t Flags = RA.Addr->getFlags() | NodeAttrs::Shadow; |
1240 | 0 | auto IsShadow = [Flags] (NodeAddr<RefNode*> TA) -> bool { |
1241 | 0 | return TA.Addr->getFlags() == Flags; |
1242 | 0 | }; |
1243 | 0 | return locateNextRef(IA, RA, IsShadow).second; |
1244 | 0 | } |
1245 | | |
1246 | | // Create a new statement node in the block node BA that corresponds to |
1247 | | // the machine instruction MI. |
1248 | 59.5k | void DataFlowGraph::buildStmt(NodeAddr<BlockNode*> BA, MachineInstr &In) { |
1249 | 59.5k | NodeAddr<StmtNode*> SA = newStmt(BA, &In); |
1250 | 59.5k | |
1251 | 59.5k | auto isCall = [] (const MachineInstr &In) -> bool { |
1252 | 59.5k | if (In.isCall()) |
1253 | 1.43k | return true; |
1254 | 58.0k | // Is tail call? |
1255 | 58.0k | if (In.isBranch()) { |
1256 | 10.9k | for (const MachineOperand &Op : In.operands()) |
1257 | 29.3k | if (Op.isGlobal() || Op.isSymbol()) |
1258 | 0 | return true; |
1259 | 10.9k | // Assume indirect branches are calls. This is for the purpose of |
1260 | 10.9k | // keeping implicit operands, and so it won't hurt on intra-function |
1261 | 10.9k | // indirect branches. |
1262 | 10.9k | if (In.isIndirectBranch()) |
1263 | 6.58k | return true; |
1264 | 51.5k | } |
1265 | 51.5k | return false; |
1266 | 51.5k | }; |
1267 | 59.5k | |
1268 | 59.5k | auto isDefUndef = [this] (const MachineInstr &In, RegisterRef DR) -> bool { |
1269 | 2.62k | // This instruction defines DR. Check if there is a use operand that |
1270 | 2.62k | // would make DR live on entry to the instruction. |
1271 | 8.63k | for (const MachineOperand &Op : In.operands()) { |
1272 | 8.63k | if (!Op.isReg() || Op.getReg() == 06.40k || !Op.isUse()6.40k || Op.isUndef()3.77k ) |
1273 | 4.98k | continue; |
1274 | 3.65k | RegisterRef UR = makeRegRef(Op); |
1275 | 3.65k | if (PRI.alias(DR, UR)) |
1276 | 722 | return false; |
1277 | 3.65k | } |
1278 | 2.62k | return true1.90k ; |
1279 | 2.62k | }; |
1280 | 59.5k | |
1281 | 59.5k | bool IsCall = isCall(In); |
1282 | 59.5k | unsigned NumOps = In.getNumOperands(); |
1283 | 59.5k | |
1284 | 59.5k | // Avoid duplicate implicit defs. This will not detect cases of implicit |
1285 | 59.5k | // defs that define registers that overlap, but it is not clear how to |
1286 | 59.5k | // interpret that in the absence of explicit defs. Overlapping explicit |
1287 | 59.5k | // defs are likely illegal already. |
1288 | 59.5k | BitVector DoneDefs(TRI.getNumRegs()); |
1289 | 59.5k | // Process explicit defs first. |
1290 | 249k | for (unsigned OpN = 0; OpN < NumOps; ++OpN189k ) { |
1291 | 189k | MachineOperand &Op = In.getOperand(OpN); |
1292 | 189k | if (!Op.isReg() || !Op.isDef()145k || Op.isImplicit()63.2k ) |
1293 | 151k | continue; |
1294 | 38.6k | unsigned R = Op.getReg(); |
1295 | 38.6k | if (!R || !TargetRegisterInfo::isPhysicalRegister(R)) |
1296 | 0 | continue; |
1297 | 38.6k | uint16_t Flags = NodeAttrs::None; |
1298 | 38.6k | if (TOI.isPreserving(In, OpN)) { |
1299 | 1.05k | Flags |= NodeAttrs::Preserving; |
1300 | 1.05k | // If the def is preserving, check if it is also undefined. |
1301 | 1.05k | if (isDefUndef(In, makeRegRef(Op))) |
1302 | 334 | Flags |= NodeAttrs::Undef; |
1303 | 1.05k | } |
1304 | 38.6k | if (TOI.isClobbering(In, OpN)) |
1305 | 0 | Flags |= NodeAttrs::Clobbering; |
1306 | 38.6k | if (TOI.isFixedReg(In, OpN)) |
1307 | 30 | Flags |= NodeAttrs::Fixed; |
1308 | 38.6k | if (IsCall && Op.isDead()0 ) |
1309 | 0 | Flags |= NodeAttrs::Dead; |
1310 | 38.6k | NodeAddr<DefNode*> DA = newDef(SA, Op, Flags); |
1311 | 38.6k | SA.Addr->addMember(DA, *this); |
1312 | 38.6k | assert(!DoneDefs.test(R)); |
1313 | 38.6k | DoneDefs.set(R); |
1314 | 38.6k | } |
1315 | 59.5k | |
1316 | 59.5k | // Process reg-masks (as clobbers). |
1317 | 59.5k | BitVector DoneClobbers(TRI.getNumRegs()); |
1318 | 249k | for (unsigned OpN = 0; OpN < NumOps; ++OpN189k ) { |
1319 | 189k | MachineOperand &Op = In.getOperand(OpN); |
1320 | 189k | if (!Op.isRegMask()) |
1321 | 188k | continue; |
1322 | 1.43k | uint16_t Flags = NodeAttrs::Clobbering | NodeAttrs::Fixed | |
1323 | 1.43k | NodeAttrs::Dead; |
1324 | 1.43k | NodeAddr<DefNode*> DA = newDef(SA, Op, Flags); |
1325 | 1.43k | SA.Addr->addMember(DA, *this); |
1326 | 1.43k | // Record all clobbered registers in DoneDefs. |
1327 | 1.43k | const uint32_t *RM = Op.getRegMask(); |
1328 | 283k | for (unsigned i = 1, e = TRI.getNumRegs(); i != e; ++i281k ) |
1329 | 281k | if (!(RM[i/32] & (1u << (i%32)))) |
1330 | 255k | DoneClobbers.set(i); |
1331 | 1.43k | } |
1332 | 59.5k | |
1333 | 59.5k | // Process implicit defs, skipping those that have already been added |
1334 | 59.5k | // as explicit. |
1335 | 249k | for (unsigned OpN = 0; OpN < NumOps; ++OpN189k ) { |
1336 | 189k | MachineOperand &Op = In.getOperand(OpN); |
1337 | 189k | if (!Op.isReg() || !Op.isDef()145k || !Op.isImplicit()63.2k ) |
1338 | 165k | continue; |
1339 | 24.6k | unsigned R = Op.getReg(); |
1340 | 24.6k | if (!R || !TargetRegisterInfo::isPhysicalRegister(R) || DoneDefs.test(R)) |
1341 | 0 | continue; |
1342 | 24.6k | RegisterRef RR = makeRegRef(Op); |
1343 | 24.6k | uint16_t Flags = NodeAttrs::None; |
1344 | 24.6k | if (TOI.isPreserving(In, OpN)) { |
1345 | 1.56k | Flags |= NodeAttrs::Preserving; |
1346 | 1.56k | // If the def is preserving, check if it is also undefined. |
1347 | 1.56k | if (isDefUndef(In, RR)) |
1348 | 1.56k | Flags |= NodeAttrs::Undef; |
1349 | 1.56k | } |
1350 | 24.6k | if (TOI.isClobbering(In, OpN)) |
1351 | 2.77k | Flags |= NodeAttrs::Clobbering; |
1352 | 24.6k | if (TOI.isFixedReg(In, OpN)) |
1353 | 24.6k | Flags |= NodeAttrs::Fixed; |
1354 | 24.6k | if (IsCall && Op.isDead()11.3k ) { |
1355 | 9.35k | if (DoneClobbers.test(R)) |
1356 | 2.77k | continue; |
1357 | 6.58k | Flags |= NodeAttrs::Dead; |
1358 | 6.58k | } |
1359 | 24.6k | NodeAddr<DefNode*> DA = newDef(SA, Op, Flags); |
1360 | 21.8k | SA.Addr->addMember(DA, *this); |
1361 | 21.8k | DoneDefs.set(R); |
1362 | 21.8k | } |
1363 | 59.5k | |
1364 | 249k | for (unsigned OpN = 0; OpN < NumOps; ++OpN189k ) { |
1365 | 189k | MachineOperand &Op = In.getOperand(OpN); |
1366 | 189k | if (!Op.isReg() || !Op.isUse()145k ) |
1367 | 107k | continue; |
1368 | 82.4k | unsigned R = Op.getReg(); |
1369 | 82.4k | if (!R || !TargetRegisterInfo::isPhysicalRegister(R)) |
1370 | 0 | continue; |
1371 | 82.4k | uint16_t Flags = NodeAttrs::None; |
1372 | 82.4k | if (Op.isUndef()) |
1373 | 775 | Flags |= NodeAttrs::Undef; |
1374 | 82.4k | if (TOI.isFixedReg(In, OpN)) |
1375 | 22.4k | Flags |= NodeAttrs::Fixed; |
1376 | 82.4k | NodeAddr<UseNode*> UA = newUse(SA, Op, Flags); |
1377 | 82.4k | SA.Addr->addMember(UA, *this); |
1378 | 82.4k | } |
1379 | 59.5k | } |
1380 | | |
1381 | | // Scan all defs in the block node BA and record in PhiM the locations of |
1382 | | // phi nodes corresponding to these defs. |
1383 | | void DataFlowGraph::recordDefsForDF(BlockRefsMap &PhiM, |
1384 | 11.1k | NodeAddr<BlockNode*> BA) { |
1385 | 11.1k | // Check all defs from block BA and record them in each block in BA's |
1386 | 11.1k | // iterated dominance frontier. This information will later be used to |
1387 | 11.1k | // create phi nodes. |
1388 | 11.1k | MachineBasicBlock *BB = BA.Addr->getCode(); |
1389 | 11.1k | assert(BB); |
1390 | 11.1k | auto DFLoc = MDF.find(BB); |
1391 | 11.1k | if (DFLoc == MDF.end() || DFLoc->second.empty()) |
1392 | 8.05k | return; |
1393 | 3.05k | |
1394 | 3.05k | // Traverse all instructions in the block and collect the set of all |
1395 | 3.05k | // defined references. For each reference there will be a phi created |
1396 | 3.05k | // in the block's iterated dominance frontier. |
1397 | 3.05k | // This is done to make sure that each defined reference gets only one |
1398 | 3.05k | // phi node, even if it is defined multiple times. |
1399 | 3.05k | RegisterSet Defs; |
1400 | 3.05k | for (NodeAddr<InstrNode*> IA : BA.Addr->members(*this)) |
1401 | 19.2k | for (NodeAddr<RefNode*> RA : IA.Addr->members_if(IsDef, *this)) |
1402 | 22.2k | Defs.insert(RA.Addr->getRegRef(*this)); |
1403 | 3.05k | |
1404 | 3.05k | // Calculate the iterated dominance frontier of BB. |
1405 | 3.05k | const MachineDominanceFrontier::DomSetType &DF = DFLoc->second; |
1406 | 3.05k | SetVector<MachineBasicBlock*> IDF(DF.begin(), DF.end()); |
1407 | 8.76k | for (unsigned i = 0; i < IDF.size(); ++i5.70k ) { |
1408 | 5.70k | auto F = MDF.find(IDF[i]); |
1409 | 5.70k | if (F != MDF.end()) |
1410 | 5.70k | IDF.insert(F->second.begin(), F->second.end()); |
1411 | 5.70k | } |
1412 | 3.05k | |
1413 | 3.05k | // Finally, add the set of defs to each block in the iterated dominance |
1414 | 3.05k | // frontier. |
1415 | 5.70k | for (auto DB : IDF) { |
1416 | 5.70k | NodeAddr<BlockNode*> DBA = findBlock(DB); |
1417 | 5.70k | PhiM[DBA.Id].insert(Defs.begin(), Defs.end()); |
1418 | 5.70k | } |
1419 | 3.05k | } |
1420 | | |
// Given the locations of phi nodes in the map PhiM, create the phi nodes
// that are located in the block node BA.
void DataFlowGraph::buildPhis(BlockRefsMap &PhiM, RegisterSet &AllRefs,
      NodeAddr<BlockNode*> BA) {
  // Check if this blocks has any DF defs, i.e. if there are any defs
  // that this block is in the iterated dominance frontier of.
  auto HasDF = PhiM.find(BA.Id);
  if (HasDF == PhiM.end() || HasDF->second.empty())
    return;

  // First, remove all R in Refs in such that there exists T in Refs
  // such that T covers R. In other words, only leave those refs that
  // are not covered by another ref (i.e. maximal with respect to covering).

  // Returns the largest ref in RRs that covers RR, or RR itself if no
  // element of RRs covers it.
  auto MaxCoverIn = [this] (RegisterRef RR, RegisterSet &RRs) -> RegisterRef {
    for (RegisterRef I : RRs)
      if (I != RR && RegisterAggr::isCoverOf(I, RR, PRI))
        RR = I;
    return RR;
  };

  // Reduce this block's DF defs to their maximal covers within the DF set.
  RegisterSet MaxDF;
  for (RegisterRef I : HasDF->second)
    MaxDF.insert(MaxCoverIn(I, HasDF->second));

  // Then widen each of those to its maximal cover within all refs seen in
  // the function (AllRefs), so the phis use the widest known register refs.
  std::vector<RegisterRef> MaxRefs;
  for (RegisterRef I : MaxDF)
    MaxRefs.push_back(MaxCoverIn(I, AllRefs));

  // Now, for each R in MaxRefs, get the alias closure of R. If the closure
  // only has R in it, create a phi a def for R. Otherwise, create a phi,
  // and add a def for each S in the closure.

  // Sort the refs so that the phis will be created in a deterministic order.
  llvm::sort(MaxRefs);
  // Remove duplicates.
  auto NewEnd = std::unique(MaxRefs.begin(), MaxRefs.end());
  MaxRefs.erase(NewEnd, MaxRefs.end());

  // True if RR aliases any MaxRefs element whose index is already in Closure.
  auto Aliased = [this,&MaxRefs](RegisterRef RR,
                   std::vector<unsigned> &Closure) -> bool {
    for (unsigned I : Closure)
      if (PRI.alias(RR, MaxRefs[I]))
        return true;
    return false;
  };

  // Prepare a list of NodeIds of the block's predecessors.
  NodeList Preds;
  const MachineBasicBlock *MBB = BA.Addr->getCode();
  for (MachineBasicBlock *PB : MBB->predecessors())
    Preds.push_back(findBlock(PB));

  // Each iteration peels one alias closure off MaxRefs and builds one phi
  // for it, so the loop terminates when MaxRefs is exhausted.
  while (!MaxRefs.empty()) {
    // Put the first element in the closure, and then add all subsequent
    // elements from MaxRefs to it, if they alias at least one element
    // already in the closure.
    // ClosureIdx: vector of indices in MaxRefs of members of the closure.
    // Built in increasing order, which the erase loop below relies on.
    std::vector<unsigned> ClosureIdx = { 0 };
    for (unsigned i = 1; i != MaxRefs.size(); ++i)
      if (Aliased(MaxRefs[i], ClosureIdx))
        ClosureIdx.push_back(i);

    // Build a phi for the closure.
    unsigned CS = ClosureIdx.size();
    NodeAddr<PhiNode*> PA = newPhi(BA);

    // Add defs. Phi defs are "preserving": they do not clobber the parts
    // of the register they do not redefine.
    for (unsigned X = 0; X != CS; ++X) {
      RegisterRef RR = MaxRefs[ClosureIdx[X]];
      uint16_t PhiFlags = NodeAttrs::PhiRef | NodeAttrs::Preserving;
      NodeAddr<DefNode*> DA = newDef(PA, RR, PhiFlags);
      PA.Addr->addMember(DA, *this);
    }
    // Add phi uses: one use per closure member per predecessor block.
    for (NodeAddr<BlockNode*> PBA : Preds) {
      for (unsigned X = 0; X != CS; ++X) {
        RegisterRef RR = MaxRefs[ClosureIdx[X]];
        NodeAddr<PhiUseNode*> PUA = newPhiUse(PA, RR, PBA);
        PA.Addr->addMember(PUA, *this);
      }
    }

    // Erase from MaxRefs all elements in the closure. Erase back-to-front
    // (ClosureIdx is ascending) so earlier indices remain valid.
    auto Begin = MaxRefs.begin();
    for (unsigned i = ClosureIdx.size(); i != 0; --i)
      MaxRefs.erase(Begin + ClosureIdx[i-1]);
  }
}
1510 | | |
1511 | | // Remove any unneeded phi nodes that were created during the build process. |
1512 | 9 | void DataFlowGraph::removeUnusedPhis() { |
1513 | 9 | // This will remove unused phis, i.e. phis where each def does not reach |
1514 | 9 | // any uses or other defs. This will not detect or remove circular phi |
1515 | 9 | // chains that are otherwise dead. Unused/dead phis are created during |
1516 | 9 | // the build process and this function is intended to remove these cases |
1517 | 9 | // that are easily determinable to be unnecessary. |
1518 | 9 | |
1519 | 9 | SetVector<NodeId> PhiQ; |
1520 | 27 | for (NodeAddr<BlockNode*> BA : Func.Addr->members(*this)) { |
1521 | 27 | for (auto P : BA.Addr->members_if(IsPhi, *this)) |
1522 | 19 | PhiQ.insert(P.Id); |
1523 | 27 | } |
1524 | 9 | |
1525 | 19 | static auto HasUsedDef = [](NodeList &Ms) -> bool { |
1526 | 28 | for (NodeAddr<NodeBase*> M : Ms) { |
1527 | 28 | if (M.Addr->getKind() != NodeAttrs::Def) |
1528 | 9 | continue; |
1529 | 19 | NodeAddr<DefNode*> DA = M; |
1530 | 19 | if (DA.Addr->getReachedDef() != 0 || DA.Addr->getReachedUse() != 05 ) |
1531 | 16 | return true; |
1532 | 19 | } |
1533 | 19 | return false3 ; |
1534 | 19 | }; |
1535 | 9 | |
1536 | 9 | // Any phi, if it is removed, may affect other phis (make them dead). |
1537 | 9 | // For each removed phi, collect the potentially affected phis and add |
1538 | 9 | // them back to the queue. |
1539 | 28 | while (!PhiQ.empty()) { |
1540 | 19 | auto PA = addr<PhiNode*>(PhiQ[0]); |
1541 | 19 | PhiQ.remove(PA.Id); |
1542 | 19 | NodeList Refs = PA.Addr->members(*this); |
1543 | 19 | if (HasUsedDef(Refs)) |
1544 | 16 | continue; |
1545 | 12 | for (NodeAddr<RefNode*> RA : Refs)3 { |
1546 | 12 | if (NodeId RD = RA.Addr->getReachingDef()) { |
1547 | 7 | auto RDA = addr<DefNode*>(RD); |
1548 | 7 | NodeAddr<InstrNode*> OA = RDA.Addr->getOwner(*this); |
1549 | 7 | if (IsPhi(OA)) |
1550 | 0 | PhiQ.insert(OA.Id); |
1551 | 7 | } |
1552 | 12 | if (RA.Addr->isDef()) |
1553 | 3 | unlinkDef(RA, true); |
1554 | 9 | else |
1555 | 9 | unlinkUse(RA, true); |
1556 | 12 | } |
1557 | 3 | NodeAddr<BlockNode*> BA = PA.Addr->getOwner(*this); |
1558 | 3 | BA.Addr->removeMember(PA, *this); |
1559 | 3 | } |
1560 | 9 | } |
1561 | | |
// For a given reference node TA in an instruction node IA, connect the
// reaching def of TA to the appropriate def node. Create any shadow nodes
// as appropriate.
template <typename T>
void DataFlowGraph::linkRefUp(NodeAddr<InstrNode*> IA, NodeAddr<T> TA,
      DefStack &DS) {
  // No defs of this register on the stack: nothing to link to.
  if (DS.empty())
    return;
  RegisterRef RR = TA.Addr->getRegRef(*this);
  // TAP is the ref currently being linked: it starts as TA and, when more
  // than one disjoint reaching def is found, advances through shadow copies
  // of TA (one shadow per additional reaching def).
  NodeAddr<T> TAP;

  // References from the def stack that have been examined so far.
  RegisterAggr Defs(PRI);

  // Walk the def stack from the most recent def downwards.
  for (auto I = DS.top(), E = DS.bottom(); I != E; I.down()) {
    RegisterRef QR = I->Addr->getRegRef(*this);

    // Skip all defs that are aliased to any of the defs that we have already
    // seen. If this completes a cover of RR, stop the stack traversal.
    bool Alias = Defs.hasAliasOf(QR);
    bool Cover = Defs.insert(QR).hasCoverOf(RR);
    if (Alias) {
      if (Cover)
        break;
      continue;
    }

    // The reaching def.
    NodeAddr<DefNode*> RDA = *I;

    // Pick the reached node.
    if (TAP.Id == 0) {
      // First reaching def found: link the original ref itself.
      TAP = TA;
    } else {
      // Mark the existing ref as "shadow" and create a new shadow.
      TAP.Addr->setFlags(TAP.Addr->getFlags() | NodeAttrs::Shadow);
      TAP = getNextShadow(IA, TAP, true);
    }

    // Create the link.
    TAP.Addr->linkToDef(TAP.Id, RDA);

    // Once the accumulated defs cover all of RR, no older def can reach TA.
    if (Cover)
      break;
  }
}
1608 | | |
// Create data-flow links for all reference nodes in the statement node SA
// that satisfy the predicate P.
template <typename Predicate>
void DataFlowGraph::linkStmtRefs(DefStackMap &DefM, NodeAddr<StmtNode*> SA,
      Predicate P) {
#ifndef NDEBUG
  // Debug-only: track defs seen in this statement to catch duplicates.
  RegisterSet Defs;
#endif

  // Link all nodes (upwards in the data-flow) with their reaching defs.
  for (NodeAddr<RefNode*> RA : SA.Addr->members_if(P, *this)) {
    uint16_t Kind = RA.Addr->getKind();
    assert(Kind == NodeAttrs::Def || Kind == NodeAttrs::Use);
    RegisterRef RR = RA.Addr->getRegRef(*this);
#ifndef NDEBUG
    // Do not expect multiple defs of the same reference.
    assert(Kind != NodeAttrs::Def || !Defs.count(RR));
    Defs.insert(RR);
#endif

    // No def stack for this register means there is no reaching def to
    // link to; leave the ref unlinked.
    auto F = DefM.find(RR.Reg);
    if (F == DefM.end())
      continue;
    DefStack &DS = F->second;
    if (Kind == NodeAttrs::Use)
      linkRefUp<UseNode*>(SA, RA, DS);
    else if (Kind == NodeAttrs::Def)
      linkRefUp<DefNode*>(SA, RA, DS);
    else
      llvm_unreachable("Unexpected node in instruction");
  }
}
1640 | | |
// Create data-flow links for all instructions in the block node BA. This
// will include updating any phi nodes in BA.
void DataFlowGraph::linkBlockRefs(DefStackMap &DefM, NodeAddr<BlockNode*> BA) {
  // Push block delimiters.
  markBlock(BA.Id, DefM);

  auto IsClobber = [] (NodeAddr<RefNode*> RA) -> bool {
    return IsDef(RA) && (RA.Addr->getFlags() & NodeAttrs::Clobbering);
  };
  auto IsNoClobber = [] (NodeAddr<RefNode*> RA) -> bool {
    return IsDef(RA) && !(RA.Addr->getFlags() & NodeAttrs::Clobbering);
  };

  assert(BA.Addr && "block node address is needed to create a data-flow link");
  // For each non-phi instruction in the block, link all the defs and uses
  // to their reaching defs. For any member of the block (including phis),
  // push the defs on the corresponding stacks.
  // NOTE: the ordering below matters — uses and clobbering defs are linked
  // before this instruction's clobbers are pushed, while non-clobbering
  // defs are linked after the clobbers (but before the regular defs) are
  // pushed onto the stacks.
  for (NodeAddr<InstrNode*> IA : BA.Addr->members(*this)) {
    // Ignore phi nodes here. They will be linked part by part from the
    // predecessors.
    if (IA.Addr->getKind() == NodeAttrs::Stmt) {
      linkStmtRefs(DefM, IA, IsUse);
      linkStmtRefs(DefM, IA, IsClobber);
    }

    // Push the definitions on the stack.
    pushClobbers(IA, DefM);

    if (IA.Addr->getKind() == NodeAttrs::Stmt)
      linkStmtRefs(DefM, IA, IsNoClobber);

    pushDefs(IA, DefM);
  }

  // Recursively process all children in the dominator tree. The defs pushed
  // above stay on the stacks for the dominated blocks.
  MachineDomTreeNode *N = MDT.getNode(BA.Addr->getCode());
  for (auto I : *N) {
    MachineBasicBlock *SB = I->getBlock();
    NodeAddr<BlockNode*> SBA = findBlock(SB);
    linkBlockRefs(DefM, SBA);
  }

  // Link the phi uses from the successor blocks.
  // Selects phi uses whose associated predecessor is this block.
  auto IsUseForBA = [BA](NodeAddr<NodeBase*> NA) -> bool {
    if (NA.Addr->getKind() != NodeAttrs::Use)
      return false;
    assert(NA.Addr->getFlags() & NodeAttrs::PhiRef);
    NodeAddr<PhiUseNode*> PUA = NA;
    return PUA.Addr->getPredecessor() == BA.Id;
  };

  RegisterSet EHLiveIns = getLandingPadLiveIns();
  MachineBasicBlock *MBB = BA.Addr->getCode();

  for (MachineBasicBlock *SB : MBB->successors()) {
    bool IsEHPad = SB->isEHPad();
    NodeAddr<BlockNode*> SBA = findBlock(SB);
    for (NodeAddr<InstrNode*> IA : SBA.Addr->members_if(IsPhi, *this)) {
      // Do not link phi uses for landing pad live-ins.
      if (IsEHPad) {
        // Find what register this phi is for.
        NodeAddr<RefNode*> RA = IA.Addr->getFirstMember(*this);
        assert(RA.Id != 0);
        if (EHLiveIns.count(RA.Addr->getRegRef(*this)))
          continue;
      }
      // Go over each phi use associated with MBB, and link it.
      for (auto U : IA.Addr->members_if(IsUseForBA, *this)) {
        NodeAddr<PhiUseNode*> PUA = U;
        RegisterRef RR = PUA.Addr->getRegRef(*this);
        linkRefUp<UseNode*>(IA, PUA, DefM[RR.Reg]);
      }
    }
  }

  // Pop all defs from this block from the definition stacks.
  releaseBlock(BA.Id, DefM);
}
1719 | | |
1720 | | // Remove the use node UA from any data-flow and structural links. |
1721 | 8.89k | void DataFlowGraph::unlinkUseDF(NodeAddr<UseNode*> UA) { |
1722 | 8.89k | NodeId RD = UA.Addr->getReachingDef(); |
1723 | 8.89k | NodeId Sib = UA.Addr->getSibling(); |
1724 | 8.89k | |
1725 | 8.89k | if (RD == 0) { |
1726 | 2.21k | assert(Sib == 0); |
1727 | 2.21k | return; |
1728 | 2.21k | } |
1729 | 6.68k | |
1730 | 6.68k | auto RDA = addr<DefNode*>(RD); |
1731 | 6.68k | auto TA = addr<UseNode*>(RDA.Addr->getReachedUse()); |
1732 | 6.68k | if (TA.Id == UA.Id) { |
1733 | 5.30k | RDA.Addr->setReachedUse(Sib); |
1734 | 5.30k | return; |
1735 | 5.30k | } |
1736 | 1.37k | |
1737 | 1.72k | while (1.37k TA.Id != 0) { |
1738 | 1.72k | NodeId S = TA.Addr->getSibling(); |
1739 | 1.72k | if (S == UA.Id) { |
1740 | 1.37k | TA.Addr->setSibling(UA.Addr->getSibling()); |
1741 | 1.37k | return; |
1742 | 1.37k | } |
1743 | 357 | TA = addr<UseNode*>(S); |
1744 | 357 | } |
1745 | 1.37k | } |
1746 | | |
// Remove the def node DA from any data-flow and structural links.
void DataFlowGraph::unlinkDefDF(NodeAddr<DefNode*> DA) {
  //
  //         RD
  //         | reached
  //         | def
  //         :
  //         .
  //        +----+
  // ... -- | DA | -- ... -- 0  : sibling chain of DA
  //        +----+
  //         |  | reached
  //         |  : def
  //         |  .
  //         | ...  : Siblings (defs)
  //         |
  //         : reached
  //         . use
  //        ...  : sibling chain of reached uses

  NodeId RD = DA.Addr->getReachingDef();

  // Visit all siblings of the reached def and reset their reaching defs.
  // Also, defs reached by DA are now "promoted" to being reached by RD,
  // so all of them will need to be spliced into the sibling chain where
  // DA belongs.
  // Materialize a sibling chain as a list, preserving the chain order.
  auto getAllNodes = [this] (NodeId N) -> NodeList {
    NodeList Res;
    while (N) {
      auto RA = addr<RefNode*>(N);
      // Keep the nodes in the exact sibling order.
      Res.push_back(RA);
      N = RA.Addr->getSibling();
    }
    return Res;
  };
  NodeList ReachedDefs = getAllNodes(DA.Addr->getReachedDef());
  NodeList ReachedUses = getAllNodes(DA.Addr->getReachedUse());

  // With no reaching def there is no chain to splice into: the formerly
  // reached nodes become chain heads of nothing, so clear their siblings.
  if (RD == 0) {
    for (NodeAddr<RefNode*> I : ReachedDefs)
      I.Addr->setSibling(0);
    for (NodeAddr<RefNode*> I : ReachedUses)
      I.Addr->setSibling(0);
  }
  // Promote everything DA reached to being reached by RD (possibly 0).
  for (NodeAddr<DefNode*> I : ReachedDefs)
    I.Addr->setReachingDef(RD);
  for (NodeAddr<UseNode*> I : ReachedUses)
    I.Addr->setReachingDef(RD);

  NodeId Sib = DA.Addr->getSibling();
  if (RD == 0) {
    // DA was not on any sibling chain; nothing left to unlink.
    assert(Sib == 0);
    return;
  }

  // Update the reaching def node and remove DA from the sibling list.
  auto RDA = addr<DefNode*>(RD);
  auto TA = addr<DefNode*>(RDA.Addr->getReachedDef());
  if (TA.Id == DA.Id) {
    // If DA is the first reached def, just update the RD's reached def
    // to the DA's sibling.
    RDA.Addr->setReachedDef(Sib);
  } else {
    // Otherwise, traverse the sibling list of the reached defs and remove
    // DA from it.
    while (TA.Id != 0) {
      NodeId S = TA.Addr->getSibling();
      if (S == DA.Id) {
        TA.Addr->setSibling(Sib);
        break;
      }
      TA = addr<DefNode*>(S);
    }
  }

  // Splice the DA's reached defs into the RDA's reached def chain.
  if (!ReachedDefs.empty()) {
    auto Last = NodeAddr<DefNode*>(ReachedDefs.back());
    Last.Addr->setSibling(RDA.Addr->getReachedDef());
    RDA.Addr->setReachedDef(ReachedDefs.front().Id);
  }
  // Splice the DA's reached uses into the RDA's reached use chain.
  if (!ReachedUses.empty()) {
    auto Last = NodeAddr<UseNode*>(ReachedUses.back());
    Last.Addr->setSibling(RDA.Addr->getReachedUse());
    RDA.Addr->setReachedUse(ReachedUses.front().Id);
  }
}