/Users/buildslave/jenkins/sharedspace/clang-stage2-coverage-R@2/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.h
Line | Count | Source (jump to first uncovered line) |
1 | | //===- SelectionDAGBuilder.h - Selection-DAG building -----------*- C++ -*-===// |
2 | | // |
3 | | // The LLVM Compiler Infrastructure |
4 | | // |
5 | | // This file is distributed under the University of Illinois Open Source |
6 | | // License. See LICENSE.TXT for details. |
7 | | // |
8 | | //===----------------------------------------------------------------------===// |
9 | | // |
10 | | // This implements routines for translating from LLVM IR into SelectionDAG IR. |
11 | | // |
12 | | //===----------------------------------------------------------------------===// |
13 | | |
14 | | #ifndef LLVM_LIB_CODEGEN_SELECTIONDAG_SELECTIONDAGBUILDER_H |
15 | | #define LLVM_LIB_CODEGEN_SELECTIONDAG_SELECTIONDAGBUILDER_H |
16 | | |
17 | | #include "StatepointLowering.h" |
18 | | #include "llvm/ADT/APInt.h" |
19 | | #include "llvm/ADT/ArrayRef.h" |
20 | | #include "llvm/ADT/DenseMap.h" |
21 | | #include "llvm/ADT/SmallVector.h" |
22 | | #include "llvm/Analysis/AliasAnalysis.h" |
23 | | #include "llvm/CodeGen/ISDOpcodes.h" |
24 | | #include "llvm/CodeGen/MachineValueType.h" |
25 | | #include "llvm/CodeGen/SelectionDAG.h" |
26 | | #include "llvm/CodeGen/SelectionDAGNodes.h" |
27 | | #include "llvm/CodeGen/ValueTypes.h" |
28 | | #include "llvm/IR/CallSite.h" |
29 | | #include "llvm/IR/DebugLoc.h" |
30 | | #include "llvm/IR/Instruction.h" |
31 | | #include "llvm/IR/Statepoint.h" |
32 | | #include "llvm/Support/BranchProbability.h" |
33 | | #include "llvm/Support/CodeGen.h" |
34 | | #include "llvm/Support/ErrorHandling.h" |
35 | | #include "llvm/Target/TargetLowering.h" |
36 | | #include <algorithm> |
37 | | #include <cassert> |
38 | | #include <cstdint> |
39 | | #include <utility> |
40 | | #include <vector> |
41 | | |
42 | | namespace llvm { |
43 | | |
44 | | class AllocaInst; |
45 | | class AtomicCmpXchgInst; |
46 | | class AtomicRMWInst; |
47 | | class BasicBlock; |
48 | | class BranchInst; |
49 | | class CallInst; |
50 | | class CatchPadInst; |
51 | | class CatchReturnInst; |
52 | | class CatchSwitchInst; |
53 | | class CleanupPadInst; |
54 | | class CleanupReturnInst; |
55 | | class Constant; |
56 | | class ConstantInt; |
57 | | class ConstrainedFPIntrinsic; |
58 | | class DbgValueInst; |
59 | | class DataLayout; |
60 | | class DIExpression; |
61 | | class DILocalVariable; |
62 | | class DILocation; |
63 | | class FenceInst; |
64 | | class FunctionLoweringInfo; |
65 | | class GCFunctionInfo; |
66 | | class GCRelocateInst; |
67 | | class GCResultInst; |
68 | | class IndirectBrInst; |
69 | | class InvokeInst; |
70 | | class LandingPadInst; |
71 | | class LLVMContext; |
72 | | class LoadInst; |
73 | | class MachineBasicBlock; |
74 | | class PHINode; |
75 | | class ResumeInst; |
76 | | class ReturnInst; |
77 | | class SDDbgValue; |
78 | | class StoreInst; |
79 | | class SwitchInst; |
80 | | class TargetLibraryInfo; |
81 | | class TargetMachine; |
82 | | class Type; |
83 | | class VAArgInst; |
84 | | class UnreachableInst; |
85 | | class Use; |
86 | | class User; |
87 | | class Value; |
88 | | |
89 | | //===----------------------------------------------------------------------===// |
90 | | /// SelectionDAGBuilder - This is the common target-independent lowering |
91 | | /// implementation that is parameterized by a TargetLowering object. |
92 | | /// |
93 | | class SelectionDAGBuilder { |
94 | | /// CurInst - The current instruction being visited |
95 | | const Instruction *CurInst = nullptr; |
96 | | |
97 | | DenseMap<const Value*, SDValue> NodeMap; |
98 | | |
99 | | /// UnusedArgNodeMap - Maps argument value for unused arguments. This is used |
100 | | /// to preserve debug information for incoming arguments. |
101 | | DenseMap<const Value*, SDValue> UnusedArgNodeMap; |
102 | | |
103 | | /// DanglingDebugInfo - Helper type for DanglingDebugInfoMap. |
104 | | class DanglingDebugInfo { |
105 | | const DbgValueInst* DI = nullptr; |
106 | | DebugLoc dl; |
107 | | unsigned SDNodeOrder = 0; |
108 | | |
109 | | public: |
110 | 6.49M | DanglingDebugInfo() = default; |
111 | | DanglingDebugInfo(const DbgValueInst *di, DebugLoc DL, unsigned SDNO) |
112 | 38 | : DI(di), dl(std::move(DL)), SDNodeOrder(SDNO) {} |
113 | | |
114 | 13.9M | const DbgValueInst* getDI() { return DI; } |
115 | 35 | DebugLoc getdl() { return dl; } |
116 | 35 | unsigned getSDNodeOrder() { return SDNodeOrder; } |
117 | | }; |
118 | | |
119 | | /// DanglingDebugInfoMap - Keeps track of dbg_values for which we have not |
120 | | /// yet seen the referent. We defer handling these until we do see it. |
121 | | DenseMap<const Value*, DanglingDebugInfo> DanglingDebugInfoMap; |
122 | | |
123 | | public: |
124 | | /// PendingLoads - Loads are not emitted to the program immediately. We bunch |
125 | | /// them up and then emit token factor nodes when possible. This allows us to |
126 | | /// get simple disambiguation between loads without worrying about alias |
127 | | /// analysis. |
128 | | SmallVector<SDValue, 8> PendingLoads; |
129 | | |
130 | | /// State used while lowering a statepoint sequence (gc_statepoint, |
131 | | /// gc_relocate, and gc_result). See StatepointLowering.hpp/cpp for details. |
132 | | StatepointLoweringState StatepointLowering; |
133 | | |
134 | | private: |
135 | | /// PendingExports - CopyToReg nodes that copy values to virtual registers |
136 | | /// for export to other blocks need to be emitted before any terminator |
137 | | /// instruction, but they have no other ordering requirements. We bunch them |
138 | | /// up and the emit a single tokenfactor for them just before terminator |
139 | | /// instructions. |
140 | | SmallVector<SDValue, 8> PendingExports; |
141 | | |
142 | | /// SDNodeOrder - A unique monotonically increasing number used to order the |
143 | | /// SDNodes we create. |
144 | | unsigned SDNodeOrder; |
145 | | |
146 | | enum CaseClusterKind { |
147 | | /// A cluster of adjacent case labels with the same destination, or just one |
148 | | /// case. |
149 | | CC_Range, |
150 | | /// A cluster of cases suitable for jump table lowering. |
151 | | CC_JumpTable, |
152 | | /// A cluster of cases suitable for bit test lowering. |
153 | | CC_BitTests |
154 | | }; |
155 | | |
156 | | /// A cluster of case labels. |
157 | | struct CaseCluster { |
158 | | CaseClusterKind Kind; |
159 | | const ConstantInt *Low, *High; |
160 | | union { |
161 | | MachineBasicBlock *MBB; |
162 | | unsigned JTCasesIndex; |
163 | | unsigned BTCasesIndex; |
164 | | }; |
165 | | BranchProbability Prob; |
166 | | |
167 | | static CaseCluster range(const ConstantInt *Low, const ConstantInt *High, |
168 | 93.2k | MachineBasicBlock *MBB, BranchProbability Prob) { |
169 | 93.2k | CaseCluster C; |
170 | 93.2k | C.Kind = CC_Range; |
171 | 93.2k | C.Low = Low; |
172 | 93.2k | C.High = High; |
173 | 93.2k | C.MBB = MBB; |
174 | 93.2k | C.Prob = Prob; |
175 | 93.2k | return C; |
176 | 93.2k | } |
177 | | |
178 | | static CaseCluster jumpTable(const ConstantInt *Low, |
179 | | const ConstantInt *High, unsigned JTCasesIndex, |
180 | 5.16k | BranchProbability Prob) { |
181 | 5.16k | CaseCluster C; |
182 | 5.16k | C.Kind = CC_JumpTable; |
183 | 5.16k | C.Low = Low; |
184 | 5.16k | C.High = High; |
185 | 5.16k | C.JTCasesIndex = JTCasesIndex; |
186 | 5.16k | C.Prob = Prob; |
187 | 5.16k | return C; |
188 | 5.16k | } |
189 | | |
190 | | static CaseCluster bitTests(const ConstantInt *Low, const ConstantInt *High, |
191 | 621 | unsigned BTCasesIndex, BranchProbability Prob) { |
192 | 621 | CaseCluster C; |
193 | 621 | C.Kind = CC_BitTests; |
194 | 621 | C.Low = Low; |
195 | 621 | C.High = High; |
196 | 621 | C.BTCasesIndex = BTCasesIndex; |
197 | 621 | C.Prob = Prob; |
198 | 621 | return C; |
199 | 621 | } |
200 | | }; |
201 | | |
202 | | using CaseClusterVector = std::vector<CaseCluster>; |
203 | | using CaseClusterIt = CaseClusterVector::iterator; |
204 | | |
/// CaseBits - One destination of a bit-test lowering: the 64-bit mask of
/// case values that branch to BB, the number of bits set in that mask, and
/// the combined branch probability of those cases.
struct CaseBits {
  uint64_t Mask = 0;                // Bit i set <=> case value (First + i) targets BB.
  MachineBasicBlock* BB = nullptr;  // Destination block for every bit in Mask.
  unsigned Bits = 0;                // Popcount of Mask (number of merged cases).
  BranchProbability ExtraProb;      // Summed probability of the merged cases.

  CaseBits() = default;
  CaseBits(uint64_t mask, MachineBasicBlock* bb, unsigned bits,
           BranchProbability Prob):
    Mask(mask), BB(bb), Bits(bits), ExtraProb(Prob) {}
};
216 | | |
217 | | using CaseBitsVector = std::vector<CaseBits>; |
218 | | |
219 | | /// Sort Clusters and merge adjacent cases. |
220 | | void sortAndRangeify(CaseClusterVector &Clusters); |
221 | | |
222 | | /// CaseBlock - This structure is used to communicate between |
223 | | /// SelectionDAGBuilder and SDISel for the code generation of additional basic |
224 | | /// blocks needed by multi-case switch statements. |
/// CaseBlock - This structure is used to communicate between
/// SelectionDAGBuilder and SDISel for the code generation of additional basic
/// blocks needed by multi-case switch statements.
struct CaseBlock {
  // CC - the condition code to use for the case block's setcc node
  ISD::CondCode CC;

  // CmpLHS/CmpRHS/CmpMHS - The LHS/MHS/RHS of the comparison to emit.
  // Emit by default LHS op RHS. MHS is used for range comparisons:
  // If MHS is not null: (LHS <= MHS) and (MHS <= RHS).
  const Value *CmpLHS, *CmpMHS, *CmpRHS;

  // TrueBB/FalseBB - the block to branch to if the setcc is true/false.
  MachineBasicBlock *TrueBB, *FalseBB;

  // ThisBB - the block into which to emit the code for the setcc and branches
  MachineBasicBlock *ThisBB;

  /// The debug location of the instruction this CaseBlock was
  /// produced from.
  SDLoc DL;

  // TrueProb/FalseProb - branch weights. Default to "unknown" when the
  // caller has no profile information for this branch.
  BranchProbability TrueProb, FalseProb;

  CaseBlock(ISD::CondCode cc, const Value *cmplhs, const Value *cmprhs,
            const Value *cmpmiddle, MachineBasicBlock *truebb,
            MachineBasicBlock *falsebb, MachineBasicBlock *me,
            SDLoc dl,
            BranchProbability trueprob = BranchProbability::getUnknown(),
            BranchProbability falseprob = BranchProbability::getUnknown())
      : CC(cc), CmpLHS(cmplhs), CmpMHS(cmpmiddle), CmpRHS(cmprhs),
        TrueBB(truebb), FalseBB(falsebb), ThisBB(me), DL(dl),
        TrueProb(trueprob), FalseProb(falseprob) {}
};
257 | | |
/// JumpTable - Describes one jump table generated for a SwitchInst, for
/// communication between SelectBasicBlock and FinishBasicBlock.
struct JumpTable {
  /// Reg - the virtual register containing the index of the jump table entry
  /// to jump to.
  unsigned Reg;
  /// JTI - the JumpTableIndex for this jump table in the function.
  unsigned JTI;
  /// MBB - the MBB into which to emit the code for the indirect jump.
  MachineBasicBlock *MBB;
  /// Default - the MBB of the default bb, which is a successor of the range
  /// check MBB. This is used when updating PHI nodes in successors.
  MachineBasicBlock *Default;

  JumpTable(unsigned R, unsigned J, MachineBasicBlock *M,
            MachineBasicBlock *D): Reg(R), JTI(J), MBB(M), Default(D) {}
};
/// JumpTableHeader - The range check emitted before entering a jump table:
/// verifies the switch value lies in [First, Last] before indexing.
struct JumpTableHeader {
  APInt First;                // Lowest case value covered by the table.
  APInt Last;                 // Highest case value covered by the table.
  const Value *SValue;        // The switch condition being tested.
  MachineBasicBlock *HeaderBB; // Block where the range check is emitted.
  bool Emitted;               // True once the header code has been emitted.

  // APInts are taken by value and moved in; they may be wider than 64 bits.
  JumpTableHeader(APInt F, APInt L, const Value *SV, MachineBasicBlock *H,
                  bool E = false)
      : First(std::move(F)), Last(std::move(L)), SValue(SV), HeaderBB(H),
        Emitted(E) {}
};
285 | | using JumpTableBlock = std::pair<JumpTableHeader, JumpTable>; |
286 | | |
/// BitTestCase - One test in a bit-test sequence: branch to TargetBB if
/// the (1 << value) bit is present in Mask.
struct BitTestCase {
  uint64_t Mask;               // Set of case values handled by this test.
  MachineBasicBlock *ThisBB;   // Block in which the test is emitted.
  MachineBasicBlock *TargetBB; // Destination when the test succeeds.
  BranchProbability ExtraProb; // Probability of taking this branch.

  BitTestCase(uint64_t M, MachineBasicBlock* T, MachineBasicBlock* Tr,
              BranchProbability Prob):
    Mask(M), ThisBB(T), TargetBB(Tr), ExtraProb(Prob) {}
};
297 | | |
298 | | using BitTestInfo = SmallVector<BitTestCase, 3>; |
299 | | |
/// BitTestBlock - A full bit-test lowering of a cluster of switch cases:
/// the range check plus the chain of BitTestCase tests.
struct BitTestBlock {
  APInt First;                // Lowest case value in the cluster.
  APInt Range;                // Extent of the cluster: Last - First.
  const Value *SValue;        // The switch condition being tested.
  unsigned Reg;               // Virtual register holding the adjusted value.
  MVT RegVT;                  // Type of that register.
  bool Emitted;               // True once the range-check header is emitted.
  bool ContiguousRange;       // True if every value in range has a case;
                              // allows skipping the range check.
  MachineBasicBlock *Parent;  // Block the cluster was reached from.
  MachineBasicBlock *Default; // Destination when no test matches.
  BitTestInfo Cases;          // The individual bit tests to emit.
  BranchProbability Prob;     // Probability of entering this cluster.
  BranchProbability DefaultProb; // Probability of falling through to Default.

  BitTestBlock(APInt F, APInt R, const Value *SV, unsigned Rg, MVT RgVT,
               bool E, bool CR, MachineBasicBlock *P, MachineBasicBlock *D,
               BitTestInfo C, BranchProbability Pr)
      : First(std::move(F)), Range(std::move(R)), SValue(SV), Reg(Rg),
        RegVT(RgVT), Emitted(E), ContiguousRange(CR), Parent(P), Default(D),
        Cases(std::move(C)), Prob(Pr) {}
};
321 | | |
322 | | /// Return the range of value in [First..Last]. |
323 | | uint64_t getJumpTableRange(const CaseClusterVector &Clusters, unsigned First, |
324 | | unsigned Last) const; |
325 | | |
326 | | /// Return the number of cases in [First..Last]. |
327 | | uint64_t getJumpTableNumCases(const SmallVectorImpl<unsigned> &TotalCases, |
328 | | unsigned First, unsigned Last) const; |
329 | | |
330 | | /// Build a jump table cluster from Clusters[First..Last]. Returns false if it |
331 | | /// decides it's not a good idea. |
332 | | bool buildJumpTable(const CaseClusterVector &Clusters, unsigned First, |
333 | | unsigned Last, const SwitchInst *SI, |
334 | | MachineBasicBlock *DefaultMBB, CaseCluster &JTCluster); |
335 | | |
336 | | /// Find clusters of cases suitable for jump table lowering. |
337 | | void findJumpTables(CaseClusterVector &Clusters, const SwitchInst *SI, |
338 | | MachineBasicBlock *DefaultMBB); |
339 | | |
340 | | /// Build a bit test cluster from Clusters[First..Last]. Returns false if it |
341 | | /// decides it's not a good idea. |
342 | | bool buildBitTests(CaseClusterVector &Clusters, unsigned First, unsigned Last, |
343 | | const SwitchInst *SI, CaseCluster &BTCluster); |
344 | | |
345 | | /// Find clusters of cases suitable for bit test lowering. |
346 | | void findBitTestClusters(CaseClusterVector &Clusters, const SwitchInst *SI); |
347 | | |
/// SwitchWorkListItem - One unit of work for the recursive switch-lowering
/// algorithm: lower the clusters in [FirstCluster, LastCluster] inside MBB,
/// knowing the condition is already constrained to [GE, LT) by earlier
/// comparisons (either bound may be null when unconstrained).
struct SwitchWorkListItem {
  MachineBasicBlock *MBB;        // Block in which to emit this subtree.
  CaseClusterIt FirstCluster;    // First cluster of the half-open work range.
  CaseClusterIt LastCluster;     // Last cluster of the work range (inclusive).
  const ConstantInt *GE;         // Known lower bound (condition >= GE), or null.
  const ConstantInt *LT;         // Known upper bound (condition < LT), or null.
  BranchProbability DefaultProb; // Probability of reaching the default dest.
};
using SwitchWorkList = SmallVector<SwitchWorkListItem, 4>;
357 | | |
358 | | /// Determine the rank by weight of CC in [First,Last]. If CC has more weight |
359 | | /// than each cluster in the range, its rank is 0. |
360 | | static unsigned caseClusterRank(const CaseCluster &CC, CaseClusterIt First, |
361 | | CaseClusterIt Last); |
362 | | |
363 | | /// Emit comparison and split W into two subtrees. |
364 | | void splitWorkItem(SwitchWorkList &WorkList, const SwitchWorkListItem &W, |
365 | | Value *Cond, MachineBasicBlock *SwitchMBB); |
366 | | |
367 | | /// Lower W. |
368 | | void lowerWorkItem(SwitchWorkListItem W, Value *Cond, |
369 | | MachineBasicBlock *SwitchMBB, |
370 | | MachineBasicBlock *DefaultMBB); |
371 | | |
372 | | |
373 | | /// A class which encapsulates all of the information needed to generate a |
374 | | /// stack protector check and signals to isel via its state being initialized |
375 | | /// that a stack protector needs to be generated. |
376 | | /// |
377 | | /// *NOTE* The following is a high level documentation of SelectionDAG Stack |
378 | | /// Protector Generation. The reason that it is placed here is for a lack of |
379 | | /// other good places to stick it. |
380 | | /// |
381 | | /// High Level Overview of SelectionDAG Stack Protector Generation: |
382 | | /// |
383 | | /// Previously, generation of stack protectors was done exclusively in the |
384 | | /// pre-SelectionDAG Codegen LLVM IR Pass "Stack Protector". This necessitated |
385 | | /// splitting basic blocks at the IR level to create the success/failure basic |
386 | | /// blocks in the tail of the basic block in question. As a result of this, |
387 | | /// calls that would have qualified for the sibling call optimization were no |
388 | | /// longer eligible for optimization since said calls were no longer right in |
389 | | /// the "tail position" (i.e. the immediate predecessor of a ReturnInst |
390 | | /// instruction). |
391 | | /// |
392 | | /// Then it was noticed that since the sibling call optimization causes the |
393 | | /// callee to reuse the caller's stack, if we could delay the generation of |
394 | | /// the stack protector check until later in CodeGen after the sibling call |
395 | | /// decision was made, we get both the tail call optimization and the stack |
396 | | /// protector check! |
397 | | /// |
398 | | /// A few goals in solving this problem were: |
399 | | /// |
400 | | /// 1. Preserve the architecture independence of stack protector generation. |
401 | | /// |
402 | | /// 2. Preserve the normal IR level stack protector check for platforms like |
403 | | /// OpenBSD for which we support platform-specific stack protector |
404 | | /// generation. |
405 | | /// |
406 | | /// The main problem that guided the present solution is that one can not |
407 | | /// solve this problem in an architecture independent manner at the IR level |
408 | | /// only. This is because: |
409 | | /// |
410 | | /// 1. The decision on whether or not to perform a sibling call on certain |
411 | | /// platforms (for instance i386) requires lower level information |
412 | | /// related to available registers that can not be known at the IR level. |
413 | | /// |
414 | | /// 2. Even if the previous point were not true, the decision on whether to |
415 | | /// perform a tail call is done in LowerCallTo in SelectionDAG which |
416 | | /// occurs after the Stack Protector Pass. As a result, one would need to |
417 | | /// put the relevant callinst into the stack protector check success |
418 | | /// basic block (where the return inst is placed) and then move it back |
419 | | /// later at SelectionDAG/MI time before the stack protector check if the |
420 | | /// tail call optimization failed. The MI level option was nixed |
421 | | /// immediately since it would require platform-specific pattern |
422 | | /// matching. The SelectionDAG level option was nixed because |
423 | | /// SelectionDAG only processes one IR level basic block at a time |
424 | | /// implying one could not create a DAG Combine to move the callinst. |
425 | | /// |
426 | | /// To get around this problem a few things were realized: |
427 | | /// |
428 | | /// 1. While one can not handle multiple IR level basic blocks at the |
429 | | /// SelectionDAG Level, one can generate multiple machine basic blocks |
430 | | /// for one IR level basic block. This is how we handle bit tests and |
431 | | /// switches. |
432 | | /// |
433 | | /// 2. At the MI level, tail calls are represented via a special return |
434 | | /// MIInst called "tcreturn". Thus if we know the basic block in which we |
435 | | /// wish to insert the stack protector check, we get the correct behavior |
436 | | /// by always inserting the stack protector check right before the return |
437 | | /// statement. This is a "magical transformation" since no matter where |
438 | | /// the stack protector check intrinsic is, we always insert the stack |
439 | | /// protector check code at the end of the BB. |
440 | | /// |
441 | | /// Given the aforementioned constraints, the following solution was devised: |
442 | | /// |
443 | | /// 1. On platforms that do not support SelectionDAG stack protector check |
444 | | /// generation, allow for the normal IR level stack protector check |
445 | | /// generation to continue. |
446 | | /// |
447 | | /// 2. On platforms that do support SelectionDAG stack protector check |
448 | | /// generation: |
449 | | /// |
450 | | /// a. Use the IR level stack protector pass to decide if a stack |
451 | | /// protector is required/which BB we insert the stack protector check |
452 | | /// in by reusing the logic already therein. If we wish to generate a |
453 | | /// stack protector check in a basic block, we place a special IR |
454 | | /// intrinsic called llvm.stackprotectorcheck right before the BB's |
455 | | /// returninst or if there is a callinst that could potentially be |
456 | | /// sibling call optimized, before the call inst. |
457 | | /// |
458 | | /// b. Then when a BB with said intrinsic is processed, we codegen the BB |
459 | | /// normally via SelectBasicBlock. In said process, when we visit the |
460 | | /// stack protector check, we do not actually emit anything into the |
461 | | /// BB. Instead, we just initialize the stack protector descriptor |
462 | | /// class (which involves stashing information/creating the success |
463 | | /// mbbb and the failure mbb if we have not created one for this |
464 | | /// function yet) and export the guard variable that we are going to |
465 | | /// compare. |
466 | | /// |
467 | | /// c. After we finish selecting the basic block, in FinishBasicBlock if |
468 | | /// the StackProtectorDescriptor attached to the SelectionDAGBuilder is |
469 | | /// initialized, we produce the validation code with one of these |
470 | | /// techniques: |
471 | | /// 1) with a call to a guard check function |
472 | | /// 2) with inlined instrumentation |
473 | | /// |
474 | | /// 1) We insert a call to the check function before the terminator. |
475 | | /// |
476 | | /// 2) We first find a splice point in the parent basic block |
477 | | /// before the terminator and then splice the terminator of said basic |
478 | | /// block into the success basic block. Then we code-gen a new tail for |
479 | | /// the parent basic block consisting of the two loads, the comparison, |
480 | | /// and finally two branches to the success/failure basic blocks. We |
481 | | /// conclude by code-gening the failure basic block if we have not |
482 | | /// code-gened it already (all stack protector checks we generate in |
483 | | /// the same function, use the same failure basic block). |
class StackProtectorDescriptor {
public:
  StackProtectorDescriptor() = default;

  /// Returns true if all fields of the stack protector descriptor are
  /// initialized implying that we should/are ready to emit a stack protector.
  bool shouldEmitStackProtector() const {
    return ParentMBB && SuccessMBB && FailureMBB;
  }

  /// Returns true when the guard should be checked by calling a guard-check
  /// function (only ParentMBB is set; no inline success/failure blocks).
  bool shouldEmitFunctionBasedCheckStackProtector() const {
    return ParentMBB && !SuccessMBB && !FailureMBB;
  }

  /// Initialize the stack protector descriptor structure for a new basic
  /// block.
  void initialize(const BasicBlock *BB, MachineBasicBlock *MBB,
                  bool FunctionBasedInstrumentation) {
    // Make sure we are not initialized yet.
    assert(!shouldEmitStackProtector() && "Stack Protector Descriptor is "
                                          "already initialized!");
    ParentMBB = MBB;
    // Inline instrumentation needs explicit success/failure blocks; a
    // function-based check leaves them null (see the predicate above).
    if (!FunctionBasedInstrumentation) {
      SuccessMBB = AddSuccessorMBB(BB, MBB, /* IsLikely */ true);
      // FailureMBB is passed back in so it is created at most once per
      // function and reused by every check.
      FailureMBB = AddSuccessorMBB(BB, MBB, /* IsLikely */ false, FailureMBB);
    }
  }

  /// Reset state that changes when we handle different basic blocks.
  ///
  /// This currently includes:
  ///
  /// 1. The specific basic block we are generating a
  /// stack protector for (ParentMBB).
  ///
  /// 2. The successor machine basic block that will contain the tail of
  /// parent mbb after we create the stack protector check (SuccessMBB). This
  /// BB is visited only on stack protector check success.
  void resetPerBBState() {
    ParentMBB = nullptr;
    SuccessMBB = nullptr;
  }

  /// Reset state that only changes when we switch functions.
  ///
  /// This currently includes:
  ///
  /// 1. FailureMBB since we reuse the failure code path for all stack
  /// protector checks created in an individual function.
  ///
  /// 2.The guard variable since the guard variable we are checking against is
  /// always the same.
  void resetPerFunctionState() {
    FailureMBB = nullptr;
  }

  MachineBasicBlock *getParentMBB() { return ParentMBB; }
  MachineBasicBlock *getSuccessMBB() { return SuccessMBB; }
  MachineBasicBlock *getFailureMBB() { return FailureMBB; }

private:
  /// The basic block for which we are generating the stack protector.
  ///
  /// As a result of stack protector generation, we will splice the
  /// terminators of this basic block into the successor mbb SuccessMBB and
  /// replace it with a compare/branch to the successor mbbs
  /// SuccessMBB/FailureMBB depending on whether or not the stack protector
  /// was violated.
  MachineBasicBlock *ParentMBB = nullptr;

  /// A basic block visited on stack protector check success that contains the
  /// terminators of ParentMBB.
  MachineBasicBlock *SuccessMBB = nullptr;

  /// This basic block visited on stack protector check failure that will
  /// contain a call to __stack_chk_fail().
  MachineBasicBlock *FailureMBB = nullptr;

  /// Add a successor machine basic block to ParentMBB. If the successor mbb
  /// has not been created yet (i.e. if SuccMBB = 0), then the machine basic
  /// block will be created. Assign a large weight if IsLikely is true.
  MachineBasicBlock *AddSuccessorMBB(const BasicBlock *BB,
                                     MachineBasicBlock *ParentMBB,
                                     bool IsLikely,
                                     MachineBasicBlock *SuccMBB = nullptr);
};
570 | | |
571 | | private: |
572 | | const TargetMachine &TM; |
573 | | |
574 | | public: |
575 | | /// Lowest valid SDNodeOrder. The special case 0 is reserved for scheduling |
576 | | /// nodes without a corresponding SDNode. |
577 | | static const unsigned LowestSDNodeOrder = 1; |
578 | | |
579 | | SelectionDAG &DAG; |
580 | | const DataLayout *DL = nullptr; |
581 | | AliasAnalysis *AA = nullptr; |
582 | | const TargetLibraryInfo *LibInfo; |
583 | | |
584 | | /// SwitchCases - Vector of CaseBlock structures used to communicate |
585 | | /// SwitchInst code generation information. |
586 | | std::vector<CaseBlock> SwitchCases; |
587 | | |
588 | | /// JTCases - Vector of JumpTable structures used to communicate |
589 | | /// SwitchInst code generation information. |
590 | | std::vector<JumpTableBlock> JTCases; |
591 | | |
592 | | /// BitTestCases - Vector of BitTestBlock structures used to communicate |
593 | | /// SwitchInst code generation information. |
594 | | std::vector<BitTestBlock> BitTestCases; |
595 | | |
596 | | /// A StackProtectorDescriptor structure used to communicate stack protector |
597 | | /// information in between SelectBasicBlock and FinishBasicBlock. |
598 | | StackProtectorDescriptor SPDescriptor; |
599 | | |
600 | | // Emit PHI-node-operand constants only once even if used by multiple |
601 | | // PHI nodes. |
602 | | DenseMap<const Constant *, unsigned> ConstantsOut; |
603 | | |
604 | | /// FuncInfo - Information about the function as a whole. |
605 | | /// |
606 | | FunctionLoweringInfo &FuncInfo; |
607 | | |
608 | | /// GFI - Garbage collection metadata for the function. |
609 | | GCFunctionInfo *GFI; |
610 | | |
611 | | /// LPadToCallSiteMap - Map a landing pad to the call site indexes. |
612 | | DenseMap<MachineBasicBlock *, SmallVector<unsigned, 4>> LPadToCallSiteMap; |
613 | | |
614 | | /// HasTailCall - This is set to true if a call in the current |
615 | | /// block has been translated as a tail call. In this case, |
616 | | /// no subsequent DAG nodes should be created. |
617 | | bool HasTailCall = false; |
618 | | |
619 | | LLVMContext *Context; |
620 | | |
/// Construct a builder for the given DAG and function-level state. Note:
/// the optimization level `ol` is currently unused by the constructor body.
SelectionDAGBuilder(SelectionDAG &dag, FunctionLoweringInfo &funcinfo,
                    CodeGenOpt::Level ol)
    : SDNodeOrder(LowestSDNodeOrder), TM(dag.getTarget()), DAG(dag),
      FuncInfo(funcinfo) {}
625 | | |
626 | | void init(GCFunctionInfo *gfi, AliasAnalysis *AA, |
627 | | const TargetLibraryInfo *li); |
628 | | |
629 | | /// Clear out the current SelectionDAG and the associated state and prepare |
630 | | /// this SelectionDAGBuilder object to be used for a new block. This doesn't |
631 | | /// clear out information about additional blocks that are needed to complete |
632 | | /// switch lowering or PHI node updating; that information is cleared out as |
633 | | /// it is consumed. |
634 | | void clear(); |
635 | | |
636 | | /// Clear the dangling debug information map. This function is separated from |
637 | | /// the clear so that debug information that is dangling in a basic block can |
638 | | /// be properly resolved in a different basic block. This allows the |
639 | | /// SelectionDAG to resolve dangling debug information attached to PHI nodes. |
640 | | void clearDanglingDebugInfo(); |
641 | | |
642 | | /// Return the current virtual root of the Selection DAG, flushing any |
643 | | /// PendingLoad items. This must be done before emitting a store or any other |
644 | | /// node that may need to be ordered after any prior load instructions. |
645 | | SDValue getRoot(); |
646 | | |
647 | | /// Similar to getRoot, but instead of flushing all the PendingLoad items, |
648 | | /// flush all the PendingExports items. It is necessary to do this before |
649 | | /// emitting a terminator instruction. |
650 | | SDValue getControlRoot(); |
651 | | |
/// Return an SDLoc for the instruction currently being visited, tagged
/// with the current SDNodeOrder so emitted nodes are ordered correctly.
SDLoc getCurSDLoc() const {
  return SDLoc(CurInst, SDNodeOrder);
}
655 | | |
/// Return the DebugLoc of the instruction currently being visited, or an
/// empty DebugLoc when no instruction is being visited.
DebugLoc getCurDebugLoc() const {
  return CurInst ? CurInst->getDebugLoc() : DebugLoc();
}
659 | | |
660 | | void CopyValueToVirtualRegister(const Value *V, unsigned Reg); |
661 | | |
662 | | void visit(const Instruction &I); |
663 | | |
664 | | void visit(unsigned Opcode, const User &I); |
665 | | |
666 | | /// getCopyFromRegs - If there was virtual register allocated for the value V |
667 | | /// emit CopyFromReg of the specified type Ty. Return empty SDValue() otherwise. |
668 | | SDValue getCopyFromRegs(const Value *V, Type *Ty); |
669 | | |
670 | | // resolveDanglingDebugInfo - if we saw an earlier dbg_value referring to V, |
671 | | // generate the debug data structures now that we've seen its definition. |
672 | | void resolveDanglingDebugInfo(const Value *V, SDValue Val); |
673 | | |
674 | | SDValue getValue(const Value *V); |
675 | | bool findValue(const Value *V) const; |
676 | | |
677 | | SDValue getNonRegisterValue(const Value *V); |
678 | | SDValue getValueImpl(const Value *V); |
679 | | |
/// Record NewN as the lowered SDValue for IR value V. Each value may be
/// set exactly once (asserts if a value was already recorded).
void setValue(const Value *V, SDValue NewN) {
  SDValue &N = NodeMap[V];
  assert(!N.getNode() && "Already set a value for this node!");
  N = NewN;
}
685 | | |
/// Record NewN as the SDValue for unused argument V, kept solely so debug
/// info for incoming arguments can still be emitted. Set-once (asserts).
void setUnusedArgValue(const Value *V, SDValue NewN) {
  SDValue &N = UnusedArgNodeMap[V];
  assert(!N.getNode() && "Already set a value for this node!");
  N = NewN;
}
691 | | |
  // NOTE(review): the comments on the two condition-lowering helpers below are
  // inferred from parameter names (TW/FW look like true/false branch
  // probabilities) — confirm against the definitions in SelectionDAGBuilder.cpp.

  /// Decompose a condition built from binary operator \p Opc (a tree of
  /// and/or) into a sequence of conditional branches between TBB and FBB,
  /// distributing probabilities TW/FW; InvertCond lowers the negation.
  void FindMergedConditions(const Value *Cond, MachineBasicBlock *TBB,
                            MachineBasicBlock *FBB, MachineBasicBlock *CurBB,
                            MachineBasicBlock *SwitchBB,
                            Instruction::BinaryOps Opc, BranchProbability TW,
                            BranchProbability FW, bool InvertCond);
  /// Emit the branch for one leaf condition produced by FindMergedConditions.
  void EmitBranchForMergedCondition(const Value *Cond, MachineBasicBlock *TBB,
                                    MachineBasicBlock *FBB,
                                    MachineBasicBlock *CurBB,
                                    MachineBasicBlock *SwitchBB,
                                    BranchProbability TW, BranchProbability FW,
                                    bool InvertCond);
  /// Decide whether the given case blocks should be emitted as branches.
  bool ShouldEmitAsBranches(const std::vector<CaseBlock> &Cases);
  /// Return true if \p V computed in FromBB can be used in other blocks.
  bool isExportableFromCurrentBlock(const Value *V, const BasicBlock *FromBB);
  void CopyToExportRegsIfNeeded(const Value *V);
  void ExportFromCurrentBlock(const Value *V);
  /// Lower a call to \p Callee; EHPadBB is non-null when this call is the
  /// body of an invoke that unwinds to that EH pad.
  void LowerCallTo(ImmutableCallSite CS, SDValue Callee, bool IsTailCall,
                   const BasicBlock *EHPadBB = nullptr);

  // Lower range metadata from 0 to N to assert zext to an integer of nearest
  // floor power of two.
  SDValue lowerRangeToAssertZExt(SelectionDAG &DAG, const Instruction &I,
                                 SDValue Op);

  /// Fill \p CLI with the information needed to lower the NumArgs arguments
  /// of call site \p CS starting at argument ArgIdx.
  void populateCallLoweringInfo(TargetLowering::CallLoweringInfo &CLI,
                                ImmutableCallSite CS, unsigned ArgIdx,
                                unsigned NumArgs, SDValue Callee,
                                Type *ReturnTy, bool IsPatchPoint);

  /// Lower a call described by \p CLI, as either a plain call or (when
  /// EHPadBB is non-null) an invoke unwinding to that pad.
  std::pair<SDValue, SDValue>
  lowerInvokable(TargetLowering::CallLoweringInfo &CLI,
                 const BasicBlock *EHPadBB = nullptr);

  /// UpdateSplitBlock - When an MBB was split during scheduling, update the
  /// references that need to refer to the last resulting block.
  void UpdateSplitBlock(MachineBasicBlock *First, MachineBasicBlock *Last);
727 | | |
  /// Describes a gc.statepoint or a gc.statepoint like thing for the purposes
  /// of lowering into a STATEPOINT node.
  struct StatepointLoweringInfo {
    /// Bases[i] is the base pointer for Ptrs[i]. Together they denote the set
    /// of gc pointers this STATEPOINT has to relocate.
    SmallVector<const Value *, 16> Bases;
    SmallVector<const Value *, 16> Ptrs;

    /// The set of gc.relocate calls associated with this gc.statepoint.
    SmallVector<const GCRelocateInst *, 16> GCRelocates;

    /// The full list of gc arguments to the gc.statepoint being lowered.
    ArrayRef<const Use> GCArgs;

    /// The gc.statepoint instruction.
    const Instruction *StatepointInstr = nullptr;

    /// The list of gc transition arguments present in the gc.statepoint being
    /// lowered.
    ArrayRef<const Use> GCTransitionArgs;

    /// The ID that the resulting STATEPOINT instruction has to report.
    /// Initialized to all-ones (-1 wrapped to unsigned), presumably a
    /// "not yet set" sentinel — confirm at the fill-in sites.
    unsigned ID = -1;

    /// Information regarding the underlying call instruction.
    TargetLowering::CallLoweringInfo CLI;

    /// The deoptimization state associated with this gc.statepoint call, if
    /// any.
    ArrayRef<const Use> DeoptState;

    /// Flags associated with the meta arguments being lowered.
    /// Also defaulted to all-ones; see note on ID above.
    uint64_t StatepointFlags = -1;

    /// The number of patchable bytes the call needs to get lowered into.
    unsigned NumPatchBytes = -1;

    /// The exception handling unwind destination, in case this represents an
    /// invoke of gc.statepoint.
    const BasicBlock *EHPadBB = nullptr;

    // CLI needs the DAG at construction time; everything else is filled in
    // field-by-field by the statepoint lowering code.
    explicit StatepointLoweringInfo(SelectionDAG &DAG) : CLI(DAG) {}
  };
771 | | |
  /// Lower \p SLI into a STATEPOINT instruction.
  SDValue LowerAsSTATEPOINT(StatepointLoweringInfo &SLI);

  // This function is responsible for the whole statepoint lowering process.
  // It uniformly handles invoke and call statepoints.
  void LowerStatepoint(ImmutableStatepoint Statepoint,
                       const BasicBlock *EHPadBB = nullptr);

  /// Lower a call site that carries a "deopt" operand bundle; EHPadBB is
  /// non-null for the invoke form.
  void LowerCallSiteWithDeoptBundle(ImmutableCallSite CS, SDValue Callee,
                                    const BasicBlock *EHPadBB);

  void LowerDeoptimizeCall(const CallInst *CI);
  void LowerDeoptimizingReturn();

  /// Shared implementation for the two entry points above.
  void LowerCallSiteWithDeoptBundleImpl(ImmutableCallSite CS, SDValue Callee,
                                        const BasicBlock *EHPadBB,
                                        bool VarArgDisallowed,
                                        bool ForceVoidReturnTy);
790 | | |
791 | | /// Returns the type of FrameIndex and TargetFrameIndex nodes. |
792 | 132 | MVT getFrameIndexTy() { |
793 | 132 | return DAG.getTargetLoweringInfo().getFrameIndexTy(DAG.getDataLayout()); |
794 | 132 | } |
795 | | |
private:
  // Terminator instructions.
  void visitRet(const ReturnInst &I);
  void visitBr(const BranchInst &I);
  void visitSwitch(const SwitchInst &I);
  void visitIndirectBr(const IndirectBrInst &I);
  void visitUnreachable(const UnreachableInst &I);
  void visitCleanupRet(const CleanupReturnInst &I);
  void visitCatchSwitch(const CatchSwitchInst &I);
  void visitCatchRet(const CatchReturnInst &I);
  void visitCatchPad(const CatchPadInst &I);
  void visitCleanupPad(const CleanupPadInst &CPI);

  /// Return the probability of the CFG edge Src -> Dst.
  BranchProbability getEdgeProbability(const MachineBasicBlock *Src,
                                       const MachineBasicBlock *Dst) const;
  /// Add Dst as a successor of Src with the given edge probability.
  void addSuccessorWithProb(
      MachineBasicBlock *Src, MachineBasicBlock *Dst,
      BranchProbability Prob = BranchProbability::getUnknown());

public:
  // Switch-lowering helpers (case clusters, stack-protector checks,
  // bit tests, jump tables). Public so they can be driven from outside
  // this class during switch lowering.
  void visitSwitchCase(CaseBlock &CB,
                       MachineBasicBlock *SwitchBB);
  void visitSPDescriptorParent(StackProtectorDescriptor &SPD,
                               MachineBasicBlock *ParentBB);
  void visitSPDescriptorFailure(StackProtectorDescriptor &SPD);
  void visitBitTestHeader(BitTestBlock &B, MachineBasicBlock *SwitchBB);
  void visitBitTestCase(BitTestBlock &BB,
                        MachineBasicBlock* NextMBB,
                        BranchProbability BranchProbToNext,
                        unsigned Reg,
                        BitTestCase &B,
                        MachineBasicBlock *SwitchBB);
  void visitJumpTable(JumpTable &JT);
  void visitJumpTableHeader(JumpTable &JT, JumpTableHeader &JTH,
                            MachineBasicBlock *SwitchBB);
831 | | |
private:
  // These all get lowered before this pass.
  void visitInvoke(const InvokeInst &I);
  void visitResume(const ResumeInst &I);

  // Binary and shift operators: each one-line visitor forwards to the shared
  // visitBinary/visitShift helper with the matching ISD opcode. visitFSub and
  // visitSDiv are defined out of line (they do more than a plain forward).
  void visitBinary(const User &I, unsigned OpCode);
  void visitShift(const User &I, unsigned Opcode);
  void visitAdd(const User &I) { visitBinary(I, ISD::ADD); }
  void visitFAdd(const User &I) { visitBinary(I, ISD::FADD); }
  void visitSub(const User &I) { visitBinary(I, ISD::SUB); }
  void visitFSub(const User &I);
  void visitMul(const User &I) { visitBinary(I, ISD::MUL); }
  void visitFMul(const User &I) { visitBinary(I, ISD::FMUL); }
  void visitURem(const User &I) { visitBinary(I, ISD::UREM); }
  void visitSRem(const User &I) { visitBinary(I, ISD::SREM); }
  void visitFRem(const User &I) { visitBinary(I, ISD::FREM); }
  void visitUDiv(const User &I) { visitBinary(I, ISD::UDIV); }
  void visitSDiv(const User &I);
  void visitFDiv(const User &I) { visitBinary(I, ISD::FDIV); }
  void visitAnd (const User &I) { visitBinary(I, ISD::AND); }
  void visitOr  (const User &I) { visitBinary(I, ISD::OR); }
  void visitXor (const User &I) { visitBinary(I, ISD::XOR); }
  void visitShl (const User &I) { visitShift(I, ISD::SHL); }
  void visitLShr(const User &I) { visitShift(I, ISD::SRL); }
  void visitAShr(const User &I) { visitShift(I, ISD::SRA); }
  void visitICmp(const User &I);
  void visitFCmp(const User &I);
  // Visit the conversion instructions
  void visitTrunc(const User &I);
  void visitZExt(const User &I);
  void visitSExt(const User &I);
  void visitFPTrunc(const User &I);
  void visitFPExt(const User &I);
  void visitFPToUI(const User &I);
  void visitFPToSI(const User &I);
  void visitUIToFP(const User &I);
  void visitSIToFP(const User &I);
  void visitPtrToInt(const User &I);
  void visitIntToPtr(const User &I);
  void visitBitCast(const User &I);
  void visitAddrSpaceCast(const User &I);

  // Vector instructions.
  void visitExtractElement(const User &I);
  void visitInsertElement(const User &I);
  void visitShuffleVector(const User &I);

  // Aggregate and EH instructions.
  void visitExtractValue(const User &I);
  void visitInsertValue(const User &I);
  void visitLandingPad(const LandingPadInst &I);

  void visitGetElementPtr(const User &I);
  void visitSelect(const User &I);

  // Memory instructions, plus the masked/gather/scatter intrinsic forms.
  void visitAlloca(const AllocaInst &I);
  void visitLoad(const LoadInst &I);
  void visitStore(const StoreInst &I);
  void visitMaskedLoad(const CallInst &I, bool IsExpanding = false);
  void visitMaskedStore(const CallInst &I, bool IsCompressing = false);
  void visitMaskedGather(const CallInst &I);
  void visitMaskedScatter(const CallInst &I);
  void visitAtomicCmpXchg(const AtomicCmpXchgInst &I);
  void visitAtomicRMW(const AtomicRMWInst &I);
  void visitFence(const FenceInst &I);
  void visitPHI(const PHINode &I);
  void visitCall(const CallInst &I);
  // The visit*Call helpers below return true when they recognized and
  // lowered the libcall themselves (so visitCall can skip generic lowering).
  bool visitMemCmpCall(const CallInst &I);
  bool visitMemPCpyCall(const CallInst &I);
  bool visitMemChrCall(const CallInst &I);
  bool visitStrCpyCall(const CallInst &I, bool isStpcpy);
  bool visitStrCmpCall(const CallInst &I);
  bool visitStrLenCall(const CallInst &I);
  bool visitStrNLenCall(const CallInst &I);
  bool visitUnaryFloatCall(const CallInst &I, unsigned Opcode);
  bool visitBinaryFloatCall(const CallInst &I, unsigned Opcode);
  void visitAtomicLoad(const LoadInst &I);
  void visitAtomicStore(const StoreInst &I);
  void visitLoadFromSwiftError(const LoadInst &I);
  void visitStoreToSwiftError(const StoreInst &I);

  void visitInlineAsm(ImmutableCallSite CS);
  const char *visitIntrinsicCall(const CallInst &I, unsigned Intrinsic);
  void visitTargetIntrinsic(const CallInst &I, unsigned Intrinsic);
  void visitConstrainedFPIntrinsic(const ConstrainedFPIntrinsic &FPI);

  void visitVAStart(const CallInst &I);
  void visitVAArg(const VAArgInst &I);
  void visitVAEnd(const CallInst &I);
  void visitVACopy(const CallInst &I);
  void visitStackmap(const CallInst &I);
  void visitPatchpoint(ImmutableCallSite CS,
                       const BasicBlock *EHPadBB = nullptr);

  // These two are implemented in StatepointLowering.cpp
  void visitGCRelocate(const GCRelocateInst &I);
  void visitGCResult(const GCResultInst &I);

  void visitVectorReduce(const CallInst &I, unsigned Intrinsic);

  // UserOp1/UserOp2 are internal opcodes that must never survive to
  // instruction selection; reaching these visitors is a compiler bug.
  void visitUserOp1(const Instruction &I) {
    llvm_unreachable("UserOp1 should not exist at instruction selection time!");
  }
  void visitUserOp2(const Instruction &I) {
    llvm_unreachable("UserOp2 should not exist at instruction selection time!");
  }
936 | | |
  /// Record the integer result of a lowered call as the value of \p I,
  /// extending it as signed or unsigned per \p IsSigned (presumably used by
  /// the libcall visitors above — confirm at the call sites).
  void processIntegerCallValue(const Instruction &I,
                               SDValue Value, bool IsSigned);

  void HandlePHINodesInSuccessorBlocks(const BasicBlock *LLVMBB);

  /// Report an error encountered while lowering inline assembly.
  void emitInlineAsmError(ImmutableCallSite CS, const Twine &Message);

  /// If V is a function argument then create corresponding DBG_VALUE machine
  /// instruction for it now. At the end of instruction selection, they will be
  /// inserted to the entry BB.
  bool EmitFuncArgumentDbgValue(const Value *V, DILocalVariable *Variable,
                                DIExpression *Expr, DILocation *DL,
                                bool IsDbgDeclare, const SDValue &N);

  /// Return the next block after MBB, or nullptr if there is none.
  MachineBasicBlock *NextBlock(MachineBasicBlock *MBB);

  /// Update the DAG and DAG builder with the relevant information after
  /// a new root node has been created which could be a tail call.
  void updateDAGForMaybeTailCall(SDValue MaybeTC);

  /// Return the appropriate SDDbgValue based on N.
  SDDbgValue *getDbgValue(SDValue N, DILocalVariable *Variable,
                          DIExpression *Expr, const DebugLoc &dl,
                          unsigned DbgSDNodeOrder);
962 | | }; |
963 | | |
/// RegsForValue - This struct represents the registers (physical or virtual)
/// that a particular set of values is assigned, and the type information about
/// the value. The most common situation is to represent one value at a time,
/// but struct or array values are handled element-wise as multiple values. The
/// splitting of aggregates is performed recursively, so that we never have
/// aggregate-typed registers. The values at this point do not necessarily have
/// legal types, so each value may require one or more registers of some legal
/// type.
///
struct RegsForValue {
  /// The value types of the values, which may not be legal, and
  /// may need to be promoted or synthesized from one or more registers.
  SmallVector<EVT, 4> ValueVTs;

  /// The value types of the registers. This is the same size as ValueVTs and it
  /// records, for each value, what the type of the assigned register or
  /// registers are. (Individual values are never synthesized from more than one
  /// type of register.)
  ///
  /// With virtual registers, the contents of RegVTs is redundant with TLI's
  /// getRegisterType member function, however with physical registers
  /// it is necessary to have a separate record of the types.
  SmallVector<MVT, 4> RegVTs;

  /// This list holds the registers assigned to the values.
  /// Each legal or promoted value requires one register, and each
  /// expanded value requires multiple registers.
  SmallVector<unsigned, 4> Regs;

  /// This list holds the number of registers for each value.
  SmallVector<unsigned, 4> RegCount;

  /// Records if this value needs to be treated in an ABI dependant manner,
  /// different to normal type legalization.
  bool IsABIMangled = false;

  RegsForValue() = default;
  RegsForValue(const SmallVector<unsigned, 4> &regs, MVT regvt, EVT valuevt,
               bool IsABIMangledValue = false);
  RegsForValue(LLVMContext &Context, const TargetLowering &TLI,
               const DataLayout &DL, unsigned Reg, Type *Ty,
               bool IsABIMangledValue = false);

  /// Add the specified values to this one.
  /// Note: RHS contributes a single RegCount entry covering all of its
  /// registers, i.e. RHS is appended as one value.
  void append(const RegsForValue &RHS) {
    ValueVTs.append(RHS.ValueVTs.begin(), RHS.ValueVTs.end());
    RegVTs.append(RHS.RegVTs.begin(), RHS.RegVTs.end());
    Regs.append(RHS.Regs.begin(), RHS.Regs.end());
    RegCount.push_back(RHS.Regs.size());
  }

  /// Emit a series of CopyFromReg nodes that copies from this value and returns
  /// the result as a ValueVTs value. This uses Chain/Flag as the input and
  /// updates them for the output Chain/Flag. If the Flag pointer is NULL, no
  /// flag is used.
  SDValue getCopyFromRegs(SelectionDAG &DAG, FunctionLoweringInfo &FuncInfo,
                          const SDLoc &dl, SDValue &Chain, SDValue *Flag,
                          const Value *V = nullptr) const;

  /// Emit a series of CopyToReg nodes that copies the specified value into the
  /// registers specified by this object. This uses Chain/Flag as the input and
  /// updates them for the output Chain/Flag. If the Flag pointer is nullptr, no
  /// flag is used. If V is not nullptr, then it is used in printing better
  /// diagnostic messages on error.
  void getCopyToRegs(SDValue Val, SelectionDAG &DAG, const SDLoc &dl,
                     SDValue &Chain, SDValue *Flag, const Value *V = nullptr,
                     ISD::NodeType PreferredExtendType = ISD::ANY_EXTEND) const;

  /// Add this value to the specified inlineasm node operand list. This adds the
  /// code marker, matching input operand index (if applicable), and includes
  /// the number of values added into it.
  void AddInlineAsmOperands(unsigned Kind, bool HasMatching,
                            unsigned MatchingIdx, const SDLoc &dl,
                            SelectionDAG &DAG, std::vector<SDValue> &Ops) const;
};
1039 | | |
1040 | | } // end namespace llvm |
1041 | | |
1042 | | #endif // LLVM_LIB_CODEGEN_SELECTIONDAG_SELECTIONDAGBUILDER_H |