/Users/buildslave/jenkins/workspace/clang-stage2-coverage-R/llvm/lib/CodeGen/MachineInstr.cpp
Line | Count | Source (jump to first uncovered line) |
1 | | //===- lib/CodeGen/MachineInstr.cpp ---------------------------------------===// |
2 | | // |
3 | | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. |
4 | | // See https://llvm.org/LICENSE.txt for license information. |
5 | | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception |
6 | | // |
7 | | //===----------------------------------------------------------------------===// |
8 | | // |
9 | | // Methods common to all machine instructions. |
10 | | // |
11 | | //===----------------------------------------------------------------------===// |
12 | | |
13 | | #include "llvm/CodeGen/MachineInstr.h" |
14 | | #include "llvm/ADT/APFloat.h" |
15 | | #include "llvm/ADT/ArrayRef.h" |
16 | | #include "llvm/ADT/FoldingSet.h" |
17 | | #include "llvm/ADT/Hashing.h" |
18 | | #include "llvm/ADT/None.h" |
19 | | #include "llvm/ADT/STLExtras.h" |
20 | | #include "llvm/ADT/SmallBitVector.h" |
21 | | #include "llvm/ADT/SmallString.h" |
22 | | #include "llvm/ADT/SmallVector.h" |
23 | | #include "llvm/Analysis/AliasAnalysis.h" |
24 | | #include "llvm/Analysis/Loads.h" |
25 | | #include "llvm/Analysis/MemoryLocation.h" |
26 | | #include "llvm/CodeGen/GlobalISel/RegisterBank.h" |
27 | | #include "llvm/CodeGen/MachineBasicBlock.h" |
28 | | #include "llvm/CodeGen/MachineFrameInfo.h" |
29 | | #include "llvm/CodeGen/MachineFunction.h" |
30 | | #include "llvm/CodeGen/MachineInstrBuilder.h" |
31 | | #include "llvm/CodeGen/MachineInstrBundle.h" |
32 | | #include "llvm/CodeGen/MachineMemOperand.h" |
33 | | #include "llvm/CodeGen/MachineModuleInfo.h" |
34 | | #include "llvm/CodeGen/MachineOperand.h" |
35 | | #include "llvm/CodeGen/MachineRegisterInfo.h" |
36 | | #include "llvm/CodeGen/PseudoSourceValue.h" |
37 | | #include "llvm/CodeGen/TargetInstrInfo.h" |
38 | | #include "llvm/CodeGen/TargetRegisterInfo.h" |
39 | | #include "llvm/CodeGen/TargetSubtargetInfo.h" |
40 | | #include "llvm/Config/llvm-config.h" |
41 | | #include "llvm/IR/Constants.h" |
42 | | #include "llvm/IR/DebugInfoMetadata.h" |
43 | | #include "llvm/IR/DebugLoc.h" |
44 | | #include "llvm/IR/DerivedTypes.h" |
45 | | #include "llvm/IR/Function.h" |
46 | | #include "llvm/IR/InlineAsm.h" |
47 | | #include "llvm/IR/InstrTypes.h" |
48 | | #include "llvm/IR/Intrinsics.h" |
49 | | #include "llvm/IR/LLVMContext.h" |
50 | | #include "llvm/IR/Metadata.h" |
51 | | #include "llvm/IR/Module.h" |
52 | | #include "llvm/IR/ModuleSlotTracker.h" |
53 | | #include "llvm/IR/Operator.h" |
54 | | #include "llvm/IR/Type.h" |
55 | | #include "llvm/IR/Value.h" |
56 | | #include "llvm/MC/MCInstrDesc.h" |
57 | | #include "llvm/MC/MCRegisterInfo.h" |
58 | | #include "llvm/MC/MCSymbol.h" |
59 | | #include "llvm/Support/Casting.h" |
60 | | #include "llvm/Support/CommandLine.h" |
61 | | #include "llvm/Support/Compiler.h" |
62 | | #include "llvm/Support/Debug.h" |
63 | | #include "llvm/Support/ErrorHandling.h" |
64 | | #include "llvm/Support/LowLevelTypeImpl.h" |
65 | | #include "llvm/Support/MathExtras.h" |
66 | | #include "llvm/Support/raw_ostream.h" |
67 | | #include "llvm/Target/TargetIntrinsicInfo.h" |
68 | | #include "llvm/Target/TargetMachine.h" |
69 | | #include <algorithm> |
70 | | #include <cassert> |
71 | | #include <cstddef> |
72 | | #include <cstdint> |
73 | | #include <cstring> |
74 | | #include <iterator> |
75 | | #include <utility> |
76 | | |
77 | | using namespace llvm; |
78 | | |
79 | 137k | static const MachineFunction *getMFIfAvailable(const MachineInstr &MI) { |
80 | 137k | if (const MachineBasicBlock *MBB = MI.getParent()) |
81 | 137k | if (const MachineFunction *MF = MBB->getParent()) |
82 | 137k | return MF; |
83 | 2 | return nullptr; |
84 | 2 | } |
85 | | |
86 | | // Try to crawl up to the machine function and get TRI and IntrinsicInfo from |
87 | | // it. |
88 | | static void tryToGetTargetInfo(const MachineInstr &MI, |
89 | | const TargetRegisterInfo *&TRI, |
90 | | const MachineRegisterInfo *&MRI, |
91 | | const TargetIntrinsicInfo *&IntrinsicInfo, |
92 | 135k | const TargetInstrInfo *&TII) { |
93 | 135k | |
94 | 135k | if (const MachineFunction *MF = getMFIfAvailable(MI)) { |
95 | 135k | TRI = MF->getSubtarget().getRegisterInfo(); |
96 | 135k | MRI = &MF->getRegInfo(); |
97 | 135k | IntrinsicInfo = MF->getTarget().getIntrinsicInfo(); |
98 | 135k | TII = MF->getSubtarget().getInstrInfo(); |
99 | 135k | } |
100 | 135k | } |
101 | | |
102 | 64.8M | void MachineInstr::addImplicitDefUseOperands(MachineFunction &MF) { |
103 | 64.8M | if (MCID->ImplicitDefs) |
104 | 14.7M | for (const MCPhysReg *ImpDefs = MCID->getImplicitDefs(); 7.01M *ImpDefs; |
105 | 7.77M | ++ImpDefs) |
106 | 7.77M | addOperand(MF, MachineOperand::CreateReg(*ImpDefs, true, true)); |
107 | 64.8M | if (MCID->ImplicitUses) |
108 | 20.5M | for (const MCPhysReg *ImpUses = MCID->getImplicitUses(); 10.0M *ImpUses; |
109 | 10.5M | ++ImpUses) |
110 | 10.5M | addOperand(MF, MachineOperand::CreateReg(*ImpUses, false, true)); |
111 | 64.8M | } |
112 | | |
113 | | /// MachineInstr ctor - This constructor creates a MachineInstr and adds the |
114 | | /// implicit operands. It reserves space for the number of operands specified by |
115 | | /// the MCInstrDesc. |
116 | | MachineInstr::MachineInstr(MachineFunction &MF, const MCInstrDesc &tid, |
117 | | DebugLoc dl, bool NoImp) |
118 | 64.8M | : MCID(&tid), debugLoc(std::move(dl)) { |
119 | 64.8M | assert(debugLoc.hasTrivialDestructor() && "Expected trivial destructor"); |
120 | 64.8M | |
121 | 64.8M | // Reserve space for the expected number of operands. |
122 | 64.8M | if (unsigned NumOps = MCID->getNumOperands() + |
123 | 64.1M | MCID->getNumImplicitDefs() + MCID->getNumImplicitUses()) { |
124 | 64.1M | CapOperands = OperandCapacity::get(NumOps); |
125 | 64.1M | Operands = MF.allocateOperandArray(CapOperands); |
126 | 64.1M | } |
127 | 64.8M | |
128 | 64.8M | if (!NoImp) |
129 | 64.7M | addImplicitDefUseOperands(MF); |
130 | 64.8M | } |
131 | | |
132 | | /// MachineInstr ctor - Copies MachineInstr arg exactly |
133 | | /// |
134 | | MachineInstr::MachineInstr(MachineFunction &MF, const MachineInstr &MI) |
135 | 1.74M | : MCID(&MI.getDesc()), Info(MI.Info), debugLoc(MI.getDebugLoc()) { |
136 | 1.74M | assert(debugLoc.hasTrivialDestructor() && "Expected trivial destructor"); |
137 | 1.74M | |
138 | 1.74M | CapOperands = OperandCapacity::get(MI.getNumOperands()); |
139 | 1.74M | Operands = MF.allocateOperandArray(CapOperands); |
140 | 1.74M | |
141 | 1.74M | // Copy operands. |
142 | 1.74M | for (const MachineOperand &MO : MI.operands()) |
143 | 5.03M | addOperand(MF, MO); |
144 | 1.74M | |
145 | 1.74M | // Copy all the sensible flags. |
146 | 1.74M | setFlags(MI.Flags); |
147 | 1.74M | } |
148 | | |
149 | | /// getRegInfo - If this instruction is embedded into a MachineFunction, |
150 | | /// return the MachineRegisterInfo object for the current function, otherwise |
151 | | /// return null. |
152 | 199M | MachineRegisterInfo *MachineInstr::getRegInfo() { |
153 | 199M | if (MachineBasicBlock *MBB = getParent()) |
154 | 144M | return &MBB->getParent()->getRegInfo(); |
155 | 55.1M | return nullptr; |
156 | 55.1M | } |
157 | | |
158 | | /// RemoveRegOperandsFromUseLists - Unlink all of the register operands in |
159 | | /// this instruction from their respective use lists. This requires that the |
160 | | /// operands already be on their use lists. |
161 | 41.6M | void MachineInstr::RemoveRegOperandsFromUseLists(MachineRegisterInfo &MRI) { |
162 | 41.6M | for (MachineOperand &MO : operands()) |
163 | 106M | if (MO.isReg()) |
164 | 73.8M | MRI.removeRegOperandFromUseList(&MO); |
165 | 41.6M | } |
166 | | |
167 | | /// AddRegOperandsToUseLists - Add all of the register operands in |
168 | | /// this instruction from their respective use lists. This requires that the |
169 | | /// operands not be on their use lists yet. |
170 | 66.5M | void MachineInstr::AddRegOperandsToUseLists(MachineRegisterInfo &MRI) { |
171 | 66.5M | for (MachineOperand &MO : operands()) |
172 | 55.1M | if (MO.isReg()) |
173 | 38.4M | MRI.addRegOperandToUseList(&MO); |
174 | 66.5M | } |
175 | | |
176 | 3.60M | void MachineInstr::addOperand(const MachineOperand &Op) { |
177 | 3.60M | MachineBasicBlock *MBB = getParent(); |
178 | 3.60M | assert(MBB && "Use MachineInstrBuilder to add operands to dangling instrs"); |
179 | 3.60M | MachineFunction *MF = MBB->getParent(); |
180 | 3.60M | assert(MF && "Use MachineInstrBuilder to add operands to dangling instrs"); |
181 | 3.60M | addOperand(*MF, Op); |
182 | 3.60M | } |
183 | | |
184 | | /// Move NumOps MachineOperands from Src to Dst, with support for overlapping |
185 | | /// ranges. If MRI is non-null also update use-def chains. |
186 | | static void moveOperands(MachineOperand *Dst, MachineOperand *Src, |
187 | 36.5M | unsigned NumOps, MachineRegisterInfo *MRI) { |
188 | 36.5M | if (MRI) |
189 | 24.7M | return MRI->moveOperands(Dst, Src, NumOps); |
190 | 11.7M | |
191 | 11.7M | // MachineOperand is a trivially copyable type so we can just use memmove. |
192 | 11.7M | std::memmove(Dst, Src, NumOps * sizeof(MachineOperand)); |
193 | 11.7M | } |
194 | | |
195 | | /// addOperand - Add the specified operand to the instruction. If it is an |
196 | | /// implicit operand, it is added to the end of the operand list. If it is |
197 | | /// an explicit operand it is added at the end of the explicit operand list |
198 | | /// (before the first implicit operand). |
199 | 198M | void MachineInstr::addOperand(MachineFunction &MF, const MachineOperand &Op) { |
200 | 198M | assert(MCID && "Cannot add operands before providing an instr descriptor"); |
201 | 198M | |
202 | 198M | // Check if we're adding one of our existing operands. |
203 | 198M | if (&Op >= Operands && &Op < Operands + NumOperands186M ) { |
204 | 2.10k | // This is unusual: MI->addOperand(MI->getOperand(i)). |
205 | 2.10k | // If adding Op requires reallocating or moving existing operands around, |
206 | 2.10k | // the Op reference could go stale. Support it by copying Op. |
207 | 2.10k | MachineOperand CopyOp(Op); |
208 | 2.10k | return addOperand(MF, CopyOp); |
209 | 2.10k | } |
210 | 198M | |
211 | 198M | // Find the insert location for the new operand. Implicit registers go at |
212 | 198M | // the end, everything else goes before the implicit regs. |
213 | 198M | // |
214 | 198M | // FIXME: Allow mixed explicit and implicit operands on inline asm. |
215 | 198M | // InstrEmitter::EmitSpecialNode() is marking inline asm clobbers as |
216 | 198M | // implicit-defs, but they must not be moved around. See the FIXME in |
217 | 198M | // InstrEmitter.cpp. |
218 | 198M | unsigned OpNo = getNumOperands(); |
219 | 198M | bool isImpReg = Op.isReg() && Op.isImplicit()136M ; |
220 | 198M | if (!isImpReg && !isInlineAsm()169M ) { |
221 | 212M | while (OpNo && Operands[OpNo-1].isReg()146M && Operands[OpNo-1].isImplicit()122M ) { |
222 | 42.3M | --OpNo; |
223 | 42.3M | assert(!Operands[OpNo].isTied() && "Cannot move tied operands"); |
224 | 42.3M | } |
225 | 169M | } |
226 | 198M | |
227 | | #ifndef NDEBUG |
228 | | bool isDebugOp = Op.getType() == MachineOperand::MO_Metadata || |
229 | | Op.getType() == MachineOperand::MO_MCSymbol; |
230 | | // OpNo now points as the desired insertion point. Unless this is a variadic |
231 | | // instruction, only implicit regs are allowed beyond MCID->getNumOperands(). |
232 | | // RegMask operands go between the explicit and implicit operands. |
233 | | assert((isImpReg || Op.isRegMask() || MCID->isVariadic() || |
234 | | OpNo < MCID->getNumOperands() || isDebugOp) && |
235 | | "Trying to add an operand to a machine instr that is already done!"); |
236 | | #endif |
237 | | |
238 | 198M | MachineRegisterInfo *MRI = getRegInfo(); |
239 | 198M | |
240 | 198M | // Determine if the Operands array needs to be reallocated. |
241 | 198M | // Save the old capacity and operand array. |
242 | 198M | OperandCapacity OldCap = CapOperands; |
243 | 198M | MachineOperand *OldOperands = Operands; |
244 | 198M | if (!OldOperands || OldCap.getSize() == getNumOperands()197M ) { |
245 | 8.72M | CapOperands = OldOperands ? OldCap.getNext()8.23M : OldCap.get(1)486k ; |
246 | 8.72M | Operands = MF.allocateOperandArray(CapOperands); |
247 | 8.72M | // Move the operands before the insertion point. |
248 | 8.72M | if (OpNo) |
249 | 8.23M | moveOperands(Operands, OldOperands, OpNo, MRI); |
250 | 8.72M | } |
251 | 198M | |
252 | 198M | // Move the operands following the insertion point. |
253 | 198M | if (OpNo != NumOperands) |
254 | 28.0M | moveOperands(Operands + OpNo + 1, OldOperands + OpNo, NumOperands - OpNo, |
255 | 28.0M | MRI); |
256 | 198M | ++NumOperands; |
257 | 198M | |
258 | 198M | // Deallocate the old operand array. |
259 | 198M | if (OldOperands != Operands && OldOperands8.72M ) |
260 | 8.23M | MF.deallocateOperandArray(OldCap, OldOperands); |
261 | 198M | |
262 | 198M | // Copy Op into place. It still needs to be inserted into the MRI use lists. |
263 | 198M | MachineOperand *NewMO = new (Operands + OpNo) MachineOperand(Op); |
264 | 198M | NewMO->ParentMI = this; |
265 | 198M | |
266 | 198M | // When adding a register operand, tell MRI about it. |
267 | 198M | if (NewMO->isReg()) { |
268 | 136M | // Ensure isOnRegUseList() returns false, regardless of Op's status. |
269 | 136M | NewMO->Contents.Reg.Prev = nullptr; |
270 | 136M | // Ignore existing ties. This is not a property that can be copied. |
271 | 136M | NewMO->TiedTo = 0; |
272 | 136M | // Add the new operand to MRI, but only for instructions in an MBB. |
273 | 136M | if (MRI) |
274 | 97.6M | MRI->addRegOperandToUseList(NewMO); |
275 | 136M | // The MCID operand information isn't accurate until we start adding |
276 | 136M | // explicit operands. The implicit operands are added first, then the |
277 | 136M | // explicits are inserted before them. |
278 | 136M | if (!isImpReg) { |
279 | 107M | // Tie uses to defs as indicated in MCInstrDesc. |
280 | 107M | if (NewMO->isUse()) { |
281 | 62.9M | int DefIdx = MCID->getOperandConstraint(OpNo, MCOI::TIED_TO); |
282 | 62.9M | if (DefIdx != -1) |
283 | 1.33M | tieOperands(DefIdx, OpNo); |
284 | 62.9M | } |
285 | 107M | // If the register operand is flagged as early, mark the operand as such. |
286 | 107M | if (MCID->getOperandConstraint(OpNo, MCOI::EARLY_CLOBBER) != -1) |
287 | 416k | NewMO->setIsEarlyClobber(true); |
288 | 107M | } |
289 | 136M | } |
290 | 198M | } |
291 | | |
292 | | /// RemoveOperand - Erase an operand from an instruction, leaving it with one |
293 | | /// fewer operand than it started with. |
294 | | /// |
295 | 1.49M | void MachineInstr::RemoveOperand(unsigned OpNo) { |
296 | 1.49M | assert(OpNo < getNumOperands() && "Invalid operand number"); |
297 | 1.49M | untieRegOperand(OpNo); |
298 | 1.49M | |
299 | | #ifndef NDEBUG |
300 | | // Moving tied operands would break the ties. |
301 | | for (unsigned i = OpNo + 1, e = getNumOperands(); i != e; ++i) |
302 | | if (Operands[i].isReg()) |
303 | | assert(!Operands[i].isTied() && "Cannot move tied operands"); |
304 | | #endif |
305 | | |
306 | 1.49M | MachineRegisterInfo *MRI = getRegInfo(); |
307 | 1.49M | if (MRI1.49M && Operands[OpNo].isReg()) |
308 | 1.33M | MRI->removeRegOperandFromUseList(Operands + OpNo); |
309 | 1.49M | |
310 | 1.49M | // Don't call the MachineOperand destructor. A lot of this code depends on |
311 | 1.49M | // MachineOperand having a trivial destructor anyway, and adding a call here |
312 | 1.49M | // wouldn't make it 'destructor-correct'. |
313 | 1.49M | |
314 | 1.49M | if (unsigned N = NumOperands - 1 - OpNo) |
315 | 266k | moveOperands(Operands + OpNo, Operands + OpNo + 1, N, MRI); |
316 | 1.49M | --NumOperands; |
317 | 1.49M | } |
318 | | |
319 | 5.91M | void MachineInstr::dropMemRefs(MachineFunction &MF) { |
320 | 5.91M | if (memoperands_empty()) |
321 | 5.77M | return; |
322 | 136k | |
323 | 136k | // See if we can just drop all of our extra info. |
324 | 136k | if (!getPreInstrSymbol() && !getPostInstrSymbol()136k ) { |
325 | 136k | Info.clear(); |
326 | 136k | return; |
327 | 136k | } |
328 | 2 | if (!getPostInstrSymbol()) { |
329 | 0 | Info.set<EIIK_PreInstrSymbol>(getPreInstrSymbol()); |
330 | 0 | return; |
331 | 0 | } |
332 | 2 | if (!getPreInstrSymbol()) { |
333 | 0 | Info.set<EIIK_PostInstrSymbol>(getPostInstrSymbol()); |
334 | 0 | return; |
335 | 0 | } |
336 | 2 | |
337 | 2 | // Otherwise allocate a fresh extra info with just these symbols. |
338 | 2 | Info.set<EIIK_OutOfLine>( |
339 | 2 | MF.createMIExtraInfo({}, getPreInstrSymbol(), getPostInstrSymbol())); |
340 | 2 | } |
341 | | |
342 | | void MachineInstr::setMemRefs(MachineFunction &MF, |
343 | 15.1M | ArrayRef<MachineMemOperand *> MMOs) { |
344 | 15.1M | if (MMOs.empty()) { |
345 | 5.90M | dropMemRefs(MF); |
346 | 5.90M | return; |
347 | 5.90M | } |
348 | 9.26M | |
349 | 9.26M | // Try to store a single MMO inline. |
350 | 9.26M | if (MMOs.size() == 1 && !getPreInstrSymbol()7.06M && !getPostInstrSymbol()7.06M ) { |
351 | 7.06M | Info.set<EIIK_MMO>(MMOs[0]); |
352 | 7.06M | return; |
353 | 7.06M | } |
354 | 2.19M | |
355 | 2.19M | // Otherwise create an extra info struct with all of our info. |
356 | 2.19M | Info.set<EIIK_OutOfLine>( |
357 | 2.19M | MF.createMIExtraInfo(MMOs, getPreInstrSymbol(), getPostInstrSymbol())); |
358 | 2.19M | } |
359 | | |
360 | | void MachineInstr::addMemOperand(MachineFunction &MF, |
361 | 6.51M | MachineMemOperand *MO) { |
362 | 6.51M | SmallVector<MachineMemOperand *, 2> MMOs; |
363 | 6.51M | MMOs.append(memoperands_begin(), memoperands_end()); |
364 | 6.51M | MMOs.push_back(MO); |
365 | 6.51M | setMemRefs(MF, MMOs); |
366 | 6.51M | } |
367 | | |
368 | 16.1k | void MachineInstr::cloneMemRefs(MachineFunction &MF, const MachineInstr &MI) { |
369 | 16.1k | if (this == &MI) |
370 | 0 | // Nothing to do for a self-clone! |
371 | 0 | return; |
372 | 16.1k | |
373 | 16.1k | assert(&MF == MI.getMF() && |
374 | 16.1k | "Invalid machine functions when cloning memory refrences!"); |
375 | 16.1k | // See if we can just steal the extra info already allocated for the |
376 | 16.1k | // instruction. We can do this whenever the pre- and post-instruction symbols |
377 | 16.1k | // are the same (including null). |
378 | 16.1k | if (getPreInstrSymbol() == MI.getPreInstrSymbol() && |
379 | 16.1k | getPostInstrSymbol() == MI.getPostInstrSymbol()) { |
380 | 16.1k | Info = MI.Info; |
381 | 16.1k | return; |
382 | 16.1k | } |
383 | 0 | |
384 | 0 | // Otherwise, fall back on a copy-based clone. |
385 | 0 | setMemRefs(MF, MI.memoperands()); |
386 | 0 | } |
387 | | |
388 | | /// Check to see if the MMOs pointed to by the two MemRefs arrays are |
389 | | /// identical. |
390 | | static bool hasIdenticalMMOs(ArrayRef<MachineMemOperand *> LHS, |
391 | 452k | ArrayRef<MachineMemOperand *> RHS) { |
392 | 452k | if (LHS.size() != RHS.size()) |
393 | 6.35k | return false; |
394 | 445k | |
395 | 445k | auto LHSPointees = make_pointee_range(LHS); |
396 | 445k | auto RHSPointees = make_pointee_range(RHS); |
397 | 445k | return std::equal(LHSPointees.begin(), LHSPointees.end(), |
398 | 445k | RHSPointees.begin()); |
399 | 445k | } |
400 | | |
401 | | void MachineInstr::cloneMergedMemRefs(MachineFunction &MF, |
402 | 459k | ArrayRef<const MachineInstr *> MIs) { |
403 | 459k | // Try handling easy numbers of MIs with simpler mechanisms. |
404 | 459k | if (MIs.empty()) { |
405 | 0 | dropMemRefs(MF); |
406 | 0 | return; |
407 | 0 | } |
408 | 459k | if (MIs.size() == 1) { |
409 | 0 | cloneMemRefs(MF, *MIs[0]); |
410 | 0 | return; |
411 | 0 | } |
412 | 459k | // Because an empty memoperands list provides *no* information and must be |
413 | 459k | // handled conservatively (assuming the instruction can do anything), the only |
414 | 459k | // way to merge with it is to drop all other memoperands. |
415 | 459k | if (MIs[0]->memoperands_empty()) { |
416 | 8.68k | dropMemRefs(MF); |
417 | 8.68k | return; |
418 | 8.68k | } |
419 | 450k | |
420 | 450k | // Handle the general case. |
421 | 450k | SmallVector<MachineMemOperand *, 2> MergedMMOs; |
422 | 450k | // Start with the first instruction. |
423 | 450k | assert(&MF == MIs[0]->getMF() && |
424 | 450k | "Invalid machine functions when cloning memory references!"); |
425 | 450k | MergedMMOs.append(MIs[0]->memoperands_begin(), MIs[0]->memoperands_end()); |
426 | 450k | // Now walk all the other instructions and accumulate any different MMOs. |
427 | 452k | for (const MachineInstr &MI : make_pointee_range(MIs.slice(1))) { |
428 | 452k | assert(&MF == MI.getMF() && |
429 | 452k | "Invalid machine functions when cloning memory references!"); |
430 | 452k | |
431 | 452k | // Skip MIs with identical operands to the first. This is a somewhat |
432 | 452k | // arbitrary hack but will catch common cases without being quadratic. |
433 | 452k | // TODO: We could fully implement merge semantics here if needed. |
434 | 452k | if (hasIdenticalMMOs(MIs[0]->memoperands(), MI.memoperands())) |
435 | 182k | continue; |
436 | 269k | |
437 | 269k | // Because an empty memoperands list provides *no* information and must be |
438 | 269k | // handled conservatively (assuming the instruction can do anything), the |
439 | 269k | // only way to merge with it is to drop all other memoperands. |
440 | 269k | if (MI.memoperands_empty()) { |
441 | 0 | dropMemRefs(MF); |
442 | 0 | return; |
443 | 0 | } |
444 | 269k | |
445 | 269k | // Otherwise accumulate these into our temporary buffer of the merged state. |
446 | 269k | MergedMMOs.append(MI.memoperands_begin(), MI.memoperands_end()); |
447 | 269k | } |
448 | 450k | |
449 | 450k | setMemRefs(MF, MergedMMOs); |
450 | 450k | } |
451 | | |
452 | 364 | void MachineInstr::setPreInstrSymbol(MachineFunction &MF, MCSymbol *Symbol) { |
453 | 364 | MCSymbol *OldSymbol = getPreInstrSymbol(); |
454 | 364 | if (OldSymbol == Symbol) |
455 | 330 | return; |
456 | 34 | if (OldSymbol && !Symbol0 ) { |
457 | 0 | // We're removing a symbol rather than adding one. Try to clean up any |
458 | 0 | // extra info carried around. |
459 | 0 | if (Info.is<EIIK_PreInstrSymbol>()) { |
460 | 0 | Info.clear(); |
461 | 0 | return; |
462 | 0 | } |
463 | 0 | |
464 | 0 | if (memoperands_empty()) { |
465 | 0 | assert(getPostInstrSymbol() && |
466 | 0 | "Should never have only a single symbol allocated out-of-line!"); |
467 | 0 | Info.set<EIIK_PostInstrSymbol>(getPostInstrSymbol()); |
468 | 0 | return; |
469 | 0 | } |
470 | 34 | |
471 | 34 | // Otherwise fallback on the generic update. |
472 | 34 | } else if (!Info || Info.is<EIIK_PreInstrSymbol>()1 ) { |
473 | 33 | // If we don't have any other extra info, we can store this inline. |
474 | 33 | Info.set<EIIK_PreInstrSymbol>(Symbol); |
475 | 33 | return; |
476 | 33 | } |
477 | 1 | |
478 | 1 | // Otherwise, allocate a full new set of extra info. |
479 | 1 | // FIXME: Maybe we should make the symbols in the extra info mutable? |
480 | 1 | Info.set<EIIK_OutOfLine>( |
481 | 1 | MF.createMIExtraInfo(memoperands(), Symbol, getPostInstrSymbol())); |
482 | 1 | } |
483 | | |
484 | 391 | void MachineInstr::setPostInstrSymbol(MachineFunction &MF, MCSymbol *Symbol) { |
485 | 391 | MCSymbol *OldSymbol = getPostInstrSymbol(); |
486 | 391 | if (OldSymbol == Symbol) |
487 | 330 | return; |
488 | 61 | if (OldSymbol && !Symbol0 ) { |
489 | 0 | // We're removing a symbol rather than adding one. Try to clean up any |
490 | 0 | // extra info carried around. |
491 | 0 | if (Info.is<EIIK_PostInstrSymbol>()) { |
492 | 0 | Info.clear(); |
493 | 0 | return; |
494 | 0 | } |
495 | 0 | |
496 | 0 | if (memoperands_empty()) { |
497 | 0 | assert(getPreInstrSymbol() && |
498 | 0 | "Should never have only a single symbol allocated out-of-line!"); |
499 | 0 | Info.set<EIIK_PreInstrSymbol>(getPreInstrSymbol()); |
500 | 0 | return; |
501 | 0 | } |
502 | 61 | |
503 | 61 | // Otherwise fallback on the generic update. |
504 | 61 | } else if (!Info || Info.is<EIIK_PostInstrSymbol>()6 ) { |
505 | 55 | // If we don't have any other extra info, we can store this inline. |
506 | 55 | Info.set<EIIK_PostInstrSymbol>(Symbol); |
507 | 55 | return; |
508 | 55 | } |
509 | 6 | |
510 | 6 | // Otherwise, allocate a full new set of extra info. |
511 | 6 | // FIXME: Maybe we should make the symbols in the extra info mutable? |
512 | 6 | Info.set<EIIK_OutOfLine>( |
513 | 6 | MF.createMIExtraInfo(memoperands(), getPreInstrSymbol(), Symbol)); |
514 | 6 | } |
515 | | |
516 | | void MachineInstr::cloneInstrSymbols(MachineFunction &MF, |
517 | 331 | const MachineInstr &MI) { |
518 | 331 | if (this == &MI) |
519 | 0 | // Nothing to do for a self-clone! |
520 | 0 | return; |
521 | 331 | |
522 | 331 | assert(&MF == MI.getMF() && |
523 | 331 | "Invalid machine functions when cloning instruction symbols!"); |
524 | 331 | |
525 | 331 | setPreInstrSymbol(MF, MI.getPreInstrSymbol()); |
526 | 331 | setPostInstrSymbol(MF, MI.getPostInstrSymbol()); |
527 | 331 | } |
528 | | |
529 | 223k | uint16_t MachineInstr::mergeFlagsWith(const MachineInstr &Other) const { |
530 | 223k | // For now, the just return the union of the flags. If the flags get more |
531 | 223k | // complicated over time, we might need more logic here. |
532 | 223k | return getFlags() | Other.getFlags(); |
533 | 223k | } |
534 | | |
535 | 1.09M | uint16_t MachineInstr::copyFlagsFromInstruction(const Instruction &I) { |
536 | 1.09M | uint16_t MIFlags = 0; |
537 | 1.09M | // Copy the wrapping flags. |
538 | 1.09M | if (const OverflowingBinaryOperator *OB = |
539 | 610k | dyn_cast<OverflowingBinaryOperator>(&I)) { |
540 | 610k | if (OB->hasNoSignedWrap()) |
541 | 316k | MIFlags |= MachineInstr::MIFlag::NoSWrap; |
542 | 610k | if (OB->hasNoUnsignedWrap()) |
543 | 102k | MIFlags |= MachineInstr::MIFlag::NoUWrap; |
544 | 610k | } |
545 | 1.09M | |
546 | 1.09M | // Copy the exact flag. |
547 | 1.09M | if (const PossiblyExactOperator *PE = dyn_cast<PossiblyExactOperator>(&I)) |
548 | 73.3k | if (PE->isExact()) |
549 | 12.7k | MIFlags |= MachineInstr::MIFlag::IsExact; |
550 | 1.09M | |
551 | 1.09M | // Copy the fast-math flags. |
552 | 1.09M | if (const FPMathOperator *FP = dyn_cast<FPMathOperator>(&I)) { |
553 | 174k | const FastMathFlags Flags = FP->getFastMathFlags(); |
554 | 174k | if (Flags.noNaNs()) |
555 | 267 | MIFlags |= MachineInstr::MIFlag::FmNoNans; |
556 | 174k | if (Flags.noInfs()) |
557 | 264 | MIFlags |= MachineInstr::MIFlag::FmNoInfs; |
558 | 174k | if (Flags.noSignedZeros()) |
559 | 265 | MIFlags |= MachineInstr::MIFlag::FmNsz; |
560 | 174k | if (Flags.allowReciprocal()) |
561 | 264 | MIFlags |= MachineInstr::MIFlag::FmArcp; |
562 | 174k | if (Flags.allowContract()) |
563 | 264 | MIFlags |= MachineInstr::MIFlag::FmContract; |
564 | 174k | if (Flags.approxFunc()) |
565 | 264 | MIFlags |= MachineInstr::MIFlag::FmAfn; |
566 | 174k | if (Flags.allowReassoc()) |
567 | 264 | MIFlags |= MachineInstr::MIFlag::FmReassoc; |
568 | 174k | } |
569 | 1.09M | |
570 | 1.09M | return MIFlags; |
571 | 1.09M | } |
572 | | |
573 | 275 | void MachineInstr::copyIRFlags(const Instruction &I) { |
574 | 275 | Flags = copyFlagsFromInstruction(I); |
575 | 275 | } |
576 | | |
577 | 377k | bool MachineInstr::hasPropertyInBundle(uint64_t Mask, QueryType Type) const { |
578 | 377k | assert(!isBundledWithPred() && "Must be called on bundle header"); |
579 | 925k | for (MachineBasicBlock::const_instr_iterator MII = getIterator();; ++MII547k ) { |
580 | 925k | if (MII->getDesc().getFlags() & Mask) { |
581 | 105k | if (Type == AnyInBundle) |
582 | 105k | return true; |
583 | 819k | } else { |
584 | 819k | if (Type == AllInBundle && !MII->isBundle()6.58k ) |
585 | 6.54k | return false; |
586 | 813k | } |
587 | 813k | // This was the last instruction in the bundle. |
588 | 813k | if (!MII->isBundledWithSucc()) |
589 | 265k | return Type == AllInBundle; |
590 | 813k | } |
591 | 377k | } |
592 | | |
593 | | bool MachineInstr::isIdenticalTo(const MachineInstr &Other, |
594 | 44.3M | MICheckType Check) const { |
595 | 44.3M | // If opcodes or number of operands are not the same then the two |
596 | 44.3M | // instructions are obviously not identical. |
597 | 44.3M | if (Other.getOpcode() != getOpcode() || |
598 | 44.3M | Other.getNumOperands() != getNumOperands()31.8M ) |
599 | 13.3M | return false; |
600 | 31.0M | |
601 | 31.0M | if (isBundle()) { |
602 | 213 | // We have passed the test above that both instructions have the same |
603 | 213 | // opcode, so we know that both instructions are bundles here. Let's compare |
604 | 213 | // MIs inside the bundle. |
605 | 213 | assert(Other.isBundle() && "Expected that both instructions are bundles."); |
606 | 213 | MachineBasicBlock::const_instr_iterator I1 = getIterator(); |
607 | 213 | MachineBasicBlock::const_instr_iterator I2 = Other.getIterator(); |
608 | 213 | // Loop until we analysed the last intruction inside at least one of the |
609 | 213 | // bundles. |
610 | 496 | while (I1->isBundledWithSucc() && I2->isBundledWithSucc()400 ) { |
611 | 400 | ++I1; |
612 | 400 | ++I2; |
613 | 400 | if (!I1->isIdenticalTo(*I2, Check)) |
614 | 117 | return false; |
615 | 400 | } |
616 | 213 | // If we've reached the end of just one of the two bundles, but not both, |
617 | 213 | // the instructions are not identical. |
618 | 213 | if (96 I1->isBundledWithSucc()96 || I2->isBundledWithSucc()96 ) |
619 | 0 | return false; |
620 | 31.0M | } |
621 | 31.0M | |
622 | 31.0M | // Check operands to make sure they match. |
623 | 129M | for (unsigned i = 0, e = getNumOperands(); 31.0M i != e; ++i98.0M ) { |
624 | 104M | const MachineOperand &MO = getOperand(i); |
625 | 104M | const MachineOperand &OMO = Other.getOperand(i); |
626 | 104M | if (!MO.isReg()) { |
627 | 34.8M | if (!MO.isIdenticalTo(OMO)) |
628 | 4.70M | return false; |
629 | 30.1M | continue; |
630 | 30.1M | } |
631 | 69.4M | |
632 | 69.4M | // Clients may or may not want to ignore defs when testing for equality. |
633 | 69.4M | // For example, machine CSE pass only cares about finding common |
634 | 69.4M | // subexpressions, so it's safe to ignore virtual register defs. |
635 | 69.4M | if (MO.isDef()) { |
636 | 36.8M | if (Check == IgnoreDefs) |
637 | 0 | continue; |
638 | 36.8M | else if (Check == IgnoreVRegDefs) { |
639 | 13.7M | if (!TargetRegisterInfo::isVirtualRegister(MO.getReg()) || |
640 | 13.7M | !TargetRegisterInfo::isVirtualRegister(OMO.getReg())9.73M ) |
641 | 4.00M | if (!MO.isIdenticalTo(OMO)) |
642 | 28.6k | return false; |
643 | 23.1M | } else { |
644 | 23.1M | if (!MO.isIdenticalTo(OMO)) |
645 | 108k | return false; |
646 | 23.0M | if (Check == CheckKillDead && MO.isDead() != OMO.isDead()69.7k ) |
647 | 5 | return false; |
648 | 32.5M | } |
649 | 32.5M | } else { |
650 | 32.5M | if (!MO.isIdenticalTo(OMO)) |
651 | 1.46M | return false; |
652 | 31.0M | if (Check == CheckKillDead && MO.isKill() != OMO.isKill()44.0k ) |
653 | 1.44k | return false; |
654 | 31.0M | } |
655 | 69.4M | } |
656 | 31.0M | // If DebugLoc does not match then two debug instructions are not identical. |
657 | 31.0M | if (24.7M isDebugInstr()24.7M ) |
658 | 156 | if (getDebugLoc() && Other.getDebugLoc() && |
659 | 156 | getDebugLoc() != Other.getDebugLoc()) |
660 | 0 | return false; |
661 | 24.7M | return true; |
662 | 24.7M | } |
663 | | |
664 | 108M | const MachineFunction *MachineInstr::getMF() const { |
665 | 108M | return getParent()->getParent(); |
666 | 108M | } |
667 | | |
668 | 28.9k | MachineInstr *MachineInstr::removeFromParent() { |
669 | 28.9k | assert(getParent() && "Not embedded in a basic block!"); |
670 | 28.9k | return getParent()->remove(this); |
671 | 28.9k | } |
672 | | |
673 | 98 | MachineInstr *MachineInstr::removeFromBundle() { |
674 | 98 | assert(getParent() && "Not embedded in a basic block!"); |
675 | 98 | return getParent()->remove_instr(this); |
676 | 98 | } |
677 | | |
678 | 33.7M | void MachineInstr::eraseFromParent() { |
679 | 33.7M | assert(getParent() && "Not embedded in a basic block!"); |
680 | 33.7M | getParent()->erase(this); |
681 | 33.7M | } |
682 | | |
683 | 5.30M | void MachineInstr::eraseFromParentAndMarkDBGValuesForRemoval() { |
684 | 5.30M | assert(getParent() && "Not embedded in a basic block!"); |
685 | 5.30M | MachineBasicBlock *MBB = getParent(); |
686 | 5.30M | MachineFunction *MF = MBB->getParent(); |
687 | 5.30M | assert(MF && "Not embedded in a function!"); |
688 | 5.30M | |
689 | 5.30M | MachineInstr *MI = (MachineInstr *)this; |
690 | 5.30M | MachineRegisterInfo &MRI = MF->getRegInfo(); |
691 | 5.30M | |
692 | 13.2M | for (const MachineOperand &MO : MI->operands()) { |
693 | 13.2M | if (!MO.isReg() || !MO.isDef()11.1M ) |
694 | 8.01M | continue; |
695 | 5.21M | unsigned Reg = MO.getReg(); |
696 | 5.21M | if (!TargetRegisterInfo::isVirtualRegister(Reg)) |
697 | 4.20k | continue; |
698 | 5.20M | MRI.markUsesInDebugValueAsUndef(Reg); |
699 | 5.20M | } |
700 | 5.30M | MI->eraseFromParent(); |
701 | 5.30M | } |
702 | | |
703 | 2.65M | void MachineInstr::eraseFromBundle() { |
704 | 2.65M | assert(getParent() && "Not embedded in a basic block!"); |
705 | 2.65M | getParent()->erase_instr(this); |
706 | 2.65M | } |
707 | | |
708 | 103M | unsigned MachineInstr::getNumExplicitOperands() const { |
709 | 103M | unsigned NumOperands = MCID->getNumOperands(); |
710 | 103M | if (!MCID->isVariadic()) |
711 | 103M | return NumOperands; |
712 | 46.1k | |
713 | 195k | for (unsigned I = NumOperands, E = getNumOperands(); 46.1k I != E; ++I149k ) { |
714 | 150k | const MachineOperand &MO = getOperand(I); |
715 | 150k | // The operands must always be in the following order: |
716 | 150k | // - explicit reg defs, |
717 | 150k | // - other explicit operands (reg uses, immediates, etc.), |
718 | 150k | // - implicit reg defs |
719 | 150k | // - implicit reg uses |
720 | 150k | if (MO.isReg() && MO.isImplicit()90.2k ) |
721 | 1.88k | break; |
722 | 149k | ++NumOperands; |
723 | 149k | } |
724 | 46.1k | return NumOperands; |
725 | 46.1k | } |
726 | | |
727 | 66.8M | unsigned MachineInstr::getNumExplicitDefs() const { |
728 | 66.8M | unsigned NumDefs = MCID->getNumDefs(); |
729 | 66.8M | if (!MCID->isVariadic()) |
730 | 65.7M | return NumDefs; |
731 | 1.15M | |
732 | 1.22M | for (unsigned I = NumDefs, E = getNumOperands(); 1.15M I != E1.22M ; ++I77.3k ) { |
733 | 1.22M | const MachineOperand &MO = getOperand(I); |
734 | 1.22M | if (!MO.isReg() || !MO.isDef()1.12M || MO.isImplicit()77.3k ) |
735 | 1.15M | break; |
736 | 77.3k | ++NumDefs; |
737 | 77.3k | } |
738 | 1.15M | return NumDefs; |
739 | 1.15M | } |
740 | | |
741 | 76.0k | void MachineInstr::bundleWithPred() { |
742 | 76.0k | assert(!isBundledWithPred() && "MI is already bundled with its predecessor"); |
743 | 76.0k | setFlag(BundledPred); |
744 | 76.0k | MachineBasicBlock::instr_iterator Pred = getIterator(); |
745 | 76.0k | --Pred; |
746 | 76.0k | assert(!Pred->isBundledWithSucc() && "Inconsistent bundle flags"); |
747 | 76.0k | Pred->setFlag(BundledSucc); |
748 | 76.0k | } |
749 | | |
750 | 34.2k | void MachineInstr::bundleWithSucc() { |
751 | 34.2k | assert(!isBundledWithSucc() && "MI is already bundled with its successor"); |
752 | 34.2k | setFlag(BundledSucc); |
753 | 34.2k | MachineBasicBlock::instr_iterator Succ = getIterator(); |
754 | 34.2k | ++Succ; |
755 | 34.2k | assert(!Succ->isBundledWithPred() && "Inconsistent bundle flags"); |
756 | 34.2k | Succ->setFlag(BundledPred); |
757 | 34.2k | } |
758 | | |
759 | 70.2k | void MachineInstr::unbundleFromPred() { |
760 | 70.2k | assert(isBundledWithPred() && "MI isn't bundled with its predecessor"); |
761 | 70.2k | clearFlag(BundledPred); |
762 | 70.2k | MachineBasicBlock::instr_iterator Pred = getIterator(); |
763 | 70.2k | --Pred; |
764 | 70.2k | assert(Pred->isBundledWithSucc() && "Inconsistent bundle flags"); |
765 | 70.2k | Pred->clearFlag(BundledSucc); |
766 | 70.2k | } |
767 | | |
768 | 5.10k | void MachineInstr::unbundleFromSucc() { |
769 | 5.10k | assert(isBundledWithSucc() && "MI isn't bundled with its successor"); |
770 | 5.10k | clearFlag(BundledSucc); |
771 | 5.10k | MachineBasicBlock::instr_iterator Succ = getIterator(); |
772 | 5.10k | ++Succ; |
773 | 5.10k | assert(Succ->isBundledWithPred() && "Inconsistent bundle flags"); |
774 | 5.10k | Succ->clearFlag(BundledPred); |
775 | 5.10k | } |
776 | | |
777 | 10.7M | bool MachineInstr::isStackAligningInlineAsm() const { |
778 | 10.7M | if (isInlineAsm()) { |
779 | 18.2k | unsigned ExtraInfo = getOperand(InlineAsm::MIOp_ExtraInfo).getImm(); |
780 | 18.2k | if (ExtraInfo & InlineAsm::Extra_IsAlignStack) |
781 | 21 | return true; |
782 | 10.7M | } |
783 | 10.7M | return false; |
784 | 10.7M | } |
785 | | |
786 | 17.8k | InlineAsm::AsmDialect MachineInstr::getInlineAsmDialect() const { |
787 | 17.8k | assert(isInlineAsm() && "getInlineAsmDialect() only works for inline asms!"); |
788 | 17.8k | unsigned ExtraInfo = getOperand(InlineAsm::MIOp_ExtraInfo).getImm(); |
789 | 17.8k | return InlineAsm::AsmDialect((ExtraInfo & InlineAsm::Extra_AsmDialect) != 0); |
790 | 17.8k | } |
791 | | |
/// Walk the inline-asm operand groups and return the index of the flag-word
/// operand for the group containing OpIdx, or -1 if OpIdx is an initial or
/// implicit operand. If GroupNo is non-null it receives the zero-based index
/// of that group.
int MachineInstr::findInlineAsmFlagIdx(unsigned OpIdx,
                                       unsigned *GroupNo) const {
  assert(isInlineAsm() && "Expected an inline asm instruction");
  assert(OpIdx < getNumOperands() && "OpIdx out of range");

  // Ignore queries about the initial operands.
  if (OpIdx < InlineAsm::MIOp_FirstOperand)
    return -1;

  unsigned Group = 0;
  unsigned NumOps;
  // Each group starts with an immediate flag word followed by
  // getNumOperandRegisters() register operands; hop from group to group.
  for (unsigned i = InlineAsm::MIOp_FirstOperand, e = getNumOperands(); i < e;
       i += NumOps) {
    const MachineOperand &FlagMO = getOperand(i);
    // If we reach the implicit register operands, stop looking.
    if (!FlagMO.isImm())
      return -1;
    NumOps = 1 + InlineAsm::getNumOperandRegisters(FlagMO.getImm());
    if (i + NumOps > OpIdx) {
      // OpIdx falls inside this group: i is its flag operand.
      if (GroupNo)
        *GroupNo = Group;
      return i;
    }
    ++Group;
  }
  return -1;
}
819 | | |
820 | 13 | const DILabel *MachineInstr::getDebugLabel() const { |
821 | 13 | assert(isDebugLabel() && "not a DBG_LABEL"); |
822 | 13 | return cast<DILabel>(getOperand(0).getMetadata()); |
823 | 13 | } |
824 | | |
825 | 46.1k | const DILocalVariable *MachineInstr::getDebugVariable() const { |
826 | 46.1k | assert(isDebugValue() && "not a DBG_VALUE"); |
827 | 46.1k | return cast<DILocalVariable>(getOperand(2).getMetadata()); |
828 | 46.1k | } |
829 | | |
830 | 24.0M | const DIExpression *MachineInstr::getDebugExpression() const { |
831 | 24.0M | assert(isDebugValue() && "not a DBG_VALUE"); |
832 | 24.0M | return cast<DIExpression>(getOperand(3).getMetadata()); |
833 | 24.0M | } |
834 | | |
/// Compute the register class constrained by operand OpIdx, or null if the
/// operand has no register-class constraint. Regular opcodes read it from
/// the MCInstrDesc; inline asm decodes it from the group flag word.
const TargetRegisterClass*
MachineInstr::getRegClassConstraint(unsigned OpIdx,
                                    const TargetInstrInfo *TII,
                                    const TargetRegisterInfo *TRI) const {
  assert(getParent() && "Can't have an MBB reference here!");
  assert(getMF() && "Can't have an MF reference here!");
  const MachineFunction &MF = *getMF();

  // Most opcodes have fixed constraints in their MCInstrDesc.
  if (!isInlineAsm())
    return TII->getRegClass(getDesc(), OpIdx, TRI, MF);

  if (!getOperand(OpIdx).isReg())
    return nullptr;

  // For tied uses on inline asm, get the constraint from the def.
  unsigned DefIdx;
  if (getOperand(OpIdx).isUse() && isRegTiedToDefOperand(OpIdx, &DefIdx))
    OpIdx = DefIdx;

  // Inline asm stores register class constraints in the flag word.
  int FlagIdx = findInlineAsmFlagIdx(OpIdx);
  if (FlagIdx < 0)
    return nullptr;

  unsigned Flag = getOperand(FlagIdx).getImm();
  unsigned RCID;
  // Only register-kind groups carry a register-class id; memory groups and
  // immediates do not.
  if ((InlineAsm::getKind(Flag) == InlineAsm::Kind_RegUse ||
       InlineAsm::getKind(Flag) == InlineAsm::Kind_RegDef ||
       InlineAsm::getKind(Flag) == InlineAsm::Kind_RegDefEarlyClobber) &&
      InlineAsm::hasRegClassConstraint(Flag, RCID))
    return TRI->getRegClass(RCID);

  // Assume that all registers in a memory operand are pointers.
  if (InlineAsm::getKind(Flag) == InlineAsm::Kind_Mem)
    return TRI->getPointerRegClass(MF);

  return nullptr;
}
874 | | |
/// Accumulate into CurRC the register-class constraints that this
/// instruction (or its whole bundle, when ExploreBundle is set) places on
/// Reg. Returns null as soon as the constraints become unsatisfiable.
const TargetRegisterClass *MachineInstr::getRegClassConstraintEffectForVReg(
    unsigned Reg, const TargetRegisterClass *CurRC, const TargetInstrInfo *TII,
    const TargetRegisterInfo *TRI, bool ExploreBundle) const {
  // Check every operands inside the bundle if we have
  // been asked to.
  if (ExploreBundle)
    for (ConstMIBundleOperands OpndIt(*this); OpndIt.isValid() && CurRC;
         ++OpndIt)
      // Dispatch through the operand's owning instruction, which may be a
      // different MI inside the bundle.
      CurRC = OpndIt->getParent()->getRegClassConstraintEffectForVRegImpl(
          OpndIt.getOperandNo(), Reg, CurRC, TII, TRI);
  else
    // Otherwise, just check the current operands.
    for (unsigned i = 0, e = NumOperands; i < e && CurRC; ++i)
      CurRC = getRegClassConstraintEffectForVRegImpl(i, Reg, CurRC, TII, TRI);
  return CurRC;
}
891 | | |
892 | | const TargetRegisterClass *MachineInstr::getRegClassConstraintEffectForVRegImpl( |
893 | | unsigned OpIdx, unsigned Reg, const TargetRegisterClass *CurRC, |
894 | 692 | const TargetInstrInfo *TII, const TargetRegisterInfo *TRI) const { |
895 | 692 | assert(CurRC && "Invalid initial register class"); |
896 | 692 | // Check if Reg is constrained by some of its use/def from MI. |
897 | 692 | const MachineOperand &MO = getOperand(OpIdx); |
898 | 692 | if (!MO.isReg() || MO.getReg() != Reg492 ) |
899 | 567 | return CurRC; |
900 | 125 | // If yes, accumulate the constraints through the operand. |
901 | 125 | return getRegClassConstraintEffect(OpIdx, CurRC, TII, TRI); |
902 | 125 | } |
903 | | |
904 | | const TargetRegisterClass *MachineInstr::getRegClassConstraintEffect( |
905 | | unsigned OpIdx, const TargetRegisterClass *CurRC, |
906 | 96.3k | const TargetInstrInfo *TII, const TargetRegisterInfo *TRI) const { |
907 | 96.3k | const TargetRegisterClass *OpRC = getRegClassConstraint(OpIdx, TII, TRI); |
908 | 96.3k | const MachineOperand &MO = getOperand(OpIdx); |
909 | 96.3k | assert(MO.isReg() && |
910 | 96.3k | "Cannot get register constraints for non-register operand"); |
911 | 96.3k | assert(CurRC && "Invalid initial register class"); |
912 | 96.3k | if (unsigned SubIdx = MO.getSubReg()) { |
913 | 9.95k | if (OpRC) |
914 | 8.26k | CurRC = TRI->getMatchingSuperRegClass(CurRC, OpRC, SubIdx); |
915 | 1.69k | else |
916 | 1.69k | CurRC = TRI->getSubClassWithSubReg(CurRC, SubIdx); |
917 | 86.4k | } else if (OpRC) |
918 | 49.9k | CurRC = TRI->getCommonSubClass(CurRC, OpRC); |
919 | 96.3k | return CurRC; |
920 | 96.3k | } |
921 | | |
922 | | /// Return the number of instructions inside the MI bundle, not counting the |
923 | | /// header instruction. |
924 | 8.15k | unsigned MachineInstr::getBundleSize() const { |
925 | 8.15k | MachineBasicBlock::const_instr_iterator I = getIterator(); |
926 | 8.15k | unsigned Size = 0; |
927 | 28.3k | while (I->isBundledWithSucc()) { |
928 | 20.2k | ++Size; |
929 | 20.2k | ++I; |
930 | 20.2k | } |
931 | 8.15k | return Size; |
932 | 8.15k | } |
933 | | |
934 | | /// Returns true if the MachineInstr has an implicit-use operand of exactly |
935 | | /// the given register (not considering sub/super-registers). |
936 | 8.33M | bool MachineInstr::hasRegisterImplicitUseOperand(unsigned Reg) const { |
937 | 49.7M | for (unsigned i = 0, e = getNumOperands(); i != e; ++i41.4M ) { |
938 | 49.7M | const MachineOperand &MO = getOperand(i); |
939 | 49.7M | if (MO.isReg() && MO.isUse()29.2M && MO.isImplicit()22.0M && MO.getReg() == Reg8.73M ) |
940 | 8.33M | return true; |
941 | 49.7M | } |
942 | 8.33M | return false0 ; |
943 | 8.33M | } |
944 | | |
/// findRegisterUseOperandIdx() - Returns the operand index that is a use of
/// the specific register or -1 if it is not found. It further tightens
/// the search criteria to a use that kills the register if isKill is true.
int MachineInstr::findRegisterUseOperandIdx(
    unsigned Reg, bool isKill, const TargetRegisterInfo *TRI) const {
  for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = getOperand(i);
    if (!MO.isReg() || !MO.isUse())
      continue;
    unsigned MOReg = MO.getReg();
    if (!MOReg)
      continue;
    // Exact match, or — when a TRI is supplied — any overlapping register
    // (sub/super-register aliasing).
    if (MOReg == Reg || (TRI && Reg && MOReg && TRI->regsOverlap(MOReg, Reg)))
      if (!isKill || MO.isKill())
        return i;
  }
  return -1;
}
963 | | |
/// readsWritesVirtualRegister - Return a pair of bools (reads, writes)
/// indicating if this instruction reads or writes Reg. This also considers
/// partial defines. If Ops is non-null, the index of every operand that
/// mentions Reg is appended to it.
std::pair<bool,bool>
MachineInstr::readsWritesVirtualRegister(unsigned Reg,
                                         SmallVectorImpl<unsigned> *Ops) const {
  bool PartDef = false; // Partial redefine.
  bool FullDef = false; // Full define.
  bool Use = false;

  for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = getOperand(i);
    if (!MO.isReg() || MO.getReg() != Reg)
      continue;
    if (Ops)
      Ops->push_back(i);
    if (MO.isUse())
      Use |= !MO.isUndef();
    else if (MO.getSubReg() && !MO.isUndef())
      // A partial def undef doesn't count as reading the register.
      PartDef = true;
    else
      FullDef = true;
  }
  // A partial redefine uses Reg unless there is also a full define.
  return std::make_pair(Use || (PartDef && !FullDef), PartDef || FullDef);
}
991 | | |
/// findRegisterDefOperandIdx() - Returns the operand index that is a def of
/// the specified register or -1 if it is not found. If isDead is true, defs
/// that are not dead are skipped. If TargetRegisterInfo is non-null, then it
/// also checks if there is a def of a super-register.
int
MachineInstr::findRegisterDefOperandIdx(unsigned Reg, bool isDead, bool Overlap,
                                        const TargetRegisterInfo *TRI) const {
  bool isPhys = TargetRegisterInfo::isPhysicalRegister(Reg);
  for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = getOperand(i);
    // Accept regmask operands when Overlap is set.
    // Ignore them when looking for a specific def operand (Overlap == false).
    if (isPhys && Overlap && MO.isRegMask() && MO.clobbersPhysReg(Reg))
      return i;
    if (!MO.isReg() || !MO.isDef())
      continue;
    unsigned MOReg = MO.getReg();
    bool Found = (MOReg == Reg);
    // For physical registers with a TRI, also match aliased registers:
    // any overlap when Overlap is set, otherwise only a super-register def.
    if (!Found && TRI && isPhys &&
        TargetRegisterInfo::isPhysicalRegister(MOReg)) {
      if (Overlap)
        Found = TRI->regsOverlap(MOReg, Reg);
      else
        Found = TRI->isSubRegister(MOReg, Reg);
    }
    if (Found && (!isDead || MO.isDead()))
      return i;
  }
  return -1;
}
1022 | | |
1023 | | /// findFirstPredOperandIdx() - Find the index of the first operand in the |
1024 | | /// operand list that is used to represent the predicate. It returns -1 if |
1025 | | /// none is found. |
1026 | 11.7M | int MachineInstr::findFirstPredOperandIdx() const { |
1027 | 11.7M | // Don't call MCID.findFirstPredOperandIdx() because this variant |
1028 | 11.7M | // is sometimes called on an instruction that's not yet complete, and |
1029 | 11.7M | // so the number of operands is less than the MCID indicates. In |
1030 | 11.7M | // particular, the PTX target does this. |
1031 | 11.7M | const MCInstrDesc &MCID = getDesc(); |
1032 | 11.7M | if (MCID.isPredicable()) { |
1033 | 28.7M | for (unsigned i = 0, e = getNumOperands(); i != e; ++i17.7M ) |
1034 | 28.7M | if (MCID.OpInfo[i].isPredicate()) |
1035 | 10.9M | return i; |
1036 | 10.9M | } |
1037 | 11.7M | |
1038 | 11.7M | return -1809k ; |
1039 | 11.7M | } |
1040 | | |
// MachineOperand::TiedTo is 4 bits wide.
const unsigned TiedMax = 15;

/// tieOperands - Mark operands at DefIdx and UseIdx as tied to each other.
///
/// Use and def operands can be tied together, indicated by a non-zero TiedTo
/// field. TiedTo can have these values:
///
/// 0: Operand is not tied to anything.
/// 1 to TiedMax-1: Tied to getOperand(TiedTo-1).
/// TiedMax: Tied to an operand >= TiedMax-1.
///
/// The tied def must be one of the first TiedMax operands on a normal
/// instruction. INLINEASM instructions allow more tied defs.
///
void MachineInstr::tieOperands(unsigned DefIdx, unsigned UseIdx) {
  MachineOperand &DefMO = getOperand(DefIdx);
  MachineOperand &UseMO = getOperand(UseIdx);
  assert(DefMO.isDef() && "DefIdx must be a def operand");
  assert(UseMO.isUse() && "UseIdx must be a use operand");
  assert(!DefMO.isTied() && "Def is already tied to another use");
  assert(!UseMO.isTied() && "Use is already tied to another def");

  if (DefIdx < TiedMax)
    UseMO.TiedTo = DefIdx + 1;
  else {
    // Inline asm can use the group descriptors to find tied operands, but on
    // normal instruction, the tied def must be within the first TiedMax
    // operands.
    assert(isInlineAsm() && "DefIdx out of range");
    UseMO.TiedTo = TiedMax;
  }

  // UseIdx can be out of range, we'll search for it in findTiedOperandIdx().
  DefMO.TiedTo = std::min(UseIdx + 1, TiedMax);
}
1077 | | |
/// Given the index of a tied register operand, find the operand it is tied to.
/// Defs are tied to uses and vice versa. Returns the index of the tied operand
/// which must exist.
unsigned MachineInstr::findTiedOperandIdx(unsigned OpIdx) const {
  const MachineOperand &MO = getOperand(OpIdx);
  assert(MO.isTied() && "Operand isn't tied");

  // Normally TiedTo is in range.
  if (MO.TiedTo < TiedMax)
    return MO.TiedTo - 1;

  // Uses on normal instructions can be out of range.
  if (!isInlineAsm()) {
    // Normal tied defs must be in the 0..TiedMax-1 range.
    if (MO.isUse())
      return TiedMax - 1;
    // MO is a def. Search for the tied use.
    for (unsigned i = TiedMax - 1, e = getNumOperands(); i != e; ++i) {
      const MachineOperand &UseMO = getOperand(i);
      if (UseMO.isReg() && UseMO.isUse() && UseMO.TiedTo == OpIdx + 1)
        return i;
    }
    llvm_unreachable("Can't find tied use");
  }

  // Now deal with inline asm by parsing the operand group descriptor flags.
  // Find the beginning of each operand group.
  SmallVector<unsigned, 8> GroupIdx;
  unsigned OpIdxGroup = ~0u;
  unsigned NumOps;
  for (unsigned i = InlineAsm::MIOp_FirstOperand, e = getNumOperands(); i < e;
       i += NumOps) {
    const MachineOperand &FlagMO = getOperand(i);
    assert(FlagMO.isImm() && "Invalid tied operand on inline asm");
    unsigned CurGroup = GroupIdx.size();
    GroupIdx.push_back(i);
    NumOps = 1 + InlineAsm::getNumOperandRegisters(FlagMO.getImm());
    // OpIdx belongs to this operand group.
    if (OpIdx > i && OpIdx < i + NumOps)
      OpIdxGroup = CurGroup;
    unsigned TiedGroup;
    if (!InlineAsm::isUseOperandTiedToDef(FlagMO.getImm(), TiedGroup))
      continue;
    // Operands in this group are tied to operands in TiedGroup which must be
    // earlier. Find the number of operands between the two groups.
    unsigned Delta = i - GroupIdx[TiedGroup];

    // OpIdx is a use tied to TiedGroup.
    if (OpIdxGroup == CurGroup)
      return OpIdx - Delta;

    // OpIdx is a def tied to this use group.
    if (OpIdxGroup == TiedGroup)
      return OpIdx + Delta;
  }
  llvm_unreachable("Invalid tied operand on inline asm");
}
1135 | | |
1136 | | /// clearKillInfo - Clears kill flags on all operands. |
1137 | | /// |
1138 | 76.8k | void MachineInstr::clearKillInfo() { |
1139 | 220k | for (MachineOperand &MO : operands()) { |
1140 | 220k | if (MO.isReg() && MO.isUse()137k ) |
1141 | 77.4k | MO.setIsKill(false); |
1142 | 220k | } |
1143 | 76.8k | } |
1144 | | |
1145 | | void MachineInstr::substituteRegister(unsigned FromReg, unsigned ToReg, |
1146 | | unsigned SubIdx, |
1147 | 1.19M | const TargetRegisterInfo &RegInfo) { |
1148 | 1.19M | if (TargetRegisterInfo::isPhysicalRegister(ToReg)) { |
1149 | 582k | if (SubIdx) |
1150 | 0 | ToReg = RegInfo.getSubReg(ToReg, SubIdx); |
1151 | 1.37M | for (MachineOperand &MO : operands()) { |
1152 | 1.37M | if (!MO.isReg() || MO.getReg() != FromReg654k ) |
1153 | 792k | continue; |
1154 | 582k | MO.substPhysReg(ToReg, RegInfo); |
1155 | 582k | } |
1156 | 607k | } else { |
1157 | 1.50M | for (MachineOperand &MO : operands()) { |
1158 | 1.50M | if (!MO.isReg() || MO.getReg() != FromReg670k ) |
1159 | 893k | continue; |
1160 | 607k | MO.substVirtReg(ToReg, SubIdx, RegInfo); |
1161 | 607k | } |
1162 | 607k | } |
1163 | 1.19M | } |
1164 | | |
/// isSafeToMove - Return true if it is safe to move this instruction. If
/// SawStore is set to true, it means that there is a store (or call) between
/// the instruction's location and its intended destination.
bool MachineInstr::isSafeToMove(AliasAnalysis *AA, bool &SawStore) const {
  // Ignore stuff that we obviously can't move.
  //
  // Treat volatile loads as stores. This is not strictly necessary for
  // volatiles, but it is required for atomic loads. It is not allowed to move
  // a load across an atomic load with Ordering > Monotonic.
  if (mayStore() || isCall() || isPHI() ||
      (mayLoad() && hasOrderedMemoryRef())) {
    // NOTE: this both reports "unsafe" and records the store/call for the
    // caller's subsequent queries.
    SawStore = true;
    return false;
  }

  if (isPosition() || isDebugInstr() || isTerminator() ||
      mayRaiseFPException() || hasUnmodeledSideEffects())
    return false;

  // See if this instruction does a load. If so, we have to guarantee that the
  // loaded value doesn't change between the load and the its intended
  // destination. The check for isInvariantLoad gives the targe the chance to
  // classify the load as always returning a constant, e.g. a constant pool
  // load.
  if (mayLoad() && !isDereferenceableInvariantLoad(AA))
    // Otherwise, this is a real load. If there is a store between the load and
    // end of block, we can't move it.
    return !SawStore;

  return true;
}
1196 | | |
/// Conservatively decide whether this instruction's memory access may alias
/// Other's. Returns false only when the accesses provably cannot conflict.
bool MachineInstr::mayAlias(AliasAnalysis *AA, const MachineInstr &Other,
                            bool UseTBAA) const {
  const MachineFunction *MF = getMF();
  const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();
  const MachineFrameInfo &MFI = MF->getFrameInfo();

  // If neither instruction stores to memory, they can't alias in any
  // meaningful way, even if they read from the same address.
  if (!mayStore() && !Other.mayStore())
    return false;

  // Let the target decide if memory accesses cannot possibly overlap.
  if (TII->areMemAccessesTriviallyDisjoint(*this, Other, AA))
    return false;

  // FIXME: Need to handle multiple memory operands to support all targets.
  if (!hasOneMemOperand() || !Other.hasOneMemOperand())
    return true;

  MachineMemOperand *MMOa = *memoperands_begin();
  MachineMemOperand *MMOb = *Other.memoperands_begin();

  // The following interface to AA is fashioned after DAGCombiner::isAlias
  // and operates with MachineMemOperand offset with some important
  // assumptions:
  //   - LLVM fundamentally assumes flat address spaces.
  //   - MachineOperand offset can *only* result from legalization and
  //     cannot affect queries other than the trivial case of overlap
  //     checking.
  //   - These offsets never wrap and never step outside
  //     of allocated objects.
  //   - There should never be any negative offsets here.
  //
  // FIXME: Modify API to hide this math from "user"
  // Even before we go to AA we can reason locally about some
  // memory objects. It can save compile time, and possibly catch some
  // corner cases not currently covered.

  int64_t OffsetA = MMOa->getOffset();
  int64_t OffsetB = MMOb->getOffset();
  int64_t MinOffset = std::min(OffsetA, OffsetB);

  uint64_t WidthA = MMOa->getSize();
  uint64_t WidthB = MMOb->getSize();
  bool KnownWidthA = WidthA != MemoryLocation::UnknownSize;
  bool KnownWidthB = WidthB != MemoryLocation::UnknownSize;

  const Value *ValA = MMOa->getValue();
  const Value *ValB = MMOb->getValue();
  bool SameVal = (ValA && ValB && (ValA == ValB));
  if (!SameVal) {
    // Mixed IR-value / pseudo-source-value pairs: a pseudo source value that
    // cannot alias frame objects rules out a conflict with any IR value.
    const PseudoSourceValue *PSVa = MMOa->getPseudoValue();
    const PseudoSourceValue *PSVb = MMOb->getPseudoValue();
    if (PSVa && ValB && !PSVa->mayAlias(&MFI))
      return false;
    if (PSVb && ValA && !PSVb->mayAlias(&MFI))
      return false;
    if (PSVa && PSVb && (PSVa == PSVb))
      SameVal = true;
  }

  if (SameVal) {
    // Same underlying object: overlap is a pure offset/width interval check.
    if (!KnownWidthA || !KnownWidthB)
      return true;
    int64_t MaxOffset = std::max(OffsetA, OffsetB);
    int64_t LowWidth = (MinOffset == OffsetA) ? WidthA : WidthB;
    return (MinOffset + LowWidth > MaxOffset);
  }

  if (!AA)
    return true;

  if (!ValA || !ValB)
    return true;

  assert((OffsetA >= 0) && "Negative MachineMemOperand offset");
  assert((OffsetB >= 0) && "Negative MachineMemOperand offset");

  int64_t OverlapA = KnownWidthA ? WidthA + OffsetA - MinOffset
                                 : MemoryLocation::UnknownSize;
  int64_t OverlapB = KnownWidthB ? WidthB + OffsetB - MinOffset
                                 : MemoryLocation::UnknownSize;

  AliasResult AAResult = AA->alias(
      MemoryLocation(ValA, OverlapA,
                     UseTBAA ? MMOa->getAAInfo() : AAMDNodes()),
      MemoryLocation(ValB, OverlapB,
                     UseTBAA ? MMOb->getAAInfo() : AAMDNodes()));

  return (AAResult != NoAlias);
}
1288 | | |
1289 | | /// hasOrderedMemoryRef - Return true if this instruction may have an ordered |
1290 | | /// or volatile memory reference, or if the information describing the memory |
1291 | | /// reference is not available. Return false if it is known to have no ordered |
1292 | | /// memory references. |
1293 | 74.5M | bool MachineInstr::hasOrderedMemoryRef() const { |
1294 | 74.5M | // An instruction known never to access memory won't have a volatile access. |
1295 | 74.5M | if (!mayStore() && |
1296 | 74.5M | !mayLoad()41.7M && |
1297 | 74.5M | !isCall()17.2M && |
1298 | 74.5M | !hasUnmodeledSideEffects()16.5M ) |
1299 | 16.3M | return false; |
1300 | 58.1M | |
1301 | 58.1M | // Otherwise, if the instruction has no memory reference information, |
1302 | 58.1M | // conservatively assume it wasn't preserved. |
1303 | 58.1M | if (memoperands_empty()) |
1304 | 1.69M | return true; |
1305 | 56.4M | |
1306 | 56.4M | // Check if any of our memory operands are ordered. |
1307 | 57.0M | return llvm::any_of(memoperands(), [](const MachineMemOperand *MMO) 56.4M { |
1308 | 57.0M | return !MMO->isUnordered(); |
1309 | 57.0M | }); |
1310 | 56.4M | } |
1311 | | |
/// isDereferenceableInvariantLoad - Return true if this instruction will never
/// trap and is loading from a location whose value is invariant across a run of
/// this function.
bool MachineInstr::isDereferenceableInvariantLoad(AliasAnalysis *AA) const {
  // If the instruction doesn't load at all, it isn't an invariant load.
  if (!mayLoad())
    return false;

  // If the instruction has lost its memoperands, conservatively assume that
  // it may not be an invariant load.
  if (memoperands_empty())
    return false;

  const MachineFrameInfo &MFI = getParent()->getParent()->getFrameInfo();

  // Every memory operand must individually be shown invariant and
  // dereferenceable; any operand failing all checks makes the answer false.
  for (MachineMemOperand *MMO : memoperands()) {
    if (!MMO->isUnordered())
      // If the memory operand has ordering side effects, we can't move the
      // instruction. Such an instruction is technically an invariant load,
      // but the caller code would need updated to expect that.
      return false;
    if (MMO->isStore()) return false;
    if (MMO->isInvariant() && MMO->isDereferenceable())
      continue;

    // A load from a constant PseudoSourceValue is invariant.
    if (const PseudoSourceValue *PSV = MMO->getPseudoValue())
      if (PSV->isConstant(&MFI))
        continue;

    if (const Value *V = MMO->getValue()) {
      // If we have an AliasAnalysis, ask it whether the memory is constant.
      if (AA &&
          AA->pointsToConstantMemory(
              MemoryLocation(V, MMO->getSize(), MMO->getAAInfo())))
        continue;
    }

    // Otherwise assume conservatively.
    return false;
  }

  // Everything checks out.
  return true;
}
1357 | | |
1358 | | /// isConstantValuePHI - If the specified instruction is a PHI that always |
1359 | | /// merges together the same virtual register, return the register, otherwise |
1360 | | /// return 0. |
1361 | 92 | unsigned MachineInstr::isConstantValuePHI() const { |
1362 | 92 | if (!isPHI()) |
1363 | 0 | return 0; |
1364 | 92 | assert(getNumOperands() >= 3 && |
1365 | 92 | "It's illegal to have a PHI without source operands"); |
1366 | 92 | |
1367 | 92 | unsigned Reg = getOperand(1).getReg(); |
1368 | 92 | for (unsigned i = 3, e = getNumOperands(); i < e; i += 20 ) |
1369 | 92 | if (getOperand(i).getReg() != Reg) |
1370 | 92 | return 0; |
1371 | 92 | return Reg0 ; |
1372 | 92 | } |
1373 | | |
1374 | 259M | bool MachineInstr::hasUnmodeledSideEffects() const { |
1375 | 259M | if (hasProperty(MCID::UnmodeledSideEffects)) |
1376 | 25.8M | return true; |
1377 | 233M | if (isInlineAsm()) { |
1378 | 50.2k | unsigned ExtraInfo = getOperand(InlineAsm::MIOp_ExtraInfo).getImm(); |
1379 | 50.2k | if (ExtraInfo & InlineAsm::Extra_HasSideEffects) |
1380 | 45.5k | return true; |
1381 | 233M | } |
1382 | 233M | |
1383 | 233M | return false; |
1384 | 233M | } |
1385 | | |
1386 | 23.9M | bool MachineInstr::isLoadFoldBarrier() const { |
1387 | 23.9M | return mayStore() || isCall()22.4M || hasUnmodeledSideEffects()20.9M ; |
1388 | 23.9M | } |
1389 | | |
1390 | | /// allDefsAreDead - Return true if all the defs of this instruction are dead. |
1391 | | /// |
1392 | 6.95M | bool MachineInstr::allDefsAreDead() const { |
1393 | 7.75M | for (const MachineOperand &MO : operands()) { |
1394 | 7.75M | if (!MO.isReg() || MO.isUse()7.05M ) |
1395 | 784k | continue; |
1396 | 6.97M | if (!MO.isDead()) |
1397 | 6.35M | return false; |
1398 | 6.97M | } |
1399 | 6.95M | return true597k ; |
1400 | 6.95M | } |
1401 | | |
1402 | | /// copyImplicitOps - Copy implicit register operands from specified |
1403 | | /// instruction to this instruction. |
1404 | | void MachineInstr::copyImplicitOps(MachineFunction &MF, |
1405 | 23.0k | const MachineInstr &MI) { |
1406 | 23.0k | for (unsigned i = MI.getDesc().getNumOperands(), e = MI.getNumOperands(); |
1407 | 73.2k | i != e; ++i50.1k ) { |
1408 | 50.1k | const MachineOperand &MO = MI.getOperand(i); |
1409 | 50.1k | if ((MO.isReg() && MO.isImplicit()42.3k ) || MO.isRegMask()7.78k ) |
1410 | 49.9k | addOperand(MF, MO); |
1411 | 50.1k | } |
1412 | 23.0k | } |
1413 | | |
1414 | 98.9k | bool MachineInstr::hasComplexRegisterTies() const { |
1415 | 98.9k | const MCInstrDesc &MCID = getDesc(); |
1416 | 383k | for (unsigned I = 0, E = getNumOperands(); I < E; ++I284k ) { |
1417 | 284k | const auto &Operand = getOperand(I); |
1418 | 284k | if (!Operand.isReg() || Operand.isDef()223k ) |
1419 | 151k | // Ignore the defined registers as MCID marks only the uses as tied. |
1420 | 151k | continue; |
1421 | 133k | int ExpectedTiedIdx = MCID.getOperandConstraint(I, MCOI::TIED_TO); |
1422 | 133k | int TiedIdx = Operand.isTied() ? int(findTiedOperandIdx(I))2.58k : -1130k ; |
1423 | 133k | if (ExpectedTiedIdx != TiedIdx) |
1424 | 12 | return true; |
1425 | 133k | } |
1426 | 98.9k | return false98.9k ; |
1427 | 98.9k | } |
1428 | | |
1429 | | LLT MachineInstr::getTypeToPrint(unsigned OpIdx, SmallBitVector &PrintedTypes, |
1430 | 704k | const MachineRegisterInfo &MRI) const { |
1431 | 704k | const MachineOperand &Op = getOperand(OpIdx); |
1432 | 704k | if (!Op.isReg()) |
1433 | 237k | return LLT{}; |
1434 | 467k | |
1435 | 467k | if (isVariadic() || OpIdx >= getNumExplicitOperands()441k ) |
1436 | 160k | return MRI.getType(Op.getReg()); |
1437 | 306k | |
1438 | 306k | auto &OpInfo = getDesc().OpInfo[OpIdx]; |
1439 | 306k | if (!OpInfo.isGenericType()) |
1440 | 255k | return MRI.getType(Op.getReg()); |
1441 | 51.0k | |
1442 | 51.0k | if (PrintedTypes[OpInfo.getGenericTypeIndex()]) |
1443 | 14.8k | return LLT{}; |
1444 | 36.2k | |
1445 | 36.2k | LLT TypeToPrint = MRI.getType(Op.getReg()); |
1446 | 36.2k | // Don't mark the type index printed if it wasn't actually printed: maybe |
1447 | 36.2k | // another operand with the same type index has an actual type attached: |
1448 | 36.2k | if (TypeToPrint.isValid()) |
1449 | 36.2k | PrintedTypes.set(OpInfo.getGenericTypeIndex()); |
1450 | 36.2k | return TypeToPrint; |
1451 | 36.2k | } |
1452 | | |
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
/// Debug helper: print this instruction to the debug stream with a leading
/// indent. Compiled out of NDEBUG builds unless dumps are forced on.
LLVM_DUMP_METHOD void MachineInstr::dump() const {
  dbgs() << " ";
  print(dbgs());
}
#endif
1459 | | |
1460 | | void MachineInstr::print(raw_ostream &OS, bool IsStandalone, bool SkipOpers, |
1461 | | bool SkipDebugLoc, bool AddNewLine, |
1462 | 589 | const TargetInstrInfo *TII) const { |
1463 | 589 | const Module *M = nullptr; |
1464 | 589 | const Function *F = nullptr; |
1465 | 589 | if (const MachineFunction *MF = getMFIfAvailable(*this)) { |
1466 | 588 | F = &MF->getFunction(); |
1467 | 588 | M = F->getParent(); |
1468 | 588 | if (!TII) |
1469 | 588 | TII = MF->getSubtarget().getInstrInfo(); |
1470 | 588 | } |
1471 | 589 | |
1472 | 589 | ModuleSlotTracker MST(M); |
1473 | 589 | if (F) |
1474 | 588 | MST.incorporateFunction(*F); |
1475 | 589 | print(OS, MST, IsStandalone, SkipOpers, SkipDebugLoc, AddNewLine, TII); |
1476 | 589 | } |
1477 | | |
/// Print this MachineInstr in MIR-like syntax: explicit defs, flags, opcode,
/// remaining operands (with pretty-printing for DBG_VALUE/DBG_LABEL and
/// inline-asm descriptors), attached symbols, debug location, and memory
/// operands. Behavior-affecting target info is looked up opportunistically;
/// printing degrades gracefully when it is unavailable.
void MachineInstr::print(raw_ostream &OS, ModuleSlotTracker &MST,
                         bool IsStandalone, bool SkipOpers, bool SkipDebugLoc,
                         bool AddNewLine, const TargetInstrInfo *TII) const {
  // We can be a bit tidier if we know the MachineFunction.
  const MachineFunction *MF = nullptr;
  const TargetRegisterInfo *TRI = nullptr;
  const MachineRegisterInfo *MRI = nullptr;
  const TargetIntrinsicInfo *IntrinsicInfo = nullptr;
  // NOTE(review): MF is never assigned here, so the "inlined @[...]" branch
  // below appears unreachable — confirm against tryToGetTargetInfo.
  tryToGetTargetInfo(*this, TRI, MRI, IntrinsicInfo, TII);

  if (isCFIInstruction())
    assert(getNumOperands() == 1 && "Expected 1 operand in CFI instruction");

  SmallBitVector PrintedTypes(8);
  bool ShouldPrintRegisterTies = IsStandalone || hasComplexRegisterTies();
  // Returns the index a tied use is tied to, or 0 when ties aren't printed.
  auto getTiedOperandIdx = [&](unsigned OpIdx) {
    if (!ShouldPrintRegisterTies)
      return 0U;
    const MachineOperand &MO = getOperand(OpIdx);
    if (MO.isReg() && MO.isTied() && !MO.isDef())
      return findTiedOperandIdx(OpIdx);
    return 0U;
  };
  unsigned StartOp = 0;
  unsigned e = getNumOperands();

  // Print explicitly defined operands on the left of an assignment syntax.
  while (StartOp < e) {
    const MachineOperand &MO = getOperand(StartOp);
    if (!MO.isReg() || !MO.isDef() || MO.isImplicit())
      break;

    if (StartOp != 0)
      OS << ", ";

    LLT TypeToPrint = MRI ? getTypeToPrint(StartOp, PrintedTypes, *MRI) : LLT{};
    unsigned TiedOperandIdx = getTiedOperandIdx(StartOp);
    MO.print(OS, MST, TypeToPrint, /*PrintDef=*/false, IsStandalone,
             ShouldPrintRegisterTies, TiedOperandIdx, TRI, IntrinsicInfo);
    ++StartOp;
  }

  if (StartOp != 0)
    OS << " = ";

  // Instruction-level flags precede the opcode name.
  if (getFlag(MachineInstr::FrameSetup))
    OS << "frame-setup ";
  if (getFlag(MachineInstr::FrameDestroy))
    OS << "frame-destroy ";
  if (getFlag(MachineInstr::FmNoNans))
    OS << "nnan ";
  if (getFlag(MachineInstr::FmNoInfs))
    OS << "ninf ";
  if (getFlag(MachineInstr::FmNsz))
    OS << "nsz ";
  if (getFlag(MachineInstr::FmArcp))
    OS << "arcp ";
  if (getFlag(MachineInstr::FmContract))
    OS << "contract ";
  if (getFlag(MachineInstr::FmAfn))
    OS << "afn ";
  if (getFlag(MachineInstr::FmReassoc))
    OS << "reassoc ";
  if (getFlag(MachineInstr::NoUWrap))
    OS << "nuw ";
  if (getFlag(MachineInstr::NoSWrap))
    OS << "nsw ";
  if (getFlag(MachineInstr::IsExact))
    OS << "exact ";
  if (getFlag(MachineInstr::FPExcept))
    OS << "fpexcept ";

  // Print the opcode name.
  if (TII)
    OS << TII->getName(getOpcode());
  else
    OS << "UNKNOWN";

  if (SkipOpers)
    return;

  // Print the rest of the operands.
  bool FirstOp = true;
  unsigned AsmDescOp = ~0u;
  unsigned AsmOpCount = 0;

  if (isInlineAsm() && e >= InlineAsm::MIOp_FirstOperand) {
    // Print asm string.
    OS << " ";
    const unsigned OpIdx = InlineAsm::MIOp_AsmString;
    LLT TypeToPrint = MRI ? getTypeToPrint(OpIdx, PrintedTypes, *MRI) : LLT{};
    unsigned TiedOperandIdx = getTiedOperandIdx(OpIdx);
    getOperand(OpIdx).print(OS, MST, TypeToPrint, /*PrintDef=*/true, IsStandalone,
                            ShouldPrintRegisterTies, TiedOperandIdx, TRI,
                            IntrinsicInfo);

    // Print HasSideEffects, MayLoad, MayStore, IsAlignStack
    unsigned ExtraInfo = getOperand(InlineAsm::MIOp_ExtraInfo).getImm();
    if (ExtraInfo & InlineAsm::Extra_HasSideEffects)
      OS << " [sideeffect]";
    if (ExtraInfo & InlineAsm::Extra_MayLoad)
      OS << " [mayload]";
    if (ExtraInfo & InlineAsm::Extra_MayStore)
      OS << " [maystore]";
    if (ExtraInfo & InlineAsm::Extra_IsConvergent)
      OS << " [isconvergent]";
    if (ExtraInfo & InlineAsm::Extra_IsAlignStack)
      OS << " [alignstack]";
    if (getInlineAsmDialect() == InlineAsm::AD_ATT)
      OS << " [attdialect]";
    if (getInlineAsmDialect() == InlineAsm::AD_Intel)
      OS << " [inteldialect]";

    // Remaining operands are asm operand descriptors; track them below.
    StartOp = AsmDescOp = InlineAsm::MIOp_FirstOperand;
    FirstOp = false;
  }

  for (unsigned i = StartOp, e = getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = getOperand(i);

    if (FirstOp) FirstOp = false; else OS << ",";
    OS << " ";

    if (isDebugValue() && MO.isMetadata()) {
      // Pretty print DBG_VALUE instructions.
      auto *DIV = dyn_cast<DILocalVariable>(MO.getMetadata());
      if (DIV && !DIV->getName().empty())
        OS << "!\"" << DIV->getName() << '\"';
      else {
        LLT TypeToPrint = MRI ? getTypeToPrint(i, PrintedTypes, *MRI) : LLT{};
        unsigned TiedOperandIdx = getTiedOperandIdx(i);
        MO.print(OS, MST, TypeToPrint, /*PrintDef=*/true, IsStandalone,
                 ShouldPrintRegisterTies, TiedOperandIdx, TRI, IntrinsicInfo);
      }
    } else if (isDebugLabel() && MO.isMetadata()) {
      // Pretty print DBG_LABEL instructions.
      auto *DIL = dyn_cast<DILabel>(MO.getMetadata());
      if (DIL && !DIL->getName().empty())
        OS << "\"" << DIL->getName() << '\"';
      else {
        LLT TypeToPrint = MRI ? getTypeToPrint(i, PrintedTypes, *MRI) : LLT{};
        unsigned TiedOperandIdx = getTiedOperandIdx(i);
        MO.print(OS, MST, TypeToPrint, /*PrintDef=*/true, IsStandalone,
                 ShouldPrintRegisterTies, TiedOperandIdx, TRI, IntrinsicInfo);
      }
    } else if (i == AsmDescOp && MO.isImm()) {
      // Pretty print the inline asm operand descriptor.
      OS << '$' << AsmOpCount++;
      unsigned Flag = MO.getImm();
      switch (InlineAsm::getKind(Flag)) {
      case InlineAsm::Kind_RegUse:             OS << ":[reguse"; break;
      case InlineAsm::Kind_RegDef:             OS << ":[regdef"; break;
      case InlineAsm::Kind_RegDefEarlyClobber: OS << ":[regdef-ec"; break;
      case InlineAsm::Kind_Clobber:            OS << ":[clobber"; break;
      case InlineAsm::Kind_Imm:                OS << ":[imm"; break;
      case InlineAsm::Kind_Mem:                OS << ":[mem"; break;
      default: OS << ":[??" << InlineAsm::getKind(Flag); break;
      }

      unsigned RCID = 0;
      if (!InlineAsm::isImmKind(Flag) && !InlineAsm::isMemKind(Flag) &&
          InlineAsm::hasRegClassConstraint(Flag, RCID)) {
        if (TRI) {
          OS << ':' << TRI->getRegClassName(TRI->getRegClass(RCID));
        } else
          OS << ":RC" << RCID;
      }

      if (InlineAsm::isMemKind(Flag)) {
        unsigned MCID = InlineAsm::getMemoryConstraintID(Flag);
        switch (MCID) {
        case InlineAsm::Constraint_es: OS << ":es"; break;
        case InlineAsm::Constraint_i:  OS << ":i"; break;
        case InlineAsm::Constraint_m:  OS << ":m"; break;
        case InlineAsm::Constraint_o:  OS << ":o"; break;
        case InlineAsm::Constraint_v:  OS << ":v"; break;
        case InlineAsm::Constraint_Q:  OS << ":Q"; break;
        case InlineAsm::Constraint_R:  OS << ":R"; break;
        case InlineAsm::Constraint_S:  OS << ":S"; break;
        case InlineAsm::Constraint_T:  OS << ":T"; break;
        case InlineAsm::Constraint_Um: OS << ":Um"; break;
        case InlineAsm::Constraint_Un: OS << ":Un"; break;
        case InlineAsm::Constraint_Uq: OS << ":Uq"; break;
        case InlineAsm::Constraint_Us: OS << ":Us"; break;
        case InlineAsm::Constraint_Ut: OS << ":Ut"; break;
        case InlineAsm::Constraint_Uv: OS << ":Uv"; break;
        case InlineAsm::Constraint_Uy: OS << ":Uy"; break;
        case InlineAsm::Constraint_X:  OS << ":X"; break;
        case InlineAsm::Constraint_Z:  OS << ":Z"; break;
        case InlineAsm::Constraint_ZC: OS << ":ZC"; break;
        case InlineAsm::Constraint_Zy: OS << ":Zy"; break;
        default: OS << ":?"; break;
        }
      }

      unsigned TiedTo = 0;
      if (InlineAsm::isUseOperandTiedToDef(Flag, TiedTo))
        OS << " tiedto:$" << TiedTo;

      OS << ']';

      // Compute the index of the next operand descriptor.
      AsmDescOp += 1 + InlineAsm::getNumOperandRegisters(Flag);
    } else {
      LLT TypeToPrint = MRI ? getTypeToPrint(i, PrintedTypes, *MRI) : LLT{};
      unsigned TiedOperandIdx = getTiedOperandIdx(i);
      if (MO.isImm() && isOperandSubregIdx(i))
        MachineOperand::printSubRegIdx(OS, MO.getImm(), TRI);
      else
        MO.print(OS, MST, TypeToPrint, /*PrintDef=*/true, IsStandalone,
                 ShouldPrintRegisterTies, TiedOperandIdx, TRI, IntrinsicInfo);
    }
  }

  // Print any optional symbols attached to this instruction as-if they were
  // operands.
  if (MCSymbol *PreInstrSymbol = getPreInstrSymbol()) {
    if (!FirstOp) {
      FirstOp = false;
      OS << ',';
    }
    OS << " pre-instr-symbol ";
    MachineOperand::printSymbol(OS, *PreInstrSymbol);
  }
  if (MCSymbol *PostInstrSymbol = getPostInstrSymbol()) {
    if (!FirstOp) {
      FirstOp = false;
      OS << ',';
    }
    OS << " post-instr-symbol ";
    MachineOperand::printSymbol(OS, *PostInstrSymbol);
  }

  if (!SkipDebugLoc) {
    if (const DebugLoc &DL = getDebugLoc()) {
      if (!FirstOp)
        OS << ',';
      OS << " debug-location ";
      DL->printAsOperand(OS, MST);
    }
  }

  if (!memoperands_empty()) {
    SmallVector<StringRef, 0> SSNs;
    const LLVMContext *Context = nullptr;
    std::unique_ptr<LLVMContext> CtxPtr;
    const MachineFrameInfo *MFI = nullptr;
    if (const MachineFunction *MF = getMFIfAvailable(*this)) {
      MFI = &MF->getFrameInfo();
      Context = &MF->getFunction().getContext();
    } else {
      // No parent function: fall back to a throwaway context for printing.
      CtxPtr = llvm::make_unique<LLVMContext>();
      Context = CtxPtr.get();
    }

    OS << " :: ";
    bool NeedComma = false;
    for (const MachineMemOperand *Op : memoperands()) {
      if (NeedComma)
        OS << ", ";
      Op->print(OS, MST, SSNs, *Context, MFI, TII);
      NeedComma = true;
    }
  }

  if (SkipDebugLoc)
    return;

  bool HaveSemi = false;

  // Print debug location information.
  if (const DebugLoc &DL = getDebugLoc()) {
    if (!HaveSemi) {
      OS << ';';
      HaveSemi = true;
    }
    OS << ' ';
    DL.print(OS);
  }

  // Print extra comments for DEBUG_VALUE.
  if (isDebugValue() && getOperand(e - 2).isMetadata()) {
    if (!HaveSemi) {
      OS << ";";
      HaveSemi = true;
    }
    auto *DV = cast<DILocalVariable>(getOperand(e - 2).getMetadata());
    OS << " line no:" << DV->getLine();
    if (auto *InlinedAt = debugLoc->getInlinedAt()) {
      DebugLoc InlinedAtDL(InlinedAt);
      if (InlinedAtDL && MF) {
        OS << " inlined @[ ";
        InlinedAtDL.print(OS);
        OS << " ]";
      }
    }
    if (isIndirectDebugValue())
      OS << " indirect";
  }
  // TODO: DBG_LABEL

  if (AddNewLine)
    OS << '\n';
}
1782 | | |
/// Mark IncomingReg as killed by this instruction. Returns true if a kill was
/// recorded or already present. When AddIfNotFound is set and no matching use
/// exists, an implicit killed use is appended. Sub-register kills that become
/// redundant are trimmed; a pre-existing super-register kill makes the
/// operation a no-op.
bool MachineInstr::addRegisterKilled(unsigned IncomingReg,
                                     const TargetRegisterInfo *RegInfo,
                                     bool AddIfNotFound) {
  bool isPhysReg = TargetRegisterInfo::isPhysicalRegister(IncomingReg);
  // Aliases only matter for physical registers.
  bool hasAliases = isPhysReg &&
    MCRegAliasIterator(IncomingReg, RegInfo, false).isValid();
  bool Found = false;
  SmallVector<unsigned,4> DeadOps;
  for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
    MachineOperand &MO = getOperand(i);
    if (!MO.isReg() || !MO.isUse() || MO.isUndef())
      continue;

    // DEBUG_VALUE nodes do not contribute to code generation and should
    // always be ignored. Failure to do so may result in trying to modify
    // KILL flags on DEBUG_VALUE nodes.
    if (MO.isDebug())
      continue;

    unsigned Reg = MO.getReg();
    if (!Reg)
      continue;

    if (Reg == IncomingReg) {
      if (!Found) {
        if (MO.isKill())
          // The register is already marked kill.
          return true;
        if (isPhysReg && isRegTiedToDefOperand(i))
          // Two-address uses of physregs must not be marked kill.
          return true;
        MO.setIsKill();
        Found = true;
      }
    } else if (hasAliases && MO.isKill() &&
               TargetRegisterInfo::isPhysicalRegister(Reg)) {
      // A super-register kill already exists.
      if (RegInfo->isSuperRegister(IncomingReg, Reg))
        return true;
      // Record sub-register kills for removal below.
      if (RegInfo->isSubRegister(IncomingReg, Reg))
        DeadOps.push_back(i);
    }
  }

  // Trim unneeded kill operands. Process back-to-front so earlier recorded
  // indices stay valid as operands are removed.
  while (!DeadOps.empty()) {
    unsigned OpIdx = DeadOps.back();
    if (getOperand(OpIdx).isImplicit() &&
        (!isInlineAsm() || findInlineAsmFlagIdx(OpIdx) < 0))
      RemoveOperand(OpIdx);
    else
      // Explicit (or asm-described) operands can't be removed; clear the flag.
      getOperand(OpIdx).setIsKill(false);
    DeadOps.pop_back();
  }

  // If not found, this means an alias of one of the operands is killed. Add a
  // new implicit operand if required.
  if (!Found && AddIfNotFound) {
    addOperand(MachineOperand::CreateReg(IncomingReg,
                                         false /*IsDef*/,
                                         true  /*IsImp*/,
                                         true  /*IsKill*/));
    return true;
  }
  return Found;
}
1849 | | |
1850 | | void MachineInstr::clearRegisterKills(unsigned Reg, |
1851 | 2.01M | const TargetRegisterInfo *RegInfo) { |
1852 | 2.01M | if (!TargetRegisterInfo::isPhysicalRegister(Reg)) |
1853 | 169k | RegInfo = nullptr; |
1854 | 6.23M | for (MachineOperand &MO : operands()) { |
1855 | 6.23M | if (!MO.isReg() || !MO.isUse()4.26M || !MO.isKill()2.43M ) |
1856 | 5.74M | continue; |
1857 | 490k | unsigned OpReg = MO.getReg(); |
1858 | 490k | if ((RegInfo && RegInfo->regsOverlap(Reg, OpReg)466k ) || Reg == OpReg463k ) |
1859 | 26.1k | MO.setIsKill(false); |
1860 | 490k | } |
1861 | 2.01M | } |
1862 | | |
/// Mark Reg as dead in this instruction's defs. Returns true if a dead def
/// was recorded or already implied by a dead super-register def. When
/// AddIfNotFound is set and no matching def exists, an implicit dead def is
/// appended. Redundant sub-register dead defs are trimmed.
bool MachineInstr::addRegisterDead(unsigned Reg,
                                   const TargetRegisterInfo *RegInfo,
                                   bool AddIfNotFound) {
  bool isPhysReg = TargetRegisterInfo::isPhysicalRegister(Reg);
  // Aliases only matter for physical registers.
  bool hasAliases = isPhysReg &&
    MCRegAliasIterator(Reg, RegInfo, false).isValid();
  bool Found = false;
  SmallVector<unsigned,4> DeadOps;
  for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
    MachineOperand &MO = getOperand(i);
    if (!MO.isReg() || !MO.isDef())
      continue;
    unsigned MOReg = MO.getReg();
    if (!MOReg)
      continue;

    if (MOReg == Reg) {
      MO.setIsDead();
      Found = true;
    } else if (hasAliases && MO.isDead() &&
               TargetRegisterInfo::isPhysicalRegister(MOReg)) {
      // There exists a super-register that's marked dead.
      if (RegInfo->isSuperRegister(Reg, MOReg))
        return true;
      // Record sub-register dead defs for removal below.
      if (RegInfo->isSubRegister(Reg, MOReg))
        DeadOps.push_back(i);
    }
  }

  // Trim unneeded dead operands. Process back-to-front so earlier recorded
  // indices stay valid as operands are removed.
  while (!DeadOps.empty()) {
    unsigned OpIdx = DeadOps.back();
    if (getOperand(OpIdx).isImplicit() &&
        (!isInlineAsm() || findInlineAsmFlagIdx(OpIdx) < 0))
      RemoveOperand(OpIdx);
    else
      // Explicit (or asm-described) operands can't be removed; clear the flag.
      getOperand(OpIdx).setIsDead(false);
    DeadOps.pop_back();
  }

  // If not found, this means an alias of one of the operands is dead. Add a
  // new implicit operand if required.
  if (Found || !AddIfNotFound)
    return Found;

  addOperand(MachineOperand::CreateReg(Reg,
                                       true  /*IsDef*/,
                                       true  /*IsImp*/,
                                       false /*IsKill*/,
                                       true  /*IsDead*/));
  return true;
}
1915 | | |
1916 | 169k | void MachineInstr::clearRegisterDeads(unsigned Reg) { |
1917 | 505k | for (MachineOperand &MO : operands()) { |
1918 | 505k | if (!MO.isReg() || !MO.isDef()337k || MO.getReg() != Reg180k ) |
1919 | 336k | continue; |
1920 | 168k | MO.setIsDead(false); |
1921 | 168k | } |
1922 | 169k | } |
1923 | | |
1924 | 724k | void MachineInstr::setRegisterDefReadUndef(unsigned Reg, bool IsUndef) { |
1925 | 3.13M | for (MachineOperand &MO : operands()) { |
1926 | 3.13M | if (!MO.isReg() || !MO.isDef()2.08M || MO.getReg() != Reg851k || MO.getSubReg() == 0724k ) |
1927 | 2.98M | continue; |
1928 | 155k | MO.setIsUndef(IsUndef); |
1929 | 155k | } |
1930 | 724k | } |
1931 | | |
1932 | | void MachineInstr::addRegisterDefined(unsigned Reg, |
1933 | 1.17M | const TargetRegisterInfo *RegInfo) { |
1934 | 1.17M | if (TargetRegisterInfo::isPhysicalRegister(Reg)) { |
1935 | 1.17M | MachineOperand *MO = findRegisterDefOperand(Reg, false, false, RegInfo); |
1936 | 1.17M | if (MO) |
1937 | 75 | return; |
1938 | 0 | } else { |
1939 | 0 | for (const MachineOperand &MO : operands()) { |
1940 | 0 | if (MO.isReg() && MO.getReg() == Reg && MO.isDef() && |
1941 | 0 | MO.getSubReg() == 0) |
1942 | 0 | return; |
1943 | 0 | } |
1944 | 0 | } |
1945 | 1.17M | addOperand(MachineOperand::CreateReg(Reg, |
1946 | 1.16M | true /*IsDef*/, |
1947 | 1.16M | true /*IsImp*/)); |
1948 | 1.16M | } |
1949 | | |
1950 | | void MachineInstr::setPhysRegsDeadExcept(ArrayRef<unsigned> UsedRegs, |
1951 | 2.14M | const TargetRegisterInfo &TRI) { |
1952 | 2.14M | bool HasRegMask = false; |
1953 | 11.0M | for (MachineOperand &MO : operands()) { |
1954 | 11.0M | if (MO.isRegMask()) { |
1955 | 395k | HasRegMask = true; |
1956 | 395k | continue; |
1957 | 395k | } |
1958 | 10.6M | if (!MO.isReg() || !MO.isDef()7.40M ) continue7.42M ; |
1959 | 3.20M | unsigned Reg = MO.getReg(); |
1960 | 3.20M | if (!TargetRegisterInfo::isPhysicalRegister(Reg)) continue747k ; |
1961 | 2.45M | // If there are no uses, including partial uses, the def is dead. |
1962 | 2.45M | if (llvm::none_of(UsedRegs, |
1963 | 2.45M | [&](unsigned Use) { return TRI.regsOverlap(Use, Reg); }1.20M )) |
1964 | 2.02M | MO.setIsDead(); |
1965 | 2.45M | } |
1966 | 2.14M | |
1967 | 2.14M | // This is a call with a register mask operand. |
1968 | 2.14M | // Mask clobbers are always dead, so add defs for the non-dead defines. |
1969 | 2.14M | if (HasRegMask) |
1970 | 395k | for (ArrayRef<unsigned>::iterator I = UsedRegs.begin(), E = UsedRegs.end(); |
1971 | 1.13M | I != E; ++I738k ) |
1972 | 738k | addRegisterDefined(*I, &TRI); |
1973 | 2.14M | } |
1974 | | |
1975 | | unsigned |
1976 | 28.6M | MachineInstrExpressionTrait::getHashValue(const MachineInstr* const &MI) { |
1977 | 28.6M | // Build up a buffer of hash code components. |
1978 | 28.6M | SmallVector<size_t, 8> HashComponents; |
1979 | 28.6M | HashComponents.reserve(MI->getNumOperands() + 1); |
1980 | 28.6M | HashComponents.push_back(MI->getOpcode()); |
1981 | 106M | for (const MachineOperand &MO : MI->operands()) { |
1982 | 106M | if (MO.isReg() && MO.isDef()72.9M && |
1983 | 106M | TargetRegisterInfo::isVirtualRegister(MO.getReg())34.3M ) |
1984 | 25.9M | continue; // Skip virtual register defs. |
1985 | 80.7M | |
1986 | 80.7M | HashComponents.push_back(hash_value(MO)); |
1987 | 80.7M | } |
1988 | 28.6M | return hash_combine_range(HashComponents.begin(), HashComponents.end()); |
1989 | 28.6M | } |
1990 | | |
1991 | 111 | void MachineInstr::emitError(StringRef Msg) const { |
1992 | 111 | // Find the source location cookie. |
1993 | 111 | unsigned LocCookie = 0; |
1994 | 111 | const MDNode *LocMD = nullptr; |
1995 | 2.97k | for (unsigned i = getNumOperands(); i != 0; --i2.86k ) { |
1996 | 2.88k | if (getOperand(i-1).isMetadata() && |
1997 | 2.88k | (LocMD = getOperand(i-1).getMetadata())12 && |
1998 | 2.88k | LocMD->getNumOperands() != 012 ) { |
1999 | 12 | if (const ConstantInt *CI = |
2000 | 12 | mdconst::dyn_extract<ConstantInt>(LocMD->getOperand(0))) { |
2001 | 12 | LocCookie = CI->getZExtValue(); |
2002 | 12 | break; |
2003 | 12 | } |
2004 | 12 | } |
2005 | 2.88k | } |
2006 | 111 | |
2007 | 111 | if (const MachineBasicBlock *MBB = getParent()) |
2008 | 111 | if (const MachineFunction *MF = MBB->getParent()) |
2009 | 111 | return MF->getMMI().getModule()->getContext().emitError(LocCookie, Msg); |
2010 | 0 | report_fatal_error(Msg); |
2011 | 0 | } |
2012 | | |
2013 | | MachineInstrBuilder llvm::BuildMI(MachineFunction &MF, const DebugLoc &DL, |
2014 | | const MCInstrDesc &MCID, bool IsIndirect, |
2015 | | unsigned Reg, const MDNode *Variable, |
2016 | 2.33k | const MDNode *Expr) { |
2017 | 2.33k | assert(isa<DILocalVariable>(Variable) && "not a variable"); |
2018 | 2.33k | assert(cast<DIExpression>(Expr)->isValid() && "not an expression"); |
2019 | 2.33k | assert(cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(DL) && |
2020 | 2.33k | "Expected inlined-at fields to agree"); |
2021 | 2.33k | auto MIB = BuildMI(MF, DL, MCID).addReg(Reg, RegState::Debug); |
2022 | 2.33k | if (IsIndirect) |
2023 | 270 | MIB.addImm(0U); |
2024 | 2.06k | else |
2025 | 2.06k | MIB.addReg(0U, RegState::Debug); |
2026 | 2.33k | return MIB.addMetadata(Variable).addMetadata(Expr); |
2027 | 2.33k | } |
2028 | | |
2029 | | MachineInstrBuilder llvm::BuildMI(MachineFunction &MF, const DebugLoc &DL, |
2030 | | const MCInstrDesc &MCID, bool IsIndirect, |
2031 | | MachineOperand &MO, const MDNode *Variable, |
2032 | 5.41k | const MDNode *Expr) { |
2033 | 5.41k | assert(isa<DILocalVariable>(Variable) && "not a variable"); |
2034 | 5.41k | assert(cast<DIExpression>(Expr)->isValid() && "not an expression"); |
2035 | 5.41k | assert(cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(DL) && |
2036 | 5.41k | "Expected inlined-at fields to agree"); |
2037 | 5.41k | if (MO.isReg()) |
2038 | 1.12k | return BuildMI(MF, DL, MCID, IsIndirect, MO.getReg(), Variable, Expr); |
2039 | 4.29k | |
2040 | 4.29k | auto MIB = BuildMI(MF, DL, MCID).add(MO); |
2041 | 4.29k | if (IsIndirect) |
2042 | 72 | MIB.addImm(0U); |
2043 | 4.22k | else |
2044 | 4.22k | MIB.addReg(0U, RegState::Debug); |
2045 | 4.29k | return MIB.addMetadata(Variable).addMetadata(Expr); |
2046 | 4.29k | } |
2047 | | |
2048 | | MachineInstrBuilder llvm::BuildMI(MachineBasicBlock &BB, |
2049 | | MachineBasicBlock::iterator I, |
2050 | | const DebugLoc &DL, const MCInstrDesc &MCID, |
2051 | | bool IsIndirect, unsigned Reg, |
2052 | 1.08k | const MDNode *Variable, const MDNode *Expr) { |
2053 | 1.08k | MachineFunction &MF = *BB.getParent(); |
2054 | 1.08k | MachineInstr *MI = BuildMI(MF, DL, MCID, IsIndirect, Reg, Variable, Expr); |
2055 | 1.08k | BB.insert(I, MI); |
2056 | 1.08k | return MachineInstrBuilder(MF, MI); |
2057 | 1.08k | } |
2058 | | |
2059 | | MachineInstrBuilder llvm::BuildMI(MachineBasicBlock &BB, |
2060 | | MachineBasicBlock::iterator I, |
2061 | | const DebugLoc &DL, const MCInstrDesc &MCID, |
2062 | | bool IsIndirect, MachineOperand &MO, |
2063 | 5.13k | const MDNode *Variable, const MDNode *Expr) { |
2064 | 5.13k | MachineFunction &MF = *BB.getParent(); |
2065 | 5.13k | MachineInstr *MI = BuildMI(MF, DL, MCID, IsIndirect, MO, Variable, Expr); |
2066 | 5.13k | BB.insert(I, MI); |
2067 | 5.13k | return MachineInstrBuilder(MF, *MI); |
2068 | 5.13k | } |
2069 | | |
2070 | | /// Compute the new DIExpression to use with a DBG_VALUE for a spill slot. |
2071 | | /// This prepends DW_OP_deref when spilling an indirect DBG_VALUE. |
2072 | 30 | static const DIExpression *computeExprForSpill(const MachineInstr &MI) { |
2073 | 30 | assert(MI.getOperand(0).isReg() && "can't spill non-register"); |
2074 | 30 | assert(MI.getDebugVariable()->isValidLocationForIntrinsic(MI.getDebugLoc()) && |
2075 | 30 | "Expected inlined-at fields to agree"); |
2076 | 30 | |
2077 | 30 | const DIExpression *Expr = MI.getDebugExpression(); |
2078 | 30 | if (MI.isIndirectDebugValue()) { |
2079 | 16 | assert(MI.getOperand(1).getImm() == 0 && "DBG_VALUE with nonzero offset"); |
2080 | 16 | Expr = DIExpression::prepend(Expr, DIExpression::DerefBefore); |
2081 | 16 | } |
2082 | 30 | return Expr; |
2083 | 30 | } |
2084 | | |
2085 | | MachineInstr *llvm::buildDbgValueForSpill(MachineBasicBlock &BB, |
2086 | | MachineBasicBlock::iterator I, |
2087 | | const MachineInstr &Orig, |
2088 | 28 | int FrameIndex) { |
2089 | 28 | const DIExpression *Expr = computeExprForSpill(Orig); |
2090 | 28 | return BuildMI(BB, I, Orig.getDebugLoc(), Orig.getDesc()) |
2091 | 28 | .addFrameIndex(FrameIndex) |
2092 | 28 | .addImm(0U) |
2093 | 28 | .addMetadata(Orig.getDebugVariable()) |
2094 | 28 | .addMetadata(Expr); |
2095 | 28 | } |
2096 | | |
2097 | 2 | void llvm::updateDbgValueForSpill(MachineInstr &Orig, int FrameIndex) { |
2098 | 2 | const DIExpression *Expr = computeExprForSpill(Orig); |
2099 | 2 | Orig.getOperand(0).ChangeToFrameIndex(FrameIndex); |
2100 | 2 | Orig.getOperand(1).ChangeToImmediate(0U); |
2101 | 2 | Orig.getOperand(3).setMetadata(Expr); |
2102 | 2 | } |
2103 | | |
2104 | | void MachineInstr::collectDebugValues( |
2105 | 3.28M | SmallVectorImpl<MachineInstr *> &DbgValues) { |
2106 | 3.28M | MachineInstr &MI = *this; |
2107 | 3.28M | if (!MI.getOperand(0).isReg()) |
2108 | 2 | return; |
2109 | 3.28M | |
2110 | 3.28M | MachineBasicBlock::iterator DI = MI; ++DI; |
2111 | 3.28M | for (MachineBasicBlock::iterator DE = MI.getParent()->end(); |
2112 | 3.28M | DI != DE; ++DI67 ) { |
2113 | 3.28M | if (!DI->isDebugValue()) |
2114 | 3.28M | return; |
2115 | 67 | if (DI->getOperand(0).isReg() && |
2116 | 67 | DI->getOperand(0).getReg() == MI.getOperand(0).getReg()60 ) |
2117 | 39 | DbgValues.push_back(&*DI); |
2118 | 67 | } |
2119 | 3.28M | } |
2120 | | |
2121 | 362k | void MachineInstr::changeDebugValuesDefReg(unsigned Reg) { |
2122 | 362k | // Collect matching debug values. |
2123 | 362k | SmallVector<MachineInstr *, 2> DbgValues; |
2124 | 362k | collectDebugValues(DbgValues); |
2125 | 362k | |
2126 | 362k | // Propagate Reg to debug value instructions. |
2127 | 362k | for (auto *DBI : DbgValues) |
2128 | 12 | DBI->getOperand(0).setReg(Reg); |
2129 | 362k | } |
2130 | | |
2131 | | using MMOList = SmallVector<const MachineMemOperand *, 2>; |
2132 | | |
2133 | | static unsigned getSpillSlotSize(MMOList &Accesses, |
2134 | 106k | const MachineFrameInfo &MFI) { |
2135 | 106k | unsigned Size = 0; |
2136 | 106k | for (auto A : Accesses) |
2137 | 111k | if (MFI.isSpillSlotObjectIndex( |
2138 | 111k | cast<FixedStackPseudoSourceValue>(A->getPseudoValue()) |
2139 | 111k | ->getFrameIndex())) |
2140 | 47.6k | Size += A->getSize(); |
2141 | 106k | return Size; |
2142 | 106k | } |
2143 | | |
2144 | | Optional<unsigned> |
2145 | 1.94M | MachineInstr::getSpillSize(const TargetInstrInfo *TII) const { |
2146 | 1.94M | int FI; |
2147 | 1.94M | if (TII->isStoreToStackSlotPostFE(*this, FI)) { |
2148 | 19.2k | const MachineFrameInfo &MFI = getMF()->getFrameInfo(); |
2149 | 19.2k | if (MFI.isSpillSlotObjectIndex(FI)) |
2150 | 11.4k | return (*memoperands_begin())->getSize(); |
2151 | 1.93M | } |
2152 | 1.93M | return None; |
2153 | 1.93M | } |
2154 | | |
2155 | | Optional<unsigned> |
2156 | 1.93M | MachineInstr::getFoldedSpillSize(const TargetInstrInfo *TII) const { |
2157 | 1.93M | MMOList Accesses; |
2158 | 1.93M | if (TII->hasStoreToStackSlot(*this, Accesses)) |
2159 | 36.6k | return getSpillSlotSize(Accesses, getMF()->getFrameInfo()); |
2160 | 1.89M | return None; |
2161 | 1.89M | } |
2162 | | |
2163 | | Optional<unsigned> |
2164 | 2.02M | MachineInstr::getRestoreSize(const TargetInstrInfo *TII) const { |
2165 | 2.02M | int FI; |
2166 | 2.02M | if (TII->isLoadFromStackSlotPostFE(*this, FI)) { |
2167 | 34.2k | const MachineFrameInfo &MFI = getMF()->getFrameInfo(); |
2168 | 34.2k | if (MFI.isSpillSlotObjectIndex(FI)) |
2169 | 9.87k | return (*memoperands_begin())->getSize(); |
2170 | 2.01M | } |
2171 | 2.01M | return None; |
2172 | 2.01M | } |
2173 | | |
2174 | | Optional<unsigned> |
2175 | 2.01M | MachineInstr::getFoldedRestoreSize(const TargetInstrInfo *TII) const { |
2176 | 2.01M | MMOList Accesses; |
2177 | 2.01M | if (TII->hasLoadFromStackSlot(*this, Accesses)) |
2178 | 69.4k | return getSpillSlotSize(Accesses, getMF()->getFrameInfo()); |
2179 | 1.94M | return None; |
2180 | 1.94M | } |