/Users/buildslave/jenkins/workspace/clang-stage2-coverage-R/llvm/lib/Target/PowerPC/PPCInstrInfo.cpp
Line | Count | Source (jump to first uncovered line) |
1 | | //===-- PPCInstrInfo.cpp - PowerPC Instruction Information ----------------===// |
2 | | // |
3 | | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. |
4 | | // See https://llvm.org/LICENSE.txt for license information. |
5 | | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception |
6 | | // |
7 | | //===----------------------------------------------------------------------===// |
8 | | // |
9 | | // This file contains the PowerPC implementation of the TargetInstrInfo class. |
10 | | // |
11 | | //===----------------------------------------------------------------------===// |
12 | | |
13 | | #include "PPCInstrInfo.h" |
14 | | #include "MCTargetDesc/PPCPredicates.h" |
15 | | #include "PPC.h" |
16 | | #include "PPCHazardRecognizers.h" |
17 | | #include "PPCInstrBuilder.h" |
18 | | #include "PPCMachineFunctionInfo.h" |
19 | | #include "PPCTargetMachine.h" |
20 | | #include "llvm/ADT/STLExtras.h" |
21 | | #include "llvm/ADT/Statistic.h" |
22 | | #include "llvm/CodeGen/LiveIntervals.h" |
23 | | #include "llvm/CodeGen/MachineFrameInfo.h" |
24 | | #include "llvm/CodeGen/MachineFunctionPass.h" |
25 | | #include "llvm/CodeGen/MachineInstrBuilder.h" |
26 | | #include "llvm/CodeGen/MachineMemOperand.h" |
27 | | #include "llvm/CodeGen/MachineRegisterInfo.h" |
28 | | #include "llvm/CodeGen/PseudoSourceValue.h" |
29 | | #include "llvm/CodeGen/ScheduleDAG.h" |
30 | | #include "llvm/CodeGen/SlotIndexes.h" |
31 | | #include "llvm/CodeGen/StackMaps.h" |
32 | | #include "llvm/MC/MCAsmInfo.h" |
33 | | #include "llvm/MC/MCInst.h" |
34 | | #include "llvm/Support/CommandLine.h" |
35 | | #include "llvm/Support/Debug.h" |
36 | | #include "llvm/Support/ErrorHandling.h" |
37 | | #include "llvm/Support/TargetRegistry.h" |
38 | | #include "llvm/Support/raw_ostream.h" |
39 | | |
40 | | using namespace llvm; |
41 | | |
42 | | #define DEBUG_TYPE "ppc-instr-info" |
43 | | |
44 | | #define GET_INSTRMAP_INFO |
45 | | #define GET_INSTRINFO_CTOR_DTOR |
46 | | #include "PPCGenInstrInfo.inc" |
47 | | |
// Counters reported under -stats. Each tracks one spill- or
// peephole-related event category produced by this file.
STATISTIC(NumStoreSPILLVSRRCAsVec,
          "Number of spillvsrrc spilled to stack as vec");
STATISTIC(NumStoreSPILLVSRRCAsGpr,
          "Number of spillvsrrc spilled to stack as gpr");
STATISTIC(NumGPRtoVSRSpill, "Number of gpr spills to spillvsrrc");
STATISTIC(CmpIselsConverted,
          "Number of ISELs that depend on comparison of constants converted");
STATISTIC(MissedConvertibleImmediateInstrs,
          "Number of compare-immediate instructions fed by constants");
STATISTIC(NumRcRotatesConvertedToRcAnd,
          "Number of record-form rotates converted to record-form andi");
59 | | |
60 | | static cl:: |
61 | | opt<bool> DisableCTRLoopAnal("disable-ppc-ctrloop-analysis", cl::Hidden, |
62 | | cl::desc("Disable analysis for CTR loops")); |
63 | | |
64 | | static cl::opt<bool> DisableCmpOpt("disable-ppc-cmp-opt", |
65 | | cl::desc("Disable compare instruction optimization"), cl::Hidden); |
66 | | |
67 | | static cl::opt<bool> VSXSelfCopyCrash("crash-on-ppc-vsx-self-copy", |
68 | | cl::desc("Causes the backend to crash instead of generating a nop VSX copy"), |
69 | | cl::Hidden); |
70 | | |
71 | | static cl::opt<bool> |
72 | | UseOldLatencyCalc("ppc-old-latency-calc", cl::Hidden, |
73 | | cl::desc("Use the old (incorrect) instruction latency calculation")); |
74 | | |
// Index into the OpcodesForSpill array.
//
// One key per spillable register class; the store/load spill-opcode arrays
// (see getStoreOpcodesForSpillArray()/getLoadOpcodesForSpillArray() users
// below) are indexed by these values, and SOK_LastOpcodeSpill doubles as the
// array length.
enum SpillOpcodeKey {
  SOK_Int4Spill,
  SOK_Int8Spill,
  SOK_Float8Spill,
  SOK_Float4Spill,
  SOK_CRSpill,
  SOK_CRBitSpill,
  SOK_VRVectorSpill,
  SOK_VSXVectorSpill,
  SOK_VectorFloat8Spill,
  SOK_VectorFloat4Spill,
  SOK_VRSaveSpill,
  SOK_QuadFloat8Spill,
  SOK_QuadFloat4Spill,
  SOK_QuadBitSpill,
  SOK_SpillToVSR,
  SOK_SPESpill,
  SOK_SPE4Spill,
  SOK_LastOpcodeSpill // This must be last on the enum.
};
96 | | |
// Pin the vtable to this file.
// (Out-of-line definition of a virtual method so the compiler emits the
// vtable in exactly one translation unit.)
void PPCInstrInfo::anchor() {}
99 | | |
// Construct the PPC instruction info, selecting the 64- or 32-bit return
// opcode from the subtarget and forwarding the call-frame pseudo opcodes to
// the TableGen'erated base class.
PPCInstrInfo::PPCInstrInfo(PPCSubtarget &STI)
    : PPCGenInstrInfo(PPC::ADJCALLSTACKDOWN, PPC::ADJCALLSTACKUP,
                      /* CatchRetOpcode */ -1,
                      STI.isPPC64() ? PPC::BLR8 : PPC::BLR),
      Subtarget(STI), RI(STI.getTargetMachine()) {}
105 | | |
106 | | /// CreateTargetHazardRecognizer - Return the hazard recognizer to use for |
107 | | /// this target when scheduling the DAG. |
108 | | ScheduleHazardRecognizer * |
109 | | PPCInstrInfo::CreateTargetHazardRecognizer(const TargetSubtargetInfo *STI, |
110 | 0 | const ScheduleDAG *DAG) const { |
111 | 0 | unsigned Directive = |
112 | 0 | static_cast<const PPCSubtarget *>(STI)->getDarwinDirective(); |
113 | 0 | if (Directive == PPC::DIR_440 || Directive == PPC::DIR_A2 || |
114 | 0 | Directive == PPC::DIR_E500mc || Directive == PPC::DIR_E5500) { |
115 | 0 | const InstrItineraryData *II = |
116 | 0 | static_cast<const PPCSubtarget *>(STI)->getInstrItineraryData(); |
117 | 0 | return new ScoreboardHazardRecognizer(II, DAG); |
118 | 0 | } |
119 | 0 | |
120 | 0 | return TargetInstrInfo::CreateTargetHazardRecognizer(STI, DAG); |
121 | 0 | } |
122 | | |
123 | | /// CreateTargetPostRAHazardRecognizer - Return the postRA hazard recognizer |
124 | | /// to use for this target when scheduling the DAG. |
125 | | ScheduleHazardRecognizer * |
126 | | PPCInstrInfo::CreateTargetPostRAHazardRecognizer(const InstrItineraryData *II, |
127 | 0 | const ScheduleDAG *DAG) const { |
128 | 0 | unsigned Directive = |
129 | 0 | DAG->MF.getSubtarget<PPCSubtarget>().getDarwinDirective(); |
130 | 0 |
|
131 | 0 | // FIXME: Leaving this as-is until we have POWER9 scheduling info |
132 | 0 | if (Directive == PPC::DIR_PWR7 || Directive == PPC::DIR_PWR8) |
133 | 0 | return new PPCDispatchGroupSBHazardRecognizer(II, DAG); |
134 | 0 | |
135 | 0 | // Most subtargets use a PPC970 recognizer. |
136 | 0 | if (Directive != PPC::DIR_440 && Directive != PPC::DIR_A2 && |
137 | 0 | Directive != PPC::DIR_E500mc && Directive != PPC::DIR_E5500) { |
138 | 0 | assert(DAG->TII && "No InstrInfo?"); |
139 | 0 |
|
140 | 0 | return new PPCHazardRecognizer970(*DAG); |
141 | 0 | } |
142 | 0 | |
143 | 0 | return new ScoreboardHazardRecognizer(II, DAG); |
144 | 0 | } |
145 | | |
146 | | unsigned PPCInstrInfo::getInstrLatency(const InstrItineraryData *ItinData, |
147 | | const MachineInstr &MI, |
148 | 196k | unsigned *PredCost) const { |
149 | 196k | if (!ItinData || UseOldLatencyCalc) |
150 | 0 | return PPCGenInstrInfo::getInstrLatency(ItinData, MI, PredCost); |
151 | 196k | |
152 | 196k | // The default implementation of getInstrLatency calls getStageLatency, but |
153 | 196k | // getStageLatency does not do the right thing for us. While we have |
154 | 196k | // itinerary, most cores are fully pipelined, and so the itineraries only |
155 | 196k | // express the first part of the pipeline, not every stage. Instead, we need |
156 | 196k | // to use the listed output operand cycle number (using operand 0 here, which |
157 | 196k | // is an output). |
158 | 196k | |
159 | 196k | unsigned Latency = 1; |
160 | 196k | unsigned DefClass = MI.getDesc().getSchedClass(); |
161 | 774k | for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i577k ) { |
162 | 577k | const MachineOperand &MO = MI.getOperand(i); |
163 | 577k | if (!MO.isReg() || !MO.isDef()474k || MO.isImplicit()194k ) |
164 | 401k | continue; |
165 | 175k | |
166 | 175k | int Cycle = ItinData->getOperandCycle(DefClass, i); |
167 | 175k | if (Cycle < 0) |
168 | 109k | continue; |
169 | 66.0k | |
170 | 66.0k | Latency = std::max(Latency, (unsigned) Cycle); |
171 | 66.0k | } |
172 | 196k | |
173 | 196k | return Latency; |
174 | 196k | } |
175 | | |
// Compute the def-to-use latency between DefMI's operand DefIdx and UseMI's
// operand UseIdx. Starts from the TableGen'erated answer and then, when a
// condition-register definition feeds a branch, adds the extra CR-to-branch
// delay present on several cores.
int PPCInstrInfo::getOperandLatency(const InstrItineraryData *ItinData,
                                    const MachineInstr &DefMI, unsigned DefIdx,
                                    const MachineInstr &UseMI,
                                    unsigned UseIdx) const {
  int Latency = PPCGenInstrInfo::getOperandLatency(ItinData, DefMI, DefIdx,
                                                   UseMI, UseIdx);

  // Without a parent block we cannot query register-class info below.
  if (!DefMI.getParent())
    return Latency;

  const MachineOperand &DefMO = DefMI.getOperand(DefIdx);
  unsigned Reg = DefMO.getReg();

  // Determine whether the defined register is a condition register (CR or
  // CR bit), consulting MRI for virtual registers and the static register
  // classes for physical ones.
  bool IsRegCR;
  if (TargetRegisterInfo::isVirtualRegister(Reg)) {
    const MachineRegisterInfo *MRI =
        &DefMI.getParent()->getParent()->getRegInfo();
    IsRegCR = MRI->getRegClass(Reg)->hasSuperClassEq(&PPC::CRRCRegClass) ||
              MRI->getRegClass(Reg)->hasSuperClassEq(&PPC::CRBITRCRegClass);
  } else {
    IsRegCR = PPC::CRRCRegClass.contains(Reg) ||
              PPC::CRBITRCRegClass.contains(Reg);
  }

  if (UseMI.isBranch() && IsRegCR) {
    // A negative latency means "unknown"; fall back to the whole-instruction
    // latency of the defining instruction.
    if (Latency < 0)
      Latency = getInstrLatency(ItinData, DefMI);

    // On some cores, there is an additional delay between writing to a condition
    // register, and using it from a branch.
    unsigned Directive = Subtarget.getDarwinDirective();
    switch (Directive) {
    default: break;
    // All of the following cores share the same 2-cycle penalty
    // (intentional case fallthrough).
    case PPC::DIR_7400:
    case PPC::DIR_750:
    case PPC::DIR_970:
    case PPC::DIR_E5500:
    case PPC::DIR_PWR4:
    case PPC::DIR_PWR5:
    case PPC::DIR_PWR5X:
    case PPC::DIR_PWR6:
    case PPC::DIR_PWR6X:
    case PPC::DIR_PWR7:
    case PPC::DIR_PWR8:
    // FIXME: Is this needed for POWER9?
      Latency += 2;
      break;
    }
  }

  return Latency;
}
228 | | |
229 | | // This function does not list all associative and commutative operations, but |
230 | | // only those worth feeding through the machine combiner in an attempt to |
231 | | // reduce the critical path. Mostly, this means floating-point operations, |
232 | | // because they have high latencies (compared to other operations, such and |
233 | | // and/or, which are also associative and commutative, but have low latencies). |
234 | 307 | bool PPCInstrInfo::isAssociativeAndCommutative(const MachineInstr &Inst) const { |
235 | 307 | switch (Inst.getOpcode()) { |
236 | 307 | // FP Add: |
237 | 307 | case PPC::FADD: |
238 | 80 | case PPC::FADDS: |
239 | 80 | // FP Multiply: |
240 | 80 | case PPC::FMUL: |
241 | 80 | case PPC::FMULS: |
242 | 80 | // Altivec Add: |
243 | 80 | case PPC::VADDFP: |
244 | 80 | // VSX Add: |
245 | 80 | case PPC::XSADDDP: |
246 | 80 | case PPC::XVADDDP: |
247 | 80 | case PPC::XVADDSP: |
248 | 80 | case PPC::XSADDSP: |
249 | 80 | // VSX Multiply: |
250 | 80 | case PPC::XSMULDP: |
251 | 80 | case PPC::XVMULDP: |
252 | 80 | case PPC::XVMULSP: |
253 | 80 | case PPC::XSMULSP: |
254 | 80 | // QPX Add: |
255 | 80 | case PPC::QVFADD: |
256 | 80 | case PPC::QVFADDS: |
257 | 80 | case PPC::QVFADDSs: |
258 | 80 | // QPX Multiply: |
259 | 80 | case PPC::QVFMUL: |
260 | 80 | case PPC::QVFMULS: |
261 | 80 | case PPC::QVFMULSs: |
262 | 80 | return true; |
263 | 227 | default: |
264 | 227 | return false; |
265 | 307 | } |
266 | 307 | } |
267 | | |
268 | | bool PPCInstrInfo::getMachineCombinerPatterns( |
269 | | MachineInstr &Root, |
270 | 135k | SmallVectorImpl<MachineCombinerPattern> &Patterns) const { |
271 | 135k | // Using the machine combiner in this way is potentially expensive, so |
272 | 135k | // restrict to when aggressive optimizations are desired. |
273 | 135k | if (Subtarget.getTargetMachine().getOptLevel() != CodeGenOpt::Aggressive) |
274 | 127k | return false; |
275 | 7.77k | |
276 | 7.77k | // FP reassociation is only legal when we don't need strict IEEE semantics. |
277 | 7.77k | if (!Root.getParent()->getParent()->getTarget().Options.UnsafeFPMath) |
278 | 7.46k | return false; |
279 | 307 | |
280 | 307 | return TargetInstrInfo::getMachineCombinerPatterns(Root, Patterns); |
281 | 307 | } |
282 | | |
283 | | // Detect 32 -> 64-bit extensions where we may reuse the low sub-register. |
284 | | bool PPCInstrInfo::isCoalescableExtInstr(const MachineInstr &MI, |
285 | | unsigned &SrcReg, unsigned &DstReg, |
286 | 126k | unsigned &SubIdx) const { |
287 | 126k | switch (MI.getOpcode()) { |
288 | 126k | default: return false125k ; |
289 | 126k | case PPC::EXTSW: |
290 | 426 | case PPC::EXTSW_32: |
291 | 426 | case PPC::EXTSW_32_64: |
292 | 426 | SrcReg = MI.getOperand(1).getReg(); |
293 | 426 | DstReg = MI.getOperand(0).getReg(); |
294 | 426 | SubIdx = PPC::sub_32; |
295 | 426 | return true; |
296 | 126k | } |
297 | 126k | } |
298 | | |
299 | | unsigned PPCInstrInfo::isLoadFromStackSlot(const MachineInstr &MI, |
300 | 11.7k | int &FrameIndex) const { |
301 | 11.7k | unsigned Opcode = MI.getOpcode(); |
302 | 11.7k | const unsigned *OpcodesForSpill = getLoadOpcodesForSpillArray(); |
303 | 11.7k | const unsigned *End = OpcodesForSpill + SOK_LastOpcodeSpill; |
304 | 11.7k | |
305 | 11.7k | if (End != std::find(OpcodesForSpill, End, Opcode)) { |
306 | 703 | // Check for the operands added by addFrameReference (the immediate is the |
307 | 703 | // offset which defaults to 0). |
308 | 703 | if (MI.getOperand(1).isImm() && !MI.getOperand(1).getImm()480 && |
309 | 703 | MI.getOperand(2).isFI()240 ) { |
310 | 163 | FrameIndex = MI.getOperand(2).getIndex(); |
311 | 163 | return MI.getOperand(0).getReg(); |
312 | 163 | } |
313 | 11.6k | } |
314 | 11.6k | return 0; |
315 | 11.6k | } |
316 | | |
317 | | // For opcodes with the ReMaterializable flag set, this function is called to |
318 | | // verify the instruction is really rematable. |
319 | | bool PPCInstrInfo::isReallyTriviallyReMaterializable(const MachineInstr &MI, |
320 | 5.67k | AliasAnalysis *AA) const { |
321 | 5.67k | switch (MI.getOpcode()) { |
322 | 5.67k | default: |
323 | 0 | // This function should only be called for opcodes with the ReMaterializable |
324 | 0 | // flag set. |
325 | 0 | llvm_unreachable("Unknown rematerializable operation!"); |
326 | 5.67k | break0 ; |
327 | 5.67k | case PPC::LI: |
328 | 5.67k | case PPC::LI8: |
329 | 5.67k | case PPC::LIS: |
330 | 5.67k | case PPC::LIS8: |
331 | 5.67k | case PPC::QVGPCI: |
332 | 5.67k | case PPC::ADDIStocHA8: |
333 | 5.67k | case PPC::ADDItocL: |
334 | 5.67k | case PPC::LOAD_STACK_GUARD: |
335 | 5.67k | case PPC::XXLXORz: |
336 | 5.67k | case PPC::XXLXORspz: |
337 | 5.67k | case PPC::XXLXORdpz: |
338 | 5.67k | case PPC::V_SET0B: |
339 | 5.67k | case PPC::V_SET0H: |
340 | 5.67k | case PPC::V_SET0: |
341 | 5.67k | case PPC::V_SETALLONESB: |
342 | 5.67k | case PPC::V_SETALLONESH: |
343 | 5.67k | case PPC::V_SETALLONES: |
344 | 5.67k | case PPC::CRSET: |
345 | 5.67k | case PPC::CRUNSET: |
346 | 5.67k | return true; |
347 | 0 | } |
348 | 0 | return false; |
349 | 0 | } |
350 | | |
351 | | unsigned PPCInstrInfo::isStoreToStackSlot(const MachineInstr &MI, |
352 | 9.24k | int &FrameIndex) const { |
353 | 9.24k | unsigned Opcode = MI.getOpcode(); |
354 | 9.24k | const unsigned *OpcodesForSpill = getStoreOpcodesForSpillArray(); |
355 | 9.24k | const unsigned *End = OpcodesForSpill + SOK_LastOpcodeSpill; |
356 | 9.24k | |
357 | 9.24k | if (End != std::find(OpcodesForSpill, End, Opcode)) { |
358 | 415 | if (MI.getOperand(1).isImm() && !MI.getOperand(1).getImm()400 && |
359 | 415 | MI.getOperand(2).isFI()147 ) { |
360 | 44 | FrameIndex = MI.getOperand(2).getIndex(); |
361 | 44 | return MI.getOperand(0).getReg(); |
362 | 44 | } |
363 | 9.19k | } |
364 | 9.19k | return 0; |
365 | 9.19k | } |
366 | | |
// Commute operands OpIdx1/OpIdx2 of \p MI. Everything except RLWIMI/RLWIMIo
// is handled generically; for RLWIMI the rotate-insert mask must also be
// complemented, so the mask bounds are rewritten alongside the register swap.
// Returns the commuted instruction (a fresh one if \p NewMI, else \p MI
// updated in place), or nullptr when commutation is impossible.
MachineInstr *PPCInstrInfo::commuteInstructionImpl(MachineInstr &MI, bool NewMI,
                                                   unsigned OpIdx1,
                                                   unsigned OpIdx2) const {
  MachineFunction &MF = *MI.getParent()->getParent();

  // Normal instructions can be commuted the obvious way.
  if (MI.getOpcode() != PPC::RLWIMI && MI.getOpcode() != PPC::RLWIMIo)
    return TargetInstrInfo::commuteInstructionImpl(MI, NewMI, OpIdx1, OpIdx2);
  // Note that RLWIMI can be commuted as a 32-bit instruction, but not as a
  // 64-bit instruction (so we don't handle PPC::RLWIMI8 here), because
  // changing the relative order of the mask operands might change what happens
  // to the high-bits of the mask (and, thus, the result).

  // Cannot commute if it has a non-zero rotate count.
  if (MI.getOperand(3).getImm() != 0)
    return nullptr;

  // If we have a zero rotate count, we have:
  //   M = mask(MB,ME)
  //   Op0 = (Op1 & ~M) | (Op2 & M)
  // Change this to:
  //   M = mask((ME+1)&31, (MB-1)&31)
  //   Op0 = (Op2 & ~M) | (Op1 & M)

  // Swap op1/op2
  assert(((OpIdx1 == 1 && OpIdx2 == 2) || (OpIdx1 == 2 && OpIdx2 == 1)) &&
         "Only the operands 1 and 2 can be swapped in RLSIMI/RLWIMIo.");
  Register Reg0 = MI.getOperand(0).getReg();
  Register Reg1 = MI.getOperand(1).getReg();
  Register Reg2 = MI.getOperand(2).getReg();
  unsigned SubReg1 = MI.getOperand(1).getSubReg();
  unsigned SubReg2 = MI.getOperand(2).getSubReg();
  bool Reg1IsKill = MI.getOperand(1).isKill();
  bool Reg2IsKill = MI.getOperand(2).isKill();
  bool ChangeReg0 = false;
  // If machine instrs are no longer in two-address forms, update
  // destination register as well.
  if (Reg0 == Reg1) {
    // Must be two address instruction!
    assert(MI.getDesc().getOperandConstraint(0, MCOI::TIED_TO) &&
           "Expecting a two-address instruction!");
    assert(MI.getOperand(0).getSubReg() == SubReg1 && "Tied subreg mismatch");
    // After the swap, operand 2 (formerly operand 1) is tied to the def, so
    // it can no longer be marked killed.
    Reg2IsKill = false;
    ChangeReg0 = true;
  }

  // Masks.
  unsigned MB = MI.getOperand(4).getImm();
  unsigned ME = MI.getOperand(5).getImm();

  // We can't commute a trivial mask (there is no way to represent an all-zero
  // mask).
  if (MB == 0 && ME == 31)
    return nullptr;

  if (NewMI) {
    // Create a new instruction.
    Register Reg0 = ChangeReg0 ? Reg2 : MI.getOperand(0).getReg();
    bool Reg0IsDead = MI.getOperand(0).isDead();
    return BuildMI(MF, MI.getDebugLoc(), MI.getDesc())
        .addReg(Reg0, RegState::Define | getDeadRegState(Reg0IsDead))
        .addReg(Reg2, getKillRegState(Reg2IsKill))
        .addReg(Reg1, getKillRegState(Reg1IsKill))
        .addImm((ME + 1) & 31)
        .addImm((MB - 1) & 31);
  }

  // In-place update: swap registers, subregisters, and kill flags of
  // operands 1 and 2, then complement the mask bounds.
  if (ChangeReg0) {
    MI.getOperand(0).setReg(Reg2);
    MI.getOperand(0).setSubReg(SubReg2);
  }
  MI.getOperand(2).setReg(Reg1);
  MI.getOperand(1).setReg(Reg2);
  MI.getOperand(2).setSubReg(SubReg1);
  MI.getOperand(1).setSubReg(SubReg2);
  MI.getOperand(2).setIsKill(Reg1IsKill);
  MI.getOperand(1).setIsKill(Reg2IsKill);

  // Swap the mask around.
  MI.getOperand(4).setImm((ME + 1) & 31);
  MI.getOperand(5).setImm((MB - 1) & 31);
  return &MI;
}
450 | | |
451 | | bool PPCInstrInfo::findCommutedOpIndices(MachineInstr &MI, unsigned &SrcOpIdx1, |
452 | 8.54k | unsigned &SrcOpIdx2) const { |
453 | 8.54k | // For VSX A-Type FMA instructions, it is the first two operands that can be |
454 | 8.54k | // commuted, however, because the non-encoded tied input operand is listed |
455 | 8.54k | // first, the operands to swap are actually the second and third. |
456 | 8.54k | |
457 | 8.54k | int AltOpc = PPC::getAltVSXFMAOpcode(MI.getOpcode()); |
458 | 8.54k | if (AltOpc == -1) |
459 | 7.77k | return TargetInstrInfo::findCommutedOpIndices(MI, SrcOpIdx1, SrcOpIdx2); |
460 | 770 | |
461 | 770 | // The commutable operand indices are 2 and 3. Return them in SrcOpIdx1 |
462 | 770 | // and SrcOpIdx2. |
463 | 770 | return fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, 2, 3); |
464 | 770 | } |
465 | | |
466 | | void PPCInstrInfo::insertNoop(MachineBasicBlock &MBB, |
467 | 0 | MachineBasicBlock::iterator MI) const { |
468 | 0 | // This function is used for scheduling, and the nop wanted here is the type |
469 | 0 | // that terminates dispatch groups on the POWER cores. |
470 | 0 | unsigned Directive = Subtarget.getDarwinDirective(); |
471 | 0 | unsigned Opcode; |
472 | 0 | switch (Directive) { |
473 | 0 | default: Opcode = PPC::NOP; break; |
474 | 0 | case PPC::DIR_PWR6: Opcode = PPC::NOP_GT_PWR6; break; |
475 | 0 | case PPC::DIR_PWR7: Opcode = PPC::NOP_GT_PWR7; break; |
476 | 0 | case PPC::DIR_PWR8: Opcode = PPC::NOP_GT_PWR7; break; /* FIXME: Update when P8 InstrScheduling model is ready */ |
477 | 0 | // FIXME: Update when POWER9 scheduling model is ready. |
478 | 0 | case PPC::DIR_PWR9: Opcode = PPC::NOP_GT_PWR7; break; |
479 | 0 | } |
480 | 0 | |
481 | 0 | DebugLoc DL; |
482 | 0 | BuildMI(MBB, MI, DL, get(Opcode)); |
483 | 0 | } |
484 | | |
/// Return the noop instruction to use for a noop.
/// (Unlike insertNoop above, this is the plain machine-level nop used for
/// MC-layer padding, independent of the scheduling directive.)
void PPCInstrInfo::getNoop(MCInst &NopInst) const {
  NopInst.setOpcode(PPC::NOP);
}
489 | | |
490 | | // Branch analysis. |
491 | | // Note: If the condition register is set to CTR or CTR8 then this is a |
492 | | // BDNZ (imm == 1) or BDZ (imm == 0) branch. |
493 | | bool PPCInstrInfo::analyzeBranch(MachineBasicBlock &MBB, |
494 | | MachineBasicBlock *&TBB, |
495 | | MachineBasicBlock *&FBB, |
496 | | SmallVectorImpl<MachineOperand> &Cond, |
497 | 588k | bool AllowModify) const { |
498 | 588k | bool isPPC64 = Subtarget.isPPC64(); |
499 | 588k | |
500 | 588k | // If the block has no terminators, it just falls into the block after it. |
501 | 588k | MachineBasicBlock::iterator I = MBB.getLastNonDebugInstr(); |
502 | 588k | if (I == MBB.end()) |
503 | 15.4k | return false; |
504 | 573k | |
505 | 573k | if (!isUnpredicatedTerminator(*I)) |
506 | 68.0k | return false; |
507 | 505k | |
508 | 505k | if (AllowModify) { |
509 | 80.3k | // If the BB ends with an unconditional branch to the fallthrough BB, |
510 | 80.3k | // we eliminate the branch instruction. |
511 | 80.3k | if (I->getOpcode() == PPC::B && |
512 | 80.3k | MBB.isLayoutSuccessor(I->getOperand(0).getMBB())9.17k ) { |
513 | 1.59k | I->eraseFromParent(); |
514 | 1.59k | |
515 | 1.59k | // We update iterator after deleting the last branch. |
516 | 1.59k | I = MBB.getLastNonDebugInstr(); |
517 | 1.59k | if (I == MBB.end() || !isUnpredicatedTerminator(*I)1.58k ) |
518 | 167 | return false; |
519 | 504k | } |
520 | 80.3k | } |
521 | 504k | |
522 | 504k | // Get the last instruction in the block. |
523 | 504k | MachineInstr &LastInst = *I; |
524 | 504k | |
525 | 504k | // If there is only one terminator instruction, process it. |
526 | 504k | if (I == MBB.begin() || !isUnpredicatedTerminator(*--I)465k ) { |
527 | 462k | if (LastInst.getOpcode() == PPC::B) { |
528 | 20.3k | if (!LastInst.getOperand(0).isMBB()) |
529 | 0 | return true; |
530 | 20.3k | TBB = LastInst.getOperand(0).getMBB(); |
531 | 20.3k | return false; |
532 | 442k | } else if (LastInst.getOpcode() == PPC::BCC) { |
533 | 50.4k | if (!LastInst.getOperand(2).isMBB()) |
534 | 0 | return true; |
535 | 50.4k | // Block ends with fall-through condbranch. |
536 | 50.4k | TBB = LastInst.getOperand(2).getMBB(); |
537 | 50.4k | Cond.push_back(LastInst.getOperand(0)); |
538 | 50.4k | Cond.push_back(LastInst.getOperand(1)); |
539 | 50.4k | return false; |
540 | 391k | } else if (LastInst.getOpcode() == PPC::BC) { |
541 | 15.4k | if (!LastInst.getOperand(1).isMBB()) |
542 | 0 | return true; |
543 | 15.4k | // Block ends with fall-through condbranch. |
544 | 15.4k | TBB = LastInst.getOperand(1).getMBB(); |
545 | 15.4k | Cond.push_back(MachineOperand::CreateImm(PPC::PRED_BIT_SET)); |
546 | 15.4k | Cond.push_back(LastInst.getOperand(0)); |
547 | 15.4k | return false; |
548 | 376k | } else if (LastInst.getOpcode() == PPC::BCn) { |
549 | 8.00k | if (!LastInst.getOperand(1).isMBB()) |
550 | 0 | return true; |
551 | 8.00k | // Block ends with fall-through condbranch. |
552 | 8.00k | TBB = LastInst.getOperand(1).getMBB(); |
553 | 8.00k | Cond.push_back(MachineOperand::CreateImm(PPC::PRED_BIT_UNSET)); |
554 | 8.00k | Cond.push_back(LastInst.getOperand(0)); |
555 | 8.00k | return false; |
556 | 368k | } else if (LastInst.getOpcode() == PPC::BDNZ8 || |
557 | 368k | LastInst.getOpcode() == PPC::BDNZ363k ) { |
558 | 4.60k | if (!LastInst.getOperand(0).isMBB()) |
559 | 0 | return true; |
560 | 4.60k | if (DisableCTRLoopAnal) |
561 | 0 | return true; |
562 | 4.60k | TBB = LastInst.getOperand(0).getMBB(); |
563 | 4.60k | Cond.push_back(MachineOperand::CreateImm(1)); |
564 | 4.60k | Cond.push_back(MachineOperand::CreateReg(isPPC64 ? PPC::CTR84.50k : PPC::CTR107 , |
565 | 4.60k | true)); |
566 | 4.60k | return false; |
567 | 363k | } else if (LastInst.getOpcode() == PPC::BDZ8 || |
568 | 363k | LastInst.getOpcode() == PPC::BDZ363k ) { |
569 | 280 | if (!LastInst.getOperand(0).isMBB()) |
570 | 0 | return true; |
571 | 280 | if (DisableCTRLoopAnal) |
572 | 0 | return true; |
573 | 280 | TBB = LastInst.getOperand(0).getMBB(); |
574 | 280 | Cond.push_back(MachineOperand::CreateImm(0)); |
575 | 280 | Cond.push_back(MachineOperand::CreateReg(isPPC64 ? PPC::CTR8213 : PPC::CTR67 , |
576 | 280 | true)); |
577 | 280 | return false; |
578 | 280 | } |
579 | 363k | |
580 | 363k | // Otherwise, don't know what this is. |
581 | 363k | return true; |
582 | 363k | } |
583 | 42.6k | |
584 | 42.6k | // Get the instruction before it if it's a terminator. |
585 | 42.6k | MachineInstr &SecondLastInst = *I; |
586 | 42.6k | |
587 | 42.6k | // If there are three terminators, we don't know what sort of block this is. |
588 | 42.6k | if (I != MBB.begin() && isUnpredicatedTerminator(*--I)37.5k ) |
589 | 0 | return true; |
590 | 42.6k | |
591 | 42.6k | // If the block ends with PPC::B and PPC:BCC, handle it. |
592 | 42.6k | if (SecondLastInst.getOpcode() == PPC::BCC && |
593 | 42.6k | LastInst.getOpcode() == PPC::B21.3k ) { |
594 | 21.3k | if (!SecondLastInst.getOperand(2).isMBB() || |
595 | 21.3k | !LastInst.getOperand(0).isMBB()) |
596 | 0 | return true; |
597 | 21.3k | TBB = SecondLastInst.getOperand(2).getMBB(); |
598 | 21.3k | Cond.push_back(SecondLastInst.getOperand(0)); |
599 | 21.3k | Cond.push_back(SecondLastInst.getOperand(1)); |
600 | 21.3k | FBB = LastInst.getOperand(0).getMBB(); |
601 | 21.3k | return false; |
602 | 21.3k | } else if (21.2k SecondLastInst.getOpcode() == PPC::BC21.2k && |
603 | 21.2k | LastInst.getOpcode() == PPC::B11.9k ) { |
604 | 11.9k | if (!SecondLastInst.getOperand(1).isMBB() || |
605 | 11.9k | !LastInst.getOperand(0).isMBB()) |
606 | 0 | return true; |
607 | 11.9k | TBB = SecondLastInst.getOperand(1).getMBB(); |
608 | 11.9k | Cond.push_back(MachineOperand::CreateImm(PPC::PRED_BIT_SET)); |
609 | 11.9k | Cond.push_back(SecondLastInst.getOperand(0)); |
610 | 11.9k | FBB = LastInst.getOperand(0).getMBB(); |
611 | 11.9k | return false; |
612 | 11.9k | } else if (9.34k SecondLastInst.getOpcode() == PPC::BCn9.34k && |
613 | 9.34k | LastInst.getOpcode() == PPC::B2.68k ) { |
614 | 2.68k | if (!SecondLastInst.getOperand(1).isMBB() || |
615 | 2.68k | !LastInst.getOperand(0).isMBB()) |
616 | 0 | return true; |
617 | 2.68k | TBB = SecondLastInst.getOperand(1).getMBB(); |
618 | 2.68k | Cond.push_back(MachineOperand::CreateImm(PPC::PRED_BIT_UNSET)); |
619 | 2.68k | Cond.push_back(SecondLastInst.getOperand(0)); |
620 | 2.68k | FBB = LastInst.getOperand(0).getMBB(); |
621 | 2.68k | return false; |
622 | 6.66k | } else if ((SecondLastInst.getOpcode() == PPC::BDNZ8 || |
623 | 6.66k | SecondLastInst.getOpcode() == PPC::BDNZ2.01k ) && |
624 | 6.66k | LastInst.getOpcode() == PPC::B4.77k ) { |
625 | 4.77k | if (!SecondLastInst.getOperand(0).isMBB() || |
626 | 4.77k | !LastInst.getOperand(0).isMBB()) |
627 | 0 | return true; |
628 | 4.77k | if (DisableCTRLoopAnal) |
629 | 0 | return true; |
630 | 4.77k | TBB = SecondLastInst.getOperand(0).getMBB(); |
631 | 4.77k | Cond.push_back(MachineOperand::CreateImm(1)); |
632 | 4.77k | Cond.push_back(MachineOperand::CreateReg(isPPC64 ? PPC::CTR84.64k : PPC::CTR130 , |
633 | 4.77k | true)); |
634 | 4.77k | FBB = LastInst.getOperand(0).getMBB(); |
635 | 4.77k | return false; |
636 | 4.77k | } else if (1.88k (1.88k SecondLastInst.getOpcode() == PPC::BDZ81.88k || |
637 | 1.88k | SecondLastInst.getOpcode() == PPC::BDZ1.75k ) && |
638 | 1.88k | LastInst.getOpcode() == PPC::B154 ) { |
639 | 154 | if (!SecondLastInst.getOperand(0).isMBB() || |
640 | 154 | !LastInst.getOperand(0).isMBB()) |
641 | 0 | return true; |
642 | 154 | if (DisableCTRLoopAnal) |
643 | 0 | return true; |
644 | 154 | TBB = SecondLastInst.getOperand(0).getMBB(); |
645 | 154 | Cond.push_back(MachineOperand::CreateImm(0)); |
646 | 154 | Cond.push_back(MachineOperand::CreateReg(isPPC64 ? PPC::CTR8129 : PPC::CTR25 , |
647 | 154 | true)); |
648 | 154 | FBB = LastInst.getOperand(0).getMBB(); |
649 | 154 | return false; |
650 | 154 | } |
651 | 1.73k | |
652 | 1.73k | // If the block ends with two PPC:Bs, handle it. The second one is not |
653 | 1.73k | // executed, so remove it. |
654 | 1.73k | if (SecondLastInst.getOpcode() == PPC::B && LastInst.getOpcode() == PPC::B0 ) { |
655 | 0 | if (!SecondLastInst.getOperand(0).isMBB()) |
656 | 0 | return true; |
657 | 0 | TBB = SecondLastInst.getOperand(0).getMBB(); |
658 | 0 | I = LastInst; |
659 | 0 | if (AllowModify) |
660 | 0 | I->eraseFromParent(); |
661 | 0 | return false; |
662 | 0 | } |
663 | 1.73k | |
664 | 1.73k | // Otherwise, can't handle this. |
665 | 1.73k | return true; |
666 | 1.73k | } |
667 | | |
668 | | unsigned PPCInstrInfo::removeBranch(MachineBasicBlock &MBB, |
669 | 10.5k | int *BytesRemoved) const { |
670 | 10.5k | assert(!BytesRemoved && "code size not handled"); |
671 | 10.5k | |
672 | 10.5k | MachineBasicBlock::iterator I = MBB.getLastNonDebugInstr(); |
673 | 10.5k | if (I == MBB.end()) |
674 | 4 | return 0; |
675 | 10.5k | |
676 | 10.5k | if (I->getOpcode() != PPC::B && I->getOpcode() != PPC::BCC8.04k && |
677 | 10.5k | I->getOpcode() != PPC::BC3.27k && I->getOpcode() != PPC::BCn1.72k && |
678 | 10.5k | I->getOpcode() != PPC::BDNZ8326 && I->getOpcode() != PPC::BDNZ280 && |
679 | 10.5k | I->getOpcode() != PPC::BDZ8274 && I->getOpcode() != PPC::BDZ229 ) |
680 | 223 | return 0; |
681 | 10.2k | |
682 | 10.2k | // Remove the branch. |
683 | 10.2k | I->eraseFromParent(); |
684 | 10.2k | |
685 | 10.2k | I = MBB.end(); |
686 | 10.2k | |
687 | 10.2k | if (I == MBB.begin()) return 11.46k ; |
688 | 8.81k | --I; |
689 | 8.81k | if (I->getOpcode() != PPC::BCC && |
690 | 8.81k | I->getOpcode() != PPC::BC7.91k && I->getOpcode() != PPC::BCn7.76k && |
691 | 8.81k | I->getOpcode() != PPC::BDNZ87.68k && I->getOpcode() != PPC::BDNZ7.67k && |
692 | 8.81k | I->getOpcode() != PPC::BDZ87.67k && I->getOpcode() != PPC::BDZ7.66k ) |
693 | 7.66k | return 1; |
694 | 1.14k | |
695 | 1.14k | // Remove the branch. |
696 | 1.14k | I->eraseFromParent(); |
697 | 1.14k | return 2; |
698 | 1.14k | } |
699 | | |
// Insert branch code at the end of MBB targeting TBB (and FBB for a two-way
// branch). Cond is either empty (unconditional) or the two-operand form
// produced by analyzeBranch: Cond[0] is a PPC::Predicate immediate (or a
// 0/1 bdz/bdnz selector when Cond[1] is CTR/CTR8) and Cond[1] is the
// condition register operand. Returns the number of instructions inserted.
unsigned PPCInstrInfo::insertBranch(MachineBasicBlock &MBB,
                                    MachineBasicBlock *TBB,
                                    MachineBasicBlock *FBB,
                                    ArrayRef<MachineOperand> Cond,
                                    const DebugLoc &DL,
                                    int *BytesAdded) const {
  // Shouldn't be a fall through.
  assert(TBB && "insertBranch must not be told to insert a fallthrough");
  assert((Cond.size() == 2 || Cond.size() == 0) &&
         "PPC branch conditions have two components!");
  assert(!BytesAdded && "code size not handled");

  bool isPPC64 = Subtarget.isPPC64();

  // One-way branch.
  if (!FBB) {
    if (Cond.empty()) // Unconditional branch
      BuildMI(&MBB, DL, get(PPC::B)).addMBB(TBB);
    // A condition on CTR means a decrement-and-branch (bdnz/bdz); Cond[0]
    // selects bdnz (non-zero) vs bdz (zero).
    else if (Cond[1].getReg() == PPC::CTR || Cond[1].getReg() == PPC::CTR8)
      BuildMI(&MBB, DL, get(Cond[0].getImm() ?
                            (isPPC64 ? PPC::BDNZ8 : PPC::BDNZ) :
                            (isPPC64 ? PPC::BDZ8 : PPC::BDZ))).addMBB(TBB);
    // Branch on a single CR bit being set/unset.
    else if (Cond[0].getImm() == PPC::PRED_BIT_SET)
      BuildMI(&MBB, DL, get(PPC::BC)).add(Cond[1]).addMBB(TBB);
    else if (Cond[0].getImm() == PPC::PRED_BIT_UNSET)
      BuildMI(&MBB, DL, get(PPC::BCn)).add(Cond[1]).addMBB(TBB);
    else // Conditional branch
      BuildMI(&MBB, DL, get(PPC::BCC))
          .addImm(Cond[0].getImm())
          .add(Cond[1])
          .addMBB(TBB);
    return 1;
  }

  // Two-way Conditional Branch: conditional branch to TBB followed by an
  // unconditional branch to FBB. Same opcode selection as the one-way case.
  if (Cond[1].getReg() == PPC::CTR || Cond[1].getReg() == PPC::CTR8)
    BuildMI(&MBB, DL, get(Cond[0].getImm() ?
                          (isPPC64 ? PPC::BDNZ8 : PPC::BDNZ) :
                          (isPPC64 ? PPC::BDZ8 : PPC::BDZ))).addMBB(TBB);
  else if (Cond[0].getImm() == PPC::PRED_BIT_SET)
    BuildMI(&MBB, DL, get(PPC::BC)).add(Cond[1]).addMBB(TBB);
  else if (Cond[0].getImm() == PPC::PRED_BIT_UNSET)
    BuildMI(&MBB, DL, get(PPC::BCn)).add(Cond[1]).addMBB(TBB);
  else
    BuildMI(&MBB, DL, get(PPC::BCC))
        .addImm(Cond[0].getImm())
        .add(Cond[1])
        .addMBB(TBB);
  BuildMI(&MBB, DL, get(PPC::B)).addMBB(FBB);
  return 2;
}
751 | | |
752 | | // Select analysis. |
753 | | bool PPCInstrInfo::canInsertSelect(const MachineBasicBlock &MBB, |
754 | | ArrayRef<MachineOperand> Cond, |
755 | | unsigned TrueReg, unsigned FalseReg, |
756 | 350 | int &CondCycles, int &TrueCycles, int &FalseCycles) const { |
757 | 350 | if (Cond.size() != 2) |
758 | 0 | return false; |
759 | 350 | |
760 | 350 | // If this is really a bdnz-like condition, then it cannot be turned into a |
761 | 350 | // select. |
762 | 350 | if (Cond[1].getReg() == PPC::CTR || Cond[1].getReg() == PPC::CTR8) |
763 | 0 | return false; |
764 | 350 | |
765 | 350 | // Check register classes. |
766 | 350 | const MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo(); |
767 | 350 | const TargetRegisterClass *RC = |
768 | 350 | RI.getCommonSubClass(MRI.getRegClass(TrueReg), MRI.getRegClass(FalseReg)); |
769 | 350 | if (!RC) |
770 | 0 | return false; |
771 | 350 | |
772 | 350 | // isel is for regular integer GPRs only. |
773 | 350 | if (!PPC::GPRCRegClass.hasSubClassEq(RC) && |
774 | 350 | !PPC::GPRC_NOR0RegClass.hasSubClassEq(RC)295 && |
775 | 350 | !PPC::G8RCRegClass.hasSubClassEq(RC)295 && |
776 | 350 | !PPC::G8RC_NOX0RegClass.hasSubClassEq(RC)257 ) |
777 | 257 | return false; |
778 | 93 | |
779 | 93 | // FIXME: These numbers are for the A2, how well they work for other cores is |
780 | 93 | // an open question. On the A2, the isel instruction has a 2-cycle latency |
781 | 93 | // but single-cycle throughput. These numbers are used in combination with |
782 | 93 | // the MispredictPenalty setting from the active SchedMachineModel. |
783 | 93 | CondCycles = 1; |
784 | 93 | TrueCycles = 1; |
785 | 93 | FalseCycles = 1; |
786 | 93 | |
787 | 93 | return true; |
788 | 93 | } |
789 | | |
// Materialize a select as an isel/isel8: DestReg = Cond ? TrueReg : FalseReg.
// Cond is the two-operand form from analyzeBranch (predicate immediate +
// condition register). Callers must first have checked canInsertSelect.
void PPCInstrInfo::insertSelect(MachineBasicBlock &MBB,
                                MachineBasicBlock::iterator MI,
                                const DebugLoc &dl, unsigned DestReg,
                                ArrayRef<MachineOperand> Cond, unsigned TrueReg,
                                unsigned FalseReg) const {
  assert(Cond.size() == 2 &&
         "PPC branch conditions have two components!");

  // Get the register classes.
  MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
  const TargetRegisterClass *RC =
    RI.getCommonSubClass(MRI.getRegClass(TrueReg), MRI.getRegClass(FalseReg));
  assert(RC && "TrueReg and FalseReg must have overlapping register classes");

  bool Is64Bit = PPC::G8RCRegClass.hasSubClassEq(RC) ||
                 PPC::G8RC_NOX0RegClass.hasSubClassEq(RC);
  assert((Is64Bit ||
          PPC::GPRCRegClass.hasSubClassEq(RC) ||
          PPC::GPRC_NOR0RegClass.hasSubClassEq(RC)) &&
         "isel is for regular integer GPRs only");

  unsigned OpCode = Is64Bit ? PPC::ISEL8 : PPC::ISEL;
  auto SelectPred = static_cast<PPC::Predicate>(Cond[0].getImm());

  // SubIdx picks which bit of the CR field the isel tests (isel selects the
  // first operand when the bit is set). Negated predicates are handled by
  // swapping the true/false operands (SwapOps) rather than inverting the bit.
  unsigned SubIdx = 0;
  bool SwapOps = false;
  switch (SelectPred) {
  case PPC::PRED_EQ:
  case PPC::PRED_EQ_MINUS:
  case PPC::PRED_EQ_PLUS:
      SubIdx = PPC::sub_eq; SwapOps = false; break;
  case PPC::PRED_NE:
  case PPC::PRED_NE_MINUS:
  case PPC::PRED_NE_PLUS:
      SubIdx = PPC::sub_eq; SwapOps = true; break;
  case PPC::PRED_LT:
  case PPC::PRED_LT_MINUS:
  case PPC::PRED_LT_PLUS:
      SubIdx = PPC::sub_lt; SwapOps = false; break;
  case PPC::PRED_GE:
  case PPC::PRED_GE_MINUS:
  case PPC::PRED_GE_PLUS:
      SubIdx = PPC::sub_lt; SwapOps = true; break;
  case PPC::PRED_GT:
  case PPC::PRED_GT_MINUS:
  case PPC::PRED_GT_PLUS:
      SubIdx = PPC::sub_gt; SwapOps = false; break;
  case PPC::PRED_LE:
  case PPC::PRED_LE_MINUS:
  case PPC::PRED_LE_PLUS:
      SubIdx = PPC::sub_gt; SwapOps = true; break;
  case PPC::PRED_UN:
  case PPC::PRED_UN_MINUS:
  case PPC::PRED_UN_PLUS:
      SubIdx = PPC::sub_un; SwapOps = false; break;
  case PPC::PRED_NU:
  case PPC::PRED_NU_MINUS:
  case PPC::PRED_NU_PLUS:
      SubIdx = PPC::sub_un; SwapOps = true; break;
  // Condition is already a single bit register: no subregister index needed.
  case PPC::PRED_BIT_SET:   SubIdx = 0; SwapOps = false; break;
  case PPC::PRED_BIT_UNSET: SubIdx = 0; SwapOps = true; break;
  }

  unsigned FirstReg =  SwapOps ? FalseReg : TrueReg,
           SecondReg = SwapOps ? TrueReg  : FalseReg;

  // The first input register of isel cannot be r0. If it is a member
  // of a register class that can be r0, then copy it first (the
  // register allocator should eliminate the copy).
  if (MRI.getRegClass(FirstReg)->contains(PPC::R0) ||
      MRI.getRegClass(FirstReg)->contains(PPC::X0)) {
    const TargetRegisterClass *FirstRC =
      MRI.getRegClass(FirstReg)->contains(PPC::X0) ?
        &PPC::G8RC_NOX0RegClass : &PPC::GPRC_NOR0RegClass;
    unsigned OldFirstReg = FirstReg;
    FirstReg = MRI.createVirtualRegister(FirstRC);
    BuildMI(MBB, MI, dl, get(TargetOpcode::COPY), FirstReg)
      .addReg(OldFirstReg);
  }

  BuildMI(MBB, MI, dl, get(OpCode), DestReg)
    .addReg(FirstReg).addReg(SecondReg)
    .addReg(Cond[1].getReg(), 0, SubIdx);
}
874 | | |
875 | 1 | static unsigned getCRBitValue(unsigned CRBit) { |
876 | 1 | unsigned Ret = 4; |
877 | 1 | if (CRBit == PPC::CR0LT || CRBit == PPC::CR1LT || |
878 | 1 | CRBit == PPC::CR2LT || CRBit == PPC::CR3LT || |
879 | 1 | CRBit == PPC::CR4LT || CRBit == PPC::CR5LT || |
880 | 1 | CRBit == PPC::CR6LT || CRBit == PPC::CR7LT) |
881 | 0 | Ret = 3; |
882 | 1 | if (CRBit == PPC::CR0GT || CRBit == PPC::CR1GT || |
883 | 1 | CRBit == PPC::CR2GT || CRBit == PPC::CR3GT || |
884 | 1 | CRBit == PPC::CR4GT || CRBit == PPC::CR5GT || |
885 | 1 | CRBit == PPC::CR6GT || CRBit == PPC::CR7GT) |
886 | 0 | Ret = 2; |
887 | 1 | if (CRBit == PPC::CR0EQ || CRBit == PPC::CR1EQ0 || |
888 | 1 | CRBit == PPC::CR2EQ0 || CRBit == PPC::CR3EQ0 || |
889 | 1 | CRBit == PPC::CR4EQ0 || CRBit == PPC::CR5EQ0 || |
890 | 1 | CRBit == PPC::CR6EQ0 || CRBit == PPC::CR7EQ0 ) |
891 | 1 | Ret = 1; |
892 | 1 | if (CRBit == PPC::CR0UN || CRBit == PPC::CR1UN || |
893 | 1 | CRBit == PPC::CR2UN || CRBit == PPC::CR3UN || |
894 | 1 | CRBit == PPC::CR4UN || CRBit == PPC::CR5UN || |
895 | 1 | CRBit == PPC::CR6UN || CRBit == PPC::CR7UN) |
896 | 0 | Ret = 0; |
897 | 1 | |
898 | 1 | assert(Ret != 4 && "Invalid CR bit register"); |
899 | 1 | return Ret; |
900 | 1 | } |
901 | | |
// Emit instruction(s) copying SrcReg to DestReg before iterator I. Handles
// cross-class copies (CR/CR-bit to GPR, GPR<->VSX direct moves, SPE
// conversions) specially, then falls back to a same-class register move.
void PPCInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
                               MachineBasicBlock::iterator I,
                               const DebugLoc &DL, unsigned DestReg,
                               unsigned SrcReg, bool KillSrc) const {
  // We can end up with self copies and similar things as a result of VSX copy
  // legalization. Promote them here.
  const TargetRegisterInfo *TRI = &getRegisterInfo();
  if (PPC::F8RCRegClass.contains(DestReg) &&
      PPC::VSRCRegClass.contains(SrcReg)) {
    // F8 dest of a VSX copy: promote the dest to its VSX super-register.
    unsigned SuperReg =
      TRI->getMatchingSuperReg(DestReg, PPC::sub_64, &PPC::VSRCRegClass);

    if (VSXSelfCopyCrash && SrcReg == SuperReg)
      llvm_unreachable("nop VSX copy");

    DestReg = SuperReg;
  } else if (PPC::F8RCRegClass.contains(SrcReg) &&
             PPC::VSRCRegClass.contains(DestReg)) {
    // F8 source of a VSX copy: promote the source to its VSX super-register.
    unsigned SuperReg =
      TRI->getMatchingSuperReg(SrcReg, PPC::sub_64, &PPC::VSRCRegClass);

    if (VSXSelfCopyCrash && DestReg == SuperReg)
      llvm_unreachable("nop VSX copy");

    SrcReg = SuperReg;
  }

  // Different class register copy
  if (PPC::CRBITRCRegClass.contains(SrcReg) &&
      PPC::GPRCRegClass.contains(DestReg)) {
    // CR bit -> GPR: move the whole CR field, then isolate the bit.
    unsigned CRReg = getCRFromCRBit(SrcReg);
    BuildMI(MBB, I, DL, get(PPC::MFOCRF), DestReg).addReg(CRReg);
    getKillRegState(KillSrc);
    // Rotate the CR bit in the CR fields to be the least significant bit and
    // then mask with 0x1 (MB = ME = 31).
    BuildMI(MBB, I, DL, get(PPC::RLWINM), DestReg)
       .addReg(DestReg, RegState::Kill)
       .addImm(TRI->getEncodingValue(CRReg) * 4 + (4 - getCRBitValue(SrcReg)))
       .addImm(31)
       .addImm(31);
    return;
  } else if (PPC::CRRCRegClass.contains(SrcReg) &&
             PPC::G8RCRegClass.contains(DestReg)) {
    // CR field -> 64-bit GPR.
    BuildMI(MBB, I, DL, get(PPC::MFOCRF8), DestReg).addReg(SrcReg);
    getKillRegState(KillSrc);
    return;
  } else if (PPC::CRRCRegClass.contains(SrcReg) &&
             PPC::GPRCRegClass.contains(DestReg)) {
    // CR field -> 32-bit GPR.
    BuildMI(MBB, I, DL, get(PPC::MFOCRF), DestReg).addReg(SrcReg);
    getKillRegState(KillSrc);
    return;
  } else if (PPC::G8RCRegClass.contains(SrcReg) &&
             PPC::VSFRCRegClass.contains(DestReg)) {
    // GPR -> VSX scalar float via direct move.
    assert(Subtarget.hasDirectMove() &&
           "Subtarget doesn't support directmove, don't know how to copy.");
    BuildMI(MBB, I, DL, get(PPC::MTVSRD), DestReg).addReg(SrcReg);
    NumGPRtoVSRSpill++;
    getKillRegState(KillSrc);
    return;
  } else if (PPC::VSFRCRegClass.contains(SrcReg) &&
             PPC::G8RCRegClass.contains(DestReg)) {
    // VSX scalar float -> GPR via direct move.
    assert(Subtarget.hasDirectMove() &&
           "Subtarget doesn't support directmove, don't know how to copy.");
    BuildMI(MBB, I, DL, get(PPC::MFVSRD), DestReg).addReg(SrcReg);
    getKillRegState(KillSrc);
    return;
  } else if (PPC::SPERCRegClass.contains(SrcReg) &&
             PPC::SPE4RCRegClass.contains(DestReg)) {
    // SPE double -> single conversion copy.
    BuildMI(MBB, I, DL, get(PPC::EFSCFD), DestReg).addReg(SrcReg);
    getKillRegState(KillSrc);
    return;
  } else if (PPC::SPE4RCRegClass.contains(SrcReg) &&
             PPC::SPERCRegClass.contains(DestReg)) {
    // SPE single -> double conversion copy.
    BuildMI(MBB, I, DL, get(PPC::EFDCFS), DestReg).addReg(SrcReg);
    getKillRegState(KillSrc);
    return;
  }

  // Same-class copy: pick the canonical move opcode for the class.
  unsigned Opc;
  if (PPC::GPRCRegClass.contains(DestReg, SrcReg))
    Opc = PPC::OR;
  else if (PPC::G8RCRegClass.contains(DestReg, SrcReg))
    Opc = PPC::OR8;
  else if (PPC::F4RCRegClass.contains(DestReg, SrcReg))
    Opc = PPC::FMR;
  else if (PPC::CRRCRegClass.contains(DestReg, SrcReg))
    Opc = PPC::MCRF;
  else if (PPC::VRRCRegClass.contains(DestReg, SrcReg))
    Opc = PPC::VOR;
  else if (PPC::VSRCRegClass.contains(DestReg, SrcReg))
    // There are two different ways this can be done:
    //   1. xxlor : This has lower latency (on the P7), 2 cycles, but can only
    //      issue in VSU pipeline 0.
    //   2. xmovdp/xmovsp: This has higher latency (on the P7), 6 cycles, but
    //      can go to either pipeline.
    // We'll always use xxlor here, because in practically all cases where
    // copies are generated, they are close enough to some use that the
    // lower-latency form is preferable.
    Opc = PPC::XXLOR;
  else if (PPC::VSFRCRegClass.contains(DestReg, SrcReg) ||
           PPC::VSSRCRegClass.contains(DestReg, SrcReg))
    Opc = (Subtarget.hasP9Vector()) ? PPC::XSCPSGNDP : PPC::XXLORf;
  else if (PPC::QFRCRegClass.contains(DestReg, SrcReg))
    Opc = PPC::QVFMR;
  else if (PPC::QSRCRegClass.contains(DestReg, SrcReg))
    Opc = PPC::QVFMRs;
  else if (PPC::QBRCRegClass.contains(DestReg, SrcReg))
    Opc = PPC::QVFMRb;
  else if (PPC::CRBITRCRegClass.contains(DestReg, SrcReg))
    Opc = PPC::CROR;
  else if (PPC::SPE4RCRegClass.contains(DestReg, SrcReg))
    Opc = PPC::OR;
  else if (PPC::SPERCRegClass.contains(DestReg, SrcReg))
    Opc = PPC::EVOR;
  else
    llvm_unreachable("Impossible reg-to-reg copy");

  // OR-style moves need the source twice (or rA, rS, rS); FMR-style need it
  // once. Dispatch on the operand count of the chosen opcode.
  const MCInstrDesc &MCID = get(Opc);
  if (MCID.getNumOperands() == 3)
    BuildMI(MBB, I, DL, MCID, DestReg)
      .addReg(SrcReg).addReg(SrcReg, getKillRegState(KillSrc));
  else
    BuildMI(MBB, I, DL, MCID, DestReg).addReg(SrcReg, getKillRegState(KillSrc));
}
1026 | | |
// Return the store opcode used to spill a register. The spill kind is chosen
// from the register class RC when one is supplied, otherwise from the
// physical register Reg. Exactly one of the two paths below runs; both index
// into the same per-kind opcode table.
unsigned PPCInstrInfo::getStoreOpcodeForSpill(unsigned Reg,
                                              const TargetRegisterClass *RC)
                                              const {
  const unsigned *OpcodesForSpill = getStoreOpcodesForSpillArray();
  int OpcodeIndex = 0;

  if (RC != nullptr) {
    // Classify by register class. Order matters: more specific classes are
    // checked together with their r0/x0-restricted variants.
    if (PPC::GPRCRegClass.hasSubClassEq(RC) ||
        PPC::GPRC_NOR0RegClass.hasSubClassEq(RC)) {
      OpcodeIndex = SOK_Int4Spill;
    } else if (PPC::G8RCRegClass.hasSubClassEq(RC) ||
               PPC::G8RC_NOX0RegClass.hasSubClassEq(RC)) {
      OpcodeIndex = SOK_Int8Spill;
    } else if (PPC::F8RCRegClass.hasSubClassEq(RC)) {
      OpcodeIndex = SOK_Float8Spill;
    } else if (PPC::F4RCRegClass.hasSubClassEq(RC)) {
      OpcodeIndex = SOK_Float4Spill;
    } else if (PPC::SPERCRegClass.hasSubClassEq(RC)) {
      OpcodeIndex = SOK_SPESpill;
    } else if (PPC::SPE4RCRegClass.hasSubClassEq(RC)) {
      OpcodeIndex = SOK_SPE4Spill;
    } else if (PPC::CRRCRegClass.hasSubClassEq(RC)) {
      OpcodeIndex = SOK_CRSpill;
    } else if (PPC::CRBITRCRegClass.hasSubClassEq(RC)) {
      OpcodeIndex = SOK_CRBitSpill;
    } else if (PPC::VRRCRegClass.hasSubClassEq(RC)) {
      OpcodeIndex = SOK_VRVectorSpill;
    } else if (PPC::VSRCRegClass.hasSubClassEq(RC)) {
      OpcodeIndex = SOK_VSXVectorSpill;
    } else if (PPC::VSFRCRegClass.hasSubClassEq(RC)) {
      OpcodeIndex = SOK_VectorFloat8Spill;
    } else if (PPC::VSSRCRegClass.hasSubClassEq(RC)) {
      OpcodeIndex = SOK_VectorFloat4Spill;
    } else if (PPC::VRSAVERCRegClass.hasSubClassEq(RC)) {
      OpcodeIndex = SOK_VRSaveSpill;
    } else if (PPC::QFRCRegClass.hasSubClassEq(RC)) {
      OpcodeIndex = SOK_QuadFloat8Spill;
    } else if (PPC::QSRCRegClass.hasSubClassEq(RC)) {
      OpcodeIndex = SOK_QuadFloat4Spill;
    } else if (PPC::QBRCRegClass.hasSubClassEq(RC)) {
      OpcodeIndex = SOK_QuadBitSpill;
    } else if (PPC::SPILLTOVSRRCRegClass.hasSubClassEq(RC)) {
      OpcodeIndex = SOK_SpillToVSR;
    } else {
      llvm_unreachable("Unknown regclass!");
    }
  } else {
    // No class given: classify by which register class contains the physical
    // register. Same kind ordering as above.
    if (PPC::GPRCRegClass.contains(Reg) ||
        PPC::GPRC_NOR0RegClass.contains(Reg)) {
      OpcodeIndex = SOK_Int4Spill;
    } else if (PPC::G8RCRegClass.contains(Reg) ||
               PPC::G8RC_NOX0RegClass.contains(Reg)) {
      OpcodeIndex = SOK_Int8Spill;
    } else if (PPC::F8RCRegClass.contains(Reg)) {
      OpcodeIndex = SOK_Float8Spill;
    } else if (PPC::F4RCRegClass.contains(Reg)) {
      OpcodeIndex = SOK_Float4Spill;
    } else if (PPC::SPERCRegClass.contains(Reg)) {
      OpcodeIndex = SOK_SPESpill;
    } else if (PPC::SPE4RCRegClass.contains(Reg)) {
      OpcodeIndex = SOK_SPE4Spill;
    } else if (PPC::CRRCRegClass.contains(Reg)) {
      OpcodeIndex = SOK_CRSpill;
    } else if (PPC::CRBITRCRegClass.contains(Reg)) {
      OpcodeIndex = SOK_CRBitSpill;
    } else if (PPC::VRRCRegClass.contains(Reg)) {
      OpcodeIndex = SOK_VRVectorSpill;
    } else if (PPC::VSRCRegClass.contains(Reg)) {
      OpcodeIndex = SOK_VSXVectorSpill;
    } else if (PPC::VSFRCRegClass.contains(Reg)) {
      OpcodeIndex = SOK_VectorFloat8Spill;
    } else if (PPC::VSSRCRegClass.contains(Reg)) {
      OpcodeIndex = SOK_VectorFloat4Spill;
    } else if (PPC::VRSAVERCRegClass.contains(Reg)) {
      OpcodeIndex = SOK_VRSaveSpill;
    } else if (PPC::QFRCRegClass.contains(Reg)) {
      OpcodeIndex = SOK_QuadFloat8Spill;
    } else if (PPC::QSRCRegClass.contains(Reg)) {
      OpcodeIndex = SOK_QuadFloat4Spill;
    } else if (PPC::QBRCRegClass.contains(Reg)) {
      OpcodeIndex = SOK_QuadBitSpill;
    } else if (PPC::SPILLTOVSRRCRegClass.contains(Reg)) {
      OpcodeIndex = SOK_SpillToVSR;
    } else {
      llvm_unreachable("Unknown regclass!");
    }
  }
  return OpcodesForSpill[OpcodeIndex];
}
1116 | | |
// Return the load opcode used to reload a spilled register. Mirrors
// getStoreOpcodeForSpill: the spill kind is chosen from RC when supplied,
// otherwise from the physical register Reg, and indexes the load-opcode table.
unsigned
PPCInstrInfo::getLoadOpcodeForSpill(unsigned Reg,
                                    const TargetRegisterClass *RC) const {
  const unsigned *OpcodesForSpill = getLoadOpcodesForSpillArray();
  int OpcodeIndex = 0;

  if (RC != nullptr) {
    // Classify by register class (same kind ordering as the store side).
    if (PPC::GPRCRegClass.hasSubClassEq(RC) ||
        PPC::GPRC_NOR0RegClass.hasSubClassEq(RC)) {
      OpcodeIndex = SOK_Int4Spill;
    } else if (PPC::G8RCRegClass.hasSubClassEq(RC) ||
               PPC::G8RC_NOX0RegClass.hasSubClassEq(RC)) {
      OpcodeIndex = SOK_Int8Spill;
    } else if (PPC::F8RCRegClass.hasSubClassEq(RC)) {
      OpcodeIndex = SOK_Float8Spill;
    } else if (PPC::F4RCRegClass.hasSubClassEq(RC)) {
      OpcodeIndex = SOK_Float4Spill;
    } else if (PPC::SPERCRegClass.hasSubClassEq(RC)) {
      OpcodeIndex = SOK_SPESpill;
    } else if (PPC::SPE4RCRegClass.hasSubClassEq(RC)) {
      OpcodeIndex = SOK_SPE4Spill;
    } else if (PPC::CRRCRegClass.hasSubClassEq(RC)) {
      OpcodeIndex = SOK_CRSpill;
    } else if (PPC::CRBITRCRegClass.hasSubClassEq(RC)) {
      OpcodeIndex = SOK_CRBitSpill;
    } else if (PPC::VRRCRegClass.hasSubClassEq(RC)) {
      OpcodeIndex = SOK_VRVectorSpill;
    } else if (PPC::VSRCRegClass.hasSubClassEq(RC)) {
      OpcodeIndex = SOK_VSXVectorSpill;
    } else if (PPC::VSFRCRegClass.hasSubClassEq(RC)) {
      OpcodeIndex = SOK_VectorFloat8Spill;
    } else if (PPC::VSSRCRegClass.hasSubClassEq(RC)) {
      OpcodeIndex = SOK_VectorFloat4Spill;
    } else if (PPC::VRSAVERCRegClass.hasSubClassEq(RC)) {
      OpcodeIndex = SOK_VRSaveSpill;
    } else if (PPC::QFRCRegClass.hasSubClassEq(RC)) {
      OpcodeIndex = SOK_QuadFloat8Spill;
    } else if (PPC::QSRCRegClass.hasSubClassEq(RC)) {
      OpcodeIndex = SOK_QuadFloat4Spill;
    } else if (PPC::QBRCRegClass.hasSubClassEq(RC)) {
      OpcodeIndex = SOK_QuadBitSpill;
    } else if (PPC::SPILLTOVSRRCRegClass.hasSubClassEq(RC)) {
      OpcodeIndex = SOK_SpillToVSR;
    } else {
      llvm_unreachable("Unknown regclass!");
    }
  } else {
    // No class given: classify by which register class contains Reg.
    if (PPC::GPRCRegClass.contains(Reg) ||
        PPC::GPRC_NOR0RegClass.contains(Reg)) {
      OpcodeIndex = SOK_Int4Spill;
    } else if (PPC::G8RCRegClass.contains(Reg) ||
               PPC::G8RC_NOX0RegClass.contains(Reg)) {
      OpcodeIndex = SOK_Int8Spill;
    } else if (PPC::F8RCRegClass.contains(Reg)) {
      OpcodeIndex = SOK_Float8Spill;
    } else if (PPC::F4RCRegClass.contains(Reg)) {
      OpcodeIndex = SOK_Float4Spill;
    } else if (PPC::SPERCRegClass.contains(Reg)) {
      OpcodeIndex = SOK_SPESpill;
    } else if (PPC::SPE4RCRegClass.contains(Reg)) {
      OpcodeIndex = SOK_SPE4Spill;
    } else if (PPC::CRRCRegClass.contains(Reg)) {
      OpcodeIndex = SOK_CRSpill;
    } else if (PPC::CRBITRCRegClass.contains(Reg)) {
      OpcodeIndex = SOK_CRBitSpill;
    } else if (PPC::VRRCRegClass.contains(Reg)) {
      OpcodeIndex = SOK_VRVectorSpill;
    } else if (PPC::VSRCRegClass.contains(Reg)) {
      OpcodeIndex = SOK_VSXVectorSpill;
    } else if (PPC::VSFRCRegClass.contains(Reg)) {
      OpcodeIndex = SOK_VectorFloat8Spill;
    } else if (PPC::VSSRCRegClass.contains(Reg)) {
      OpcodeIndex = SOK_VectorFloat4Spill;
    } else if (PPC::VRSAVERCRegClass.contains(Reg)) {
      OpcodeIndex = SOK_VRSaveSpill;
    } else if (PPC::QFRCRegClass.contains(Reg)) {
      OpcodeIndex = SOK_QuadFloat8Spill;
    } else if (PPC::QSRCRegClass.contains(Reg)) {
      OpcodeIndex = SOK_QuadFloat4Spill;
    } else if (PPC::QBRCRegClass.contains(Reg)) {
      OpcodeIndex = SOK_QuadBitSpill;
    } else if (PPC::SPILLTOVSRRCRegClass.contains(Reg)) {
      OpcodeIndex = SOK_SpillToVSR;
    } else {
      llvm_unreachable("Unknown regclass!");
    }
  }
  return OpcodesForSpill[OpcodeIndex];
}
1206 | | |
1207 | | void PPCInstrInfo::StoreRegToStackSlot( |
1208 | | MachineFunction &MF, unsigned SrcReg, bool isKill, int FrameIdx, |
1209 | | const TargetRegisterClass *RC, |
1210 | 2.74k | SmallVectorImpl<MachineInstr *> &NewMIs) const { |
1211 | 2.74k | unsigned Opcode = getStoreOpcodeForSpill(PPC::NoRegister, RC); |
1212 | 2.74k | DebugLoc DL; |
1213 | 2.74k | |
1214 | 2.74k | PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>(); |
1215 | 2.74k | FuncInfo->setHasSpills(); |
1216 | 2.74k | |
1217 | 2.74k | NewMIs.push_back(addFrameReference( |
1218 | 2.74k | BuildMI(MF, DL, get(Opcode)).addReg(SrcReg, getKillRegState(isKill)), |
1219 | 2.74k | FrameIdx)); |
1220 | 2.74k | |
1221 | 2.74k | if (PPC::CRRCRegClass.hasSubClassEq(RC) || |
1222 | 2.74k | PPC::CRBITRCRegClass.hasSubClassEq(RC)2.71k ) |
1223 | 36 | FuncInfo->setSpillsCR(); |
1224 | 2.74k | |
1225 | 2.74k | if (PPC::VRSAVERCRegClass.hasSubClassEq(RC)) |
1226 | 0 | FuncInfo->setSpillsVRSAVE(); |
1227 | 2.74k | |
1228 | 2.74k | if (isXFormMemOp(Opcode)) |
1229 | 237 | FuncInfo->setHasNonRISpills(); |
1230 | 2.74k | } |
1231 | | |
1232 | | void PPCInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB, |
1233 | | MachineBasicBlock::iterator MI, |
1234 | | unsigned SrcReg, bool isKill, |
1235 | | int FrameIdx, |
1236 | | const TargetRegisterClass *RC, |
1237 | 2.74k | const TargetRegisterInfo *TRI) const { |
1238 | 2.74k | MachineFunction &MF = *MBB.getParent(); |
1239 | 2.74k | SmallVector<MachineInstr *, 4> NewMIs; |
1240 | 2.74k | |
1241 | 2.74k | // We need to avoid a situation in which the value from a VRRC register is |
1242 | 2.74k | // spilled using an Altivec instruction and reloaded into a VSRC register |
1243 | 2.74k | // using a VSX instruction. The issue with this is that the VSX |
1244 | 2.74k | // load/store instructions swap the doublewords in the vector and the Altivec |
1245 | 2.74k | // ones don't. The register classes on the spill/reload may be different if |
1246 | 2.74k | // the register is defined using an Altivec instruction and is then used by a |
1247 | 2.74k | // VSX instruction. |
1248 | 2.74k | RC = updatedRC(RC); |
1249 | 2.74k | |
1250 | 2.74k | StoreRegToStackSlot(MF, SrcReg, isKill, FrameIdx, RC, NewMIs); |
1251 | 2.74k | |
1252 | 5.48k | for (unsigned i = 0, e = NewMIs.size(); i != e; ++i2.74k ) |
1253 | 2.74k | MBB.insert(MI, NewMIs[i]); |
1254 | 2.74k | |
1255 | 2.74k | const MachineFrameInfo &MFI = MF.getFrameInfo(); |
1256 | 2.74k | MachineMemOperand *MMO = MF.getMachineMemOperand( |
1257 | 2.74k | MachinePointerInfo::getFixedStack(MF, FrameIdx), |
1258 | 2.74k | MachineMemOperand::MOStore, MFI.getObjectSize(FrameIdx), |
1259 | 2.74k | MFI.getObjectAlignment(FrameIdx)); |
1260 | 2.74k | NewMIs.back()->addMemOperand(MF, MMO); |
1261 | 2.74k | } |
1262 | | |
1263 | | void PPCInstrInfo::LoadRegFromStackSlot(MachineFunction &MF, const DebugLoc &DL, |
1264 | | unsigned DestReg, int FrameIdx, |
1265 | | const TargetRegisterClass *RC, |
1266 | | SmallVectorImpl<MachineInstr *> &NewMIs) |
1267 | 2.73k | const { |
1268 | 2.73k | unsigned Opcode = getLoadOpcodeForSpill(PPC::NoRegister, RC); |
1269 | 2.73k | NewMIs.push_back(addFrameReference(BuildMI(MF, DL, get(Opcode), DestReg), |
1270 | 2.73k | FrameIdx)); |
1271 | 2.73k | PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>(); |
1272 | 2.73k | |
1273 | 2.73k | if (PPC::CRRCRegClass.hasSubClassEq(RC) || |
1274 | 2.73k | PPC::CRBITRCRegClass.hasSubClassEq(RC)2.71k ) |
1275 | 32 | FuncInfo->setSpillsCR(); |
1276 | 2.73k | |
1277 | 2.73k | if (PPC::VRSAVERCRegClass.hasSubClassEq(RC)) |
1278 | 0 | FuncInfo->setSpillsVRSAVE(); |
1279 | 2.73k | |
1280 | 2.73k | if (isXFormMemOp(Opcode)) |
1281 | 231 | FuncInfo->setHasNonRISpills(); |
1282 | 2.73k | } |
1283 | | |
1284 | | void |
1285 | | PPCInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB, |
1286 | | MachineBasicBlock::iterator MI, |
1287 | | unsigned DestReg, int FrameIdx, |
1288 | | const TargetRegisterClass *RC, |
1289 | 2.73k | const TargetRegisterInfo *TRI) const { |
1290 | 2.73k | MachineFunction &MF = *MBB.getParent(); |
1291 | 2.73k | SmallVector<MachineInstr*, 4> NewMIs; |
1292 | 2.73k | DebugLoc DL; |
1293 | 2.73k | if (MI != MBB.end()) DL = MI->getDebugLoc()2.73k ; |
1294 | 2.73k | |
1295 | 2.73k | PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>(); |
1296 | 2.73k | FuncInfo->setHasSpills(); |
1297 | 2.73k | |
1298 | 2.73k | // We need to avoid a situation in which the value from a VRRC register is |
1299 | 2.73k | // spilled using an Altivec instruction and reloaded into a VSRC register |
1300 | 2.73k | // using a VSX instruction. The issue with this is that the VSX |
1301 | 2.73k | // load/store instructions swap the doublewords in the vector and the Altivec |
1302 | 2.73k | // ones don't. The register classes on the spill/reload may be different if |
1303 | 2.73k | // the register is defined using an Altivec instruction and is then used by a |
1304 | 2.73k | // VSX instruction. |
1305 | 2.73k | if (Subtarget.hasVSX() && RC == &PPC::VRRCRegClass1.55k ) |
1306 | 209 | RC = &PPC::VSRCRegClass; |
1307 | 2.73k | |
1308 | 2.73k | LoadRegFromStackSlot(MF, DL, DestReg, FrameIdx, RC, NewMIs); |
1309 | 2.73k | |
1310 | 5.47k | for (unsigned i = 0, e = NewMIs.size(); i != e; ++i2.73k ) |
1311 | 2.73k | MBB.insert(MI, NewMIs[i]); |
1312 | 2.73k | |
1313 | 2.73k | const MachineFrameInfo &MFI = MF.getFrameInfo(); |
1314 | 2.73k | MachineMemOperand *MMO = MF.getMachineMemOperand( |
1315 | 2.73k | MachinePointerInfo::getFixedStack(MF, FrameIdx), |
1316 | 2.73k | MachineMemOperand::MOLoad, MFI.getObjectSize(FrameIdx), |
1317 | 2.73k | MFI.getObjectAlignment(FrameIdx)); |
1318 | 2.73k | NewMIs.back()->addMemOperand(MF, MMO); |
1319 | 2.73k | } |
1320 | | |
1321 | | bool PPCInstrInfo:: |
1322 | 13.2k | reverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const { |
1323 | 13.2k | assert(Cond.size() == 2 && "Invalid PPC branch opcode!"); |
1324 | 13.2k | if (Cond[1].getReg() == PPC::CTR8 || Cond[1].getReg() == PPC::CTR13.0k ) |
1325 | 310 | Cond[0].setImm(Cond[0].getImm() == 0 ? 169 : 0241 ); |
1326 | 12.9k | else |
1327 | 12.9k | // Leave the CR# the same, but invert the condition. |
1328 | 12.9k | Cond[0].setImm(PPC::InvertPredicate((PPC::Predicate)Cond[0].getImm())); |
1329 | 13.2k | return false; |
1330 | 13.2k | } |
1331 | | |
1332 | | bool PPCInstrInfo::FoldImmediate(MachineInstr &UseMI, MachineInstr &DefMI, |
1333 | 4.58k | unsigned Reg, MachineRegisterInfo *MRI) const { |
1334 | 4.58k | // For some instructions, it is legal to fold ZERO into the RA register field. |
1335 | 4.58k | // A zero immediate should always be loaded with a single li. |
1336 | 4.58k | unsigned DefOpc = DefMI.getOpcode(); |
1337 | 4.58k | if (DefOpc != PPC::LI && DefOpc != PPC::LI83.84k ) |
1338 | 1.23k | return false; |
1339 | 3.35k | if (!DefMI.getOperand(1).isImm()) |
1340 | 28 | return false; |
1341 | 3.32k | if (DefMI.getOperand(1).getImm() != 0) |
1342 | 2.18k | return false; |
1343 | 1.13k | |
1344 | 1.13k | // Note that we cannot here invert the arguments of an isel in order to fold |
1345 | 1.13k | // a ZERO into what is presented as the second argument. All we have here |
1346 | 1.13k | // is the condition bit, and that might come from a CR-logical bit operation. |
1347 | 1.13k | |
1348 | 1.13k | const MCInstrDesc &UseMCID = UseMI.getDesc(); |
1349 | 1.13k | |
1350 | 1.13k | // Only fold into real machine instructions. |
1351 | 1.13k | if (UseMCID.isPseudo()) |
1352 | 414 | return false; |
1353 | 719 | |
1354 | 719 | unsigned UseIdx; |
1355 | 1.03k | for (UseIdx = 0; UseIdx < UseMI.getNumOperands(); ++UseIdx319 ) |
1356 | 1.03k | if (UseMI.getOperand(UseIdx).isReg() && |
1357 | 1.03k | UseMI.getOperand(UseIdx).getReg() == Reg1.03k ) |
1358 | 719 | break; |
1359 | 719 | |
1360 | 719 | assert(UseIdx < UseMI.getNumOperands() && "Cannot find Reg in UseMI"); |
1361 | 719 | assert(UseIdx < UseMCID.getNumOperands() && "No operand description for Reg"); |
1362 | 719 | |
1363 | 719 | const MCOperandInfo *UseInfo = &UseMCID.OpInfo[UseIdx]; |
1364 | 719 | |
1365 | 719 | // We can fold the zero if this register requires a GPRC_NOR0/G8RC_NOX0 |
1366 | 719 | // register (which might also be specified as a pointer class kind). |
1367 | 719 | if (UseInfo->isLookupPtrRegClass()) { |
1368 | 10 | if (UseInfo->RegClass /* Kind */ != 1) |
1369 | 10 | return false; |
1370 | 709 | } else { |
1371 | 709 | if (UseInfo->RegClass != PPC::GPRC_NOR0RegClassID && |
1372 | 709 | UseInfo->RegClass != PPC::G8RC_NOX0RegClassID681 ) |
1373 | 634 | return false; |
1374 | 75 | } |
1375 | 75 | |
1376 | 75 | // Make sure this is not tied to an output register (or otherwise |
1377 | 75 | // constrained). This is true for ST?UX registers, for example, which |
1378 | 75 | // are tied to their output registers. |
1379 | 75 | if (UseInfo->Constraints != 0) |
1380 | 0 | return false; |
1381 | 75 | |
1382 | 75 | unsigned ZeroReg; |
1383 | 75 | if (UseInfo->isLookupPtrRegClass()) { |
1384 | 0 | bool isPPC64 = Subtarget.isPPC64(); |
1385 | 0 | ZeroReg = isPPC64 ? PPC::ZERO8 : PPC::ZERO; |
1386 | 75 | } else { |
1387 | 75 | ZeroReg = UseInfo->RegClass == PPC::G8RC_NOX0RegClassID ? |
1388 | 47 | PPC::ZERO8 : PPC::ZERO28 ; |
1389 | 75 | } |
1390 | 75 | |
1391 | 75 | bool DeleteDef = MRI->hasOneNonDBGUse(Reg); |
1392 | 75 | UseMI.getOperand(UseIdx).setReg(ZeroReg); |
1393 | 75 | |
1394 | 75 | if (DeleteDef) |
1395 | 72 | DefMI.eraseFromParent(); |
1396 | 75 | |
1397 | 75 | return true; |
1398 | 75 | } |
1399 | | |
1400 | 1 | static bool MBBDefinesCTR(MachineBasicBlock &MBB) { |
1401 | 1 | for (MachineBasicBlock::iterator I = MBB.begin(), IE = MBB.end(); |
1402 | 4 | I != IE; ++I3 ) |
1403 | 3 | if (I->definesRegister(PPC::CTR) || I->definesRegister(PPC::CTR8)) |
1404 | 0 | return true; |
1405 | 1 | return false; |
1406 | 1 | } |
1407 | | |
1408 | | // We should make sure that, if we're going to predicate both sides of a |
1409 | | // condition (a diamond), that both sides don't define the counter register. We |
1410 | | // can predicate counter-decrement-based branches, but while that predicates |
1411 | | // the branching, it does not predicate the counter decrement. If we tried to |
1412 | | // merge the triangle into one predicated block, we'd decrement the counter |
1413 | | // twice. |
1414 | | bool PPCInstrInfo::isProfitableToIfCvt(MachineBasicBlock &TMBB, |
1415 | | unsigned NumT, unsigned ExtraT, |
1416 | | MachineBasicBlock &FMBB, |
1417 | | unsigned NumF, unsigned ExtraF, |
1418 | 1 | BranchProbability Probability) const { |
1419 | 1 | return !(MBBDefinesCTR(TMBB) && MBBDefinesCTR(FMBB)0 ); |
1420 | 1 | } |
1421 | | |
1422 | | |
1423 | 857k | bool PPCInstrInfo::isPredicated(const MachineInstr &MI) const { |
1424 | 857k | // The predicated branches are identified by their type, not really by the |
1425 | 857k | // explicit presence of a predicate. Furthermore, some of them can be |
1426 | 857k | // predicated more than once. Because if conversion won't try to predicate |
1427 | 857k | // any instruction which already claims to be predicated (by returning true |
1428 | 857k | // here), always return false. In doing so, we let isPredicable() be the |
1429 | 857k | // final word on whether not the instruction can be (further) predicated. |
1430 | 857k | |
1431 | 857k | return false; |
1432 | 857k | } |
1433 | | |
1434 | 1.07M | bool PPCInstrInfo::isUnpredicatedTerminator(const MachineInstr &MI) const { |
1435 | 1.07M | if (!MI.isTerminator()) |
1436 | 528k | return false; |
1437 | 550k | |
1438 | 550k | // Conditional branch is a special case. |
1439 | 550k | if (MI.isBranch() && !MI.isBarrier()191k ) |
1440 | 125k | return true; |
1441 | 424k | |
1442 | 424k | return !isPredicated(MI); |
1443 | 424k | } |
1444 | | |
// Rewrite MI into its predicated form using the two-operand PPC predicate
// Pred: Pred[0] is the predicate immediate (a PPC::Predicate, or the 0/1
// toggle for counter branches), Pred[1] is the condition register operand
// (a CR register/bit, or CTR/CTR8 for counter-decrement predication).
// Returns true if MI was rewritten. Only blr/b/bctr[l] (and 64-bit
// variants) are handled; anything else returns false unchanged.
bool PPCInstrInfo::PredicateInstruction(MachineInstr &MI,
                                        ArrayRef<MachineOperand> Pred) const {
  unsigned OpC = MI.getOpcode();
  if (OpC == PPC::BLR || OpC == PPC::BLR8) {
    if (Pred[1].getReg() == PPC::CTR8 || Pred[1].getReg() == PPC::CTR) {
      // Counter predication: blr becomes a decrement-and-return-if-(non)zero.
      bool isPPC64 = Subtarget.isPPC64();
      MI.setDesc(get(Pred[0].getImm() ? (isPPC64 ? PPC::BDNZLR8 : PPC::BDNZLR)
                                      : (isPPC64 ? PPC::BDZLR8 : PPC::BDZLR)));
    } else if (Pred[0].getImm() == PPC::PRED_BIT_SET) {
      // Predicate on a single CR bit being set.
      MI.setDesc(get(PPC::BCLR));
      MachineInstrBuilder(*MI.getParent()->getParent(), MI).add(Pred[1]);
    } else if (Pred[0].getImm() == PPC::PRED_BIT_UNSET) {
      // Predicate on a single CR bit being clear.
      MI.setDesc(get(PPC::BCLRn));
      MachineInstrBuilder(*MI.getParent()->getParent(), MI).add(Pred[1]);
    } else {
      // General predicate: conditional return with predicate imm + CR reg.
      MI.setDesc(get(PPC::BCCLR));
      MachineInstrBuilder(*MI.getParent()->getParent(), MI)
          .addImm(Pred[0].getImm())
          .add(Pred[1]);
    }

    return true;
  } else if (OpC == PPC::B) {
    if (Pred[1].getReg() == PPC::CTR8 || Pred[1].getReg() == PPC::CTR) {
      // Counter predication of an unconditional branch.
      bool isPPC64 = Subtarget.isPPC64();
      MI.setDesc(get(Pred[0].getImm() ? (isPPC64 ? PPC::BDNZ8 : PPC::BDNZ)
                                      : (isPPC64 ? PPC::BDZ8 : PPC::BDZ)));
    } else if (Pred[0].getImm() == PPC::PRED_BIT_SET) {
      // b <target> becomes bc <crbit>, <target>: the destination MBB has to
      // be re-added after the condition operand because of operand order.
      MachineBasicBlock *MBB = MI.getOperand(0).getMBB();
      MI.RemoveOperand(0);

      MI.setDesc(get(PPC::BC));
      MachineInstrBuilder(*MI.getParent()->getParent(), MI)
          .add(Pred[1])
          .addMBB(MBB);
    } else if (Pred[0].getImm() == PPC::PRED_BIT_UNSET) {
      MachineBasicBlock *MBB = MI.getOperand(0).getMBB();
      MI.RemoveOperand(0);

      MI.setDesc(get(PPC::BCn));
      MachineInstrBuilder(*MI.getParent()->getParent(), MI)
          .add(Pred[1])
          .addMBB(MBB);
    } else {
      MachineBasicBlock *MBB = MI.getOperand(0).getMBB();
      MI.RemoveOperand(0);

      MI.setDesc(get(PPC::BCC));
      MachineInstrBuilder(*MI.getParent()->getParent(), MI)
          .addImm(Pred[0].getImm())
          .add(Pred[1])
          .addMBB(MBB);
    }

    return true;
  } else if (OpC == PPC::BCTR || OpC == PPC::BCTR8 || OpC == PPC::BCTRL ||
             OpC == PPC::BCTRL8) {
    // Branch-to-counter cannot itself be predicated on the counter.
    if (Pred[1].getReg() == PPC::CTR8 || Pred[1].getReg() == PPC::CTR)
      llvm_unreachable("Cannot predicate bctr[l] on the ctr register");

    bool setLR = OpC == PPC::BCTRL || OpC == PPC::BCTRL8;
    bool isPPC64 = Subtarget.isPPC64();

    if (Pred[0].getImm() == PPC::PRED_BIT_SET) {
      MI.setDesc(get(isPPC64 ? (setLR ? PPC::BCCTRL8 : PPC::BCCTR8)
                             : (setLR ? PPC::BCCTRL : PPC::BCCTR)));
      MachineInstrBuilder(*MI.getParent()->getParent(), MI).add(Pred[1]);
      return true;
    } else if (Pred[0].getImm() == PPC::PRED_BIT_UNSET) {
      MI.setDesc(get(isPPC64 ? (setLR ? PPC::BCCTRL8n : PPC::BCCTR8n)
                             : (setLR ? PPC::BCCTRLn : PPC::BCCTRn)));
      MachineInstrBuilder(*MI.getParent()->getParent(), MI).add(Pred[1]);
      return true;
    }

    // General predicate form: predicate imm followed by the CR operand.
    MI.setDesc(get(isPPC64 ? (setLR ? PPC::BCCCTRL8 : PPC::BCCCTR8)
                           : (setLR ? PPC::BCCCTRL : PPC::BCCCTR)));
    MachineInstrBuilder(*MI.getParent()->getParent(), MI)
        .addImm(Pred[0].getImm())
        .add(Pred[1]);
    return true;
  }

  return false;
}
1530 | | |
1531 | | bool PPCInstrInfo::SubsumesPredicate(ArrayRef<MachineOperand> Pred1, |
1532 | 2 | ArrayRef<MachineOperand> Pred2) const { |
1533 | 2 | assert(Pred1.size() == 2 && "Invalid PPC first predicate"); |
1534 | 2 | assert(Pred2.size() == 2 && "Invalid PPC second predicate"); |
1535 | 2 | |
1536 | 2 | if (Pred1[1].getReg() == PPC::CTR8 || Pred1[1].getReg() == PPC::CTR) |
1537 | 0 | return false; |
1538 | 2 | if (Pred2[1].getReg() == PPC::CTR8 || Pred2[1].getReg() == PPC::CTR) |
1539 | 0 | return false; |
1540 | 2 | |
1541 | 2 | // P1 can only subsume P2 if they test the same condition register. |
1542 | 2 | if (Pred1[1].getReg() != Pred2[1].getReg()) |
1543 | 1 | return false; |
1544 | 1 | |
1545 | 1 | PPC::Predicate P1 = (PPC::Predicate) Pred1[0].getImm(); |
1546 | 1 | PPC::Predicate P2 = (PPC::Predicate) Pred2[0].getImm(); |
1547 | 1 | |
1548 | 1 | if (P1 == P2) |
1549 | 1 | return true; |
1550 | 0 | |
1551 | 0 | // Does P1 subsume P2, e.g. GE subsumes GT. |
1552 | 0 | if (P1 == PPC::PRED_LE && |
1553 | 0 | (P2 == PPC::PRED_LT || P2 == PPC::PRED_EQ)) |
1554 | 0 | return true; |
1555 | 0 | if (P1 == PPC::PRED_GE && |
1556 | 0 | (P2 == PPC::PRED_GT || P2 == PPC::PRED_EQ)) |
1557 | 0 | return true; |
1558 | 0 | |
1559 | 0 | return false; |
1560 | 0 | } |
1561 | | |
// Return true if MI defines (or clobbers via a regmask) any register that
// can carry a PPC predicate, appending each such operand to Pred.
bool PPCInstrInfo::DefinesPredicate(MachineInstr &MI,
                                    std::vector<MachineOperand> &Pred) const {
  // Note: At the present time, the contents of Pred from this function is
  // unused by IfConversion. This implementation follows ARM by pushing the
  // CR-defining operand. Because the 'DZ' and 'DNZ' count as types of
  // predicate, instructions defining CTR or CTR8 are also included as
  // predicate-defining instructions.

  // Register classes whose members can hold a predicate: CR fields, CR
  // bits, and the 32/64-bit counter registers.
  const TargetRegisterClass *RCs[] =
    { &PPC::CRRCRegClass, &PPC::CRBITRCRegClass,
      &PPC::CTRRCRegClass, &PPC::CTRRC8RegClass };

  bool Found = false;
  for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = MI.getOperand(i);
    // Stop scanning classes for this operand once a match was recorded.
    for (unsigned c = 0; c < array_lengthof(RCs) && !Found; ++c) {
      const TargetRegisterClass *RC = RCs[c];
      if (MO.isReg()) {
        // An explicit def of a register in one of the predicate classes.
        if (MO.isDef() && RC->contains(MO.getReg())) {
          Pred.push_back(MO);
          Found = true;
        }
      } else if (MO.isRegMask()) {
        // A call-clobber mask counts if it clobbers any predicate register.
        for (TargetRegisterClass::iterator I = RC->begin(),
             IE = RC->end(); I != IE; ++I)
          if (MO.clobbersPhysReg(*I)) {
            Pred.push_back(MO);
            Found = true;
          }
      }
    }
  }

  return Found;
}
1597 | | |
1598 | 15.1k | bool PPCInstrInfo::isPredicable(const MachineInstr &MI) const { |
1599 | 15.1k | unsigned OpC = MI.getOpcode(); |
1600 | 15.1k | switch (OpC) { |
1601 | 15.1k | default: |
1602 | 14.5k | return false; |
1603 | 15.1k | case PPC::B: |
1604 | 609 | case PPC::BLR: |
1605 | 609 | case PPC::BLR8: |
1606 | 609 | case PPC::BCTR: |
1607 | 609 | case PPC::BCTR8: |
1608 | 609 | case PPC::BCTRL: |
1609 | 609 | case PPC::BCTRL8: |
1610 | 609 | return true; |
1611 | 15.1k | } |
1612 | 15.1k | } |
1613 | | |
1614 | | bool PPCInstrInfo::analyzeCompare(const MachineInstr &MI, unsigned &SrcReg, |
1615 | | unsigned &SrcReg2, int &Mask, |
1616 | 2.20k | int &Value) const { |
1617 | 2.20k | unsigned Opc = MI.getOpcode(); |
1618 | 2.20k | |
1619 | 2.20k | switch (Opc) { |
1620 | 2.20k | default: return false56 ; |
1621 | 2.20k | case PPC::CMPWI: |
1622 | 832 | case PPC::CMPLWI: |
1623 | 832 | case PPC::CMPDI: |
1624 | 832 | case PPC::CMPLDI: |
1625 | 832 | SrcReg = MI.getOperand(1).getReg(); |
1626 | 832 | SrcReg2 = 0; |
1627 | 832 | Value = MI.getOperand(2).getImm(); |
1628 | 832 | Mask = 0xFFFF; |
1629 | 832 | return true; |
1630 | 1.31k | case PPC::CMPW: |
1631 | 1.31k | case PPC::CMPLW: |
1632 | 1.31k | case PPC::CMPD: |
1633 | 1.31k | case PPC::CMPLD: |
1634 | 1.31k | case PPC::FCMPUS: |
1635 | 1.31k | case PPC::FCMPUD: |
1636 | 1.31k | SrcReg = MI.getOperand(1).getReg(); |
1637 | 1.31k | SrcReg2 = MI.getOperand(2).getReg(); |
1638 | 1.31k | Value = 0; |
1639 | 1.31k | Mask = 0; |
1640 | 1.31k | return true; |
1641 | 2.20k | } |
1642 | 2.20k | } |
1643 | | |
// Peephole: try to delete CmpInstr (a compare against zero, or against +/-1
// that can be rewritten as a compare against zero) by converting the
// instruction that defines SrcReg — or a nearby SUBF — to its record form,
// which sets CR0 as a side effect. Predicates/ISEL subregs of the users of
// the compare's CR result are patched up as needed. Returns true if the
// compare was eliminated.
bool PPCInstrInfo::optimizeCompareInstr(MachineInstr &CmpInstr, unsigned SrcReg,
                                        unsigned SrcReg2, int Mask, int Value,
                                        const MachineRegisterInfo *MRI) const {
  if (DisableCmpOpt)
    return false;

  int OpC = CmpInstr.getOpcode();
  unsigned CRReg = CmpInstr.getOperand(0).getReg();

  // FP record forms set CR1 based on the exception status bits, not a
  // comparison with zero.
  if (OpC == PPC::FCMPUS || OpC == PPC::FCMPUD)
    return false;

  const TargetRegisterInfo *TRI = &getRegisterInfo();
  // The record forms set the condition register based on a signed comparison
  // with zero (so says the ISA manual). This is not as straightforward as it
  // seems, however, because this is always a 64-bit comparison on PPC64, even
  // for instructions that are 32-bit in nature (like slw for example).
  // So, on PPC32, for unsigned comparisons, we can use the record forms only
  // for equality checks (as those don't depend on the sign). On PPC64,
  // we are restricted to equality for unsigned 64-bit comparisons and for
  // signed 32-bit comparisons the applicability is more restricted.
  bool isPPC64 = Subtarget.isPPC64();
  bool is32BitSignedCompare   = OpC ==  PPC::CMPWI || OpC == PPC::CMPW;
  bool is32BitUnsignedCompare = OpC == PPC::CMPLWI || OpC == PPC::CMPLW;
  bool is64BitUnsignedCompare = OpC == PPC::CMPLDI || OpC == PPC::CMPLD;

  // Look through copies unless that gets us to a physical register.
  unsigned ActualSrc = TRI->lookThruCopyLike(SrcReg, MRI);
  if (TargetRegisterInfo::isVirtualRegister(ActualSrc))
    SrcReg = ActualSrc;

  // Get the unique definition of SrcReg.
  MachineInstr *MI = MRI->getUniqueVRegDef(SrcReg);
  if (!MI) return false;

  // Work out whether the optimization applies at all, and whether it is
  // limited to equality tests (where sign of the comparison is irrelevant).
  bool equalityOnly = false;
  bool noSub = false;
  if (isPPC64) {
    if (is32BitSignedCompare) {
      // We can perform this optimization only if MI is sign-extending.
      if (isSignExtended(*MI))
        noSub = true;
      else
        return false;
    } else if (is32BitUnsignedCompare) {
      // We can perform this optimization, equality only, if MI is
      // zero-extending.
      if (isZeroExtended(*MI)) {
        noSub = true;
        equalityOnly = true;
      } else
        return false;
    } else
      equalityOnly = is64BitUnsignedCompare;
  } else
    equalityOnly = is32BitUnsignedCompare;

  if (equalityOnly) {
    // We need to check the uses of the condition register in order to reject
    // non-equality comparisons.
    for (MachineRegisterInfo::use_instr_iterator
         I = MRI->use_instr_begin(CRReg), IE = MRI->use_instr_end();
         I != IE; ++I) {
      MachineInstr *UseMI = &*I;
      if (UseMI->getOpcode() == PPC::BCC) {
        PPC::Predicate Pred = (PPC::Predicate)UseMI->getOperand(0).getImm();
        unsigned PredCond = PPC::getPredicateCondition(Pred);
        // We ignore hint bits when checking for non-equality comparisons.
        if (PredCond != PPC::PRED_EQ && PredCond != PPC::PRED_NE)
          return false;
      } else if (UseMI->getOpcode() == PPC::ISEL ||
                 UseMI->getOpcode() == PPC::ISEL8) {
        unsigned SubIdx = UseMI->getOperand(3).getSubReg();
        if (SubIdx != PPC::sub_eq)
          return false;
      } else
        return false;
    }
  }

  MachineBasicBlock::iterator I = CmpInstr;

  // Scan forward to find the first use of the compare.
  for (MachineBasicBlock::iterator EL = CmpInstr.getParent()->end(); I != EL;
       ++I) {
    bool FoundUse = false;
    for (MachineRegisterInfo::use_instr_iterator
         J = MRI->use_instr_begin(CRReg), JE = MRI->use_instr_end();
         J != JE; ++J)
      if (&*J == &*I) {
        FoundUse = true;
        break;
      }

    if (FoundUse)
      break;
  }

  SmallVector<std::pair<MachineOperand*, PPC::Predicate>, 4> PredsToUpdate;
  SmallVector<std::pair<MachineOperand*, unsigned>, 4> SubRegsToUpdate;

  // There are two possible candidates which can be changed to set CR[01].
  // One is MI, the other is a SUB instruction.
  // For CMPrr(r1,r2), we are looking for SUB(r1,r2) or SUB(r2,r1).
  MachineInstr *Sub = nullptr;
  if (SrcReg2 != 0)
    // MI is not a candidate for CMPrr.
    MI = nullptr;
  // FIXME: Conservatively refuse to convert an instruction which isn't in the
  // same BB as the comparison. This is to allow the check below to avoid calls
  // (and other explicit clobbers); instead we should really check for these
  // more explicitly (in at least a few predecessors).
  else if (MI->getParent() != CmpInstr.getParent())
    return false;
  else if (Value != 0) {
    // The record-form instructions set CR bit based on signed comparison
    // against 0. We try to convert a compare against 1 or -1 into a compare
    // against 0 to exploit record-form instructions. For example, we change
    // the condition "greater than -1" into "greater than or equal to 0"
    // and "less than 1" into "less than or equal to 0".

    // Since we optimize comparison based on a specific branch condition,
    // we don't optimize if condition code is used by more than once.
    if (equalityOnly || !MRI->hasOneUse(CRReg))
      return false;

    MachineInstr *UseMI = &*MRI->use_instr_begin(CRReg);
    if (UseMI->getOpcode() != PPC::BCC)
      return false;

    PPC::Predicate Pred = (PPC::Predicate)UseMI->getOperand(0).getImm();
    unsigned PredCond = PPC::getPredicateCondition(Pred);
    unsigned PredHint = PPC::getPredicateHint(Pred);
    int16_t Immed = (int16_t)Value;

    // When modifying the condition in the predicate, we propagate hint bits
    // from the original predicate to the new one.
    if (Immed == -1 && PredCond == PPC::PRED_GT)
      // We convert "greater than -1" into "greater than or equal to 0",
      // since we are assuming signed comparison by !equalityOnly
      Pred = PPC::getPredicate(PPC::PRED_GE, PredHint);
    else if (Immed == -1 && PredCond == PPC::PRED_LE)
      // We convert "less than or equal to -1" into "less than 0".
      Pred = PPC::getPredicate(PPC::PRED_LT, PredHint);
    else if (Immed == 1 && PredCond == PPC::PRED_LT)
      // We convert "less than 1" into "less than or equal to 0".
      Pred = PPC::getPredicate(PPC::PRED_LE, PredHint);
    else if (Immed == 1 && PredCond == PPC::PRED_GE)
      // We convert "greater than or equal to 1" into "greater than 0".
      Pred = PPC::getPredicate(PPC::PRED_GT, PredHint);
    else
      return false;

    PredsToUpdate.push_back(std::make_pair(&(UseMI->getOperand(0)), Pred));
  }

  // Search for Sub.
  --I;

  // Get ready to iterate backward from CmpInstr.
  MachineBasicBlock::iterator E = MI, B = CmpInstr.getParent()->begin();

  for (; I != E && !noSub; --I) {
    const MachineInstr &Instr = *I;
    unsigned IOpC = Instr.getOpcode();

    if (&*I != &CmpInstr && (Instr.modifiesRegister(PPC::CR0, TRI) ||
                             Instr.readsRegister(PPC::CR0, TRI)))
      // This instruction modifies or uses the record condition register after
      // the one we want to change. While we could do this transformation, it
      // would likely not be profitable. This transformation removes one
      // instruction, and so even forcing RA to generate one move probably
      // makes it unprofitable.
      return false;

    // Check whether CmpInstr can be made redundant by the current instruction.
    if ((OpC == PPC::CMPW || OpC == PPC::CMPLW ||
         OpC == PPC::CMPD || OpC == PPC::CMPLD) &&
        (IOpC == PPC::SUBF || IOpC == PPC::SUBF8) &&
        ((Instr.getOperand(1).getReg() == SrcReg &&
          Instr.getOperand(2).getReg() == SrcReg2) ||
         (Instr.getOperand(1).getReg() == SrcReg2 &&
          Instr.getOperand(2).getReg() == SrcReg))) {
      Sub = &*I;
      break;
    }

    if (I == B)
      // The 'and' is below the comparison instruction.
      return false;
  }

  // Return false if no candidates exist.
  if (!MI && !Sub)
    return false;

  // The single candidate is called MI.
  if (!MI) MI = Sub;

  int NewOpC = -1;
  int MIOpC = MI->getOpcode();
  if (MIOpC == PPC::ANDIo || MIOpC == PPC::ANDIo8 ||
      MIOpC == PPC::ANDISo || MIOpC == PPC::ANDISo8)
    NewOpC = MIOpC;
  else {
    NewOpC = PPC::getRecordFormOpcode(MIOpC);
    if (NewOpC == -1 && PPC::getNonRecordFormOpcode(MIOpC) != -1)
      NewOpC = MIOpC;
  }

  // FIXME: On the non-embedded POWER architectures, only some of the record
  // forms are fast, and we should use only the fast ones.

  // The defining instruction has a record form (or is already a record
  // form). It is possible, however, that we'll need to reverse the condition
  // code of the users.
  if (NewOpC == -1)
    return false;

  // If we have SUB(r1, r2) and CMP(r2, r1), the condition code based on CMP
  // needs to be updated to be based on SUB.  Push the condition code
  // operands to OperandsToUpdate.  If it is safe to remove CmpInstr, the
  // condition code of these operands will be modified.
  // Here, Value == 0 means we haven't converted comparison against 1 or -1 to
  // comparison against 0, which may modify predicate.
  bool ShouldSwap = false;
  if (Sub && Value == 0) {
    ShouldSwap = SrcReg2 != 0 && Sub->getOperand(1).getReg() == SrcReg2 &&
      Sub->getOperand(2).getReg() == SrcReg;

    // The operands to subf are the opposite of sub, so only in the fixed-point
    // case, invert the order.
    ShouldSwap = !ShouldSwap;
  }

  if (ShouldSwap)
    for (MachineRegisterInfo::use_instr_iterator
         I = MRI->use_instr_begin(CRReg), IE = MRI->use_instr_end();
         I != IE; ++I) {
      MachineInstr *UseMI = &*I;
      if (UseMI->getOpcode() == PPC::BCC) {
        PPC::Predicate Pred = (PPC::Predicate) UseMI->getOperand(0).getImm();
        unsigned PredCond = PPC::getPredicateCondition(Pred);
        assert((!equalityOnly ||
                PredCond == PPC::PRED_EQ || PredCond == PPC::PRED_NE) &&
               "Invalid predicate for equality-only optimization");
        (void)PredCond; // To suppress warning in release build.
        PredsToUpdate.push_back(std::make_pair(&(UseMI->getOperand(0)),
                                PPC::getSwappedPredicate(Pred)));
      } else if (UseMI->getOpcode() == PPC::ISEL ||
                 UseMI->getOpcode() == PPC::ISEL8) {
        unsigned NewSubReg = UseMI->getOperand(3).getSubReg();
        assert((!equalityOnly || NewSubReg == PPC::sub_eq) &&
               "Invalid CR bit for equality-only optimization");

        // Swapping operands mirrors lt <-> gt; eq is unaffected.
        if (NewSubReg == PPC::sub_lt)
          NewSubReg = PPC::sub_gt;
        else if (NewSubReg == PPC::sub_gt)
          NewSubReg = PPC::sub_lt;

        SubRegsToUpdate.push_back(std::make_pair(&(UseMI->getOperand(3)),
                                                 NewSubReg));
      } else // We need to abort on a user we don't understand.
        return false;
    }
  assert(!(Value != 0 && ShouldSwap) &&
         "Non-zero immediate support and ShouldSwap"
         "may conflict in updating predicate");

  // Create a new virtual register to hold the value of the CR set by the
  // record-form instruction. If the instruction was not previously in
  // record form, then set the kill flag on the CR.
  CmpInstr.eraseFromParent();

  MachineBasicBlock::iterator MII = MI;
  BuildMI(*MI->getParent(), std::next(MII), MI->getDebugLoc(),
          get(TargetOpcode::COPY), CRReg)
    .addReg(PPC::CR0, MIOpC != NewOpC ? RegState::Kill : 0);

  // Even if CR0 register were dead before, it is alive now since the
  // instruction we just built uses it.
  MI->clearRegisterDeads(PPC::CR0);

  if (MIOpC != NewOpC) {
    // We need to be careful here: we're replacing one instruction with
    // another, and we need to make sure that we get all of the right
    // implicit uses and defs. On the other hand, the caller may be holding
    // an iterator to this instruction, and so we can't delete it (this is
    // specifically the case if this is the instruction directly after the
    // compare).

    // Rotates are expensive instructions. If we're emitting a record-form
    // rotate that can just be an andi/andis, we should just emit that.
    if (MIOpC == PPC::RLWINM || MIOpC == PPC::RLWINM8) {
      unsigned GPRRes = MI->getOperand(0).getReg();
      int64_t SH = MI->getOperand(2).getImm();
      int64_t MB = MI->getOperand(3).getImm();
      int64_t ME = MI->getOperand(4).getImm();
      // We can only do this if both the start and end of the mask are in the
      // same halfword.
      bool MBInLoHWord = MB >= 16;
      bool MEInLoHWord = ME >= 16;
      uint64_t Mask = ~0LLU;

      if (MB <= ME && MBInLoHWord == MEInLoHWord && SH == 0) {
        Mask = ((1LLU << (32 - MB)) - 1) & ~((1LLU << (31 - ME)) - 1);
        // The mask value needs to shift right 16 if we're emitting andis.
        Mask >>= MBInLoHWord ? 0 : 16;
        NewOpC = MIOpC == PPC::RLWINM ?
          (MBInLoHWord ? PPC::ANDIo : PPC::ANDISo) :
          (MBInLoHWord ? PPC::ANDIo8 : PPC::ANDISo8);
      } else if (MRI->use_empty(GPRRes) && (ME == 31) &&
                 (ME - MB + 1 == SH) && (MB >= 16)) {
        // If we are rotating by the exact number of bits as are in the mask
        // and the mask is in the least significant bits of the register,
        // that's just an andis. (as long as the GPR result has no uses).
        Mask = ((1LLU << 32) - 1) & ~((1LLU << (32 - SH)) - 1);
        Mask >>= 16;
        NewOpC = MIOpC == PPC::RLWINM ? PPC::ANDISo : PPC::ANDISo8;
      }
      // If we've set the mask, we can transform.
      if (Mask != ~0LLU) {
        MI->RemoveOperand(4);
        MI->RemoveOperand(3);
        MI->getOperand(2).setImm(Mask);
        NumRcRotatesConvertedToRcAnd++;
      }
    } else if (MIOpC == PPC::RLDICL && MI->getOperand(2).getImm() == 0) {
      // A 64-bit clear-left with no rotation and a mask fitting in 16 bits
      // is just an andi.
      int64_t MB = MI->getOperand(3).getImm();
      if (MB >= 48) {
        uint64_t Mask = (1LLU << (63 - MB + 1)) - 1;
        NewOpC = PPC::ANDIo8;
        MI->RemoveOperand(3);
        MI->getOperand(2).setImm(Mask);
        NumRcRotatesConvertedToRcAnd++;
      }
    }

    const MCInstrDesc &NewDesc = get(NewOpC);
    MI->setDesc(NewDesc);

    // Carry over any implicit defs/uses the new opcode requires that the
    // rewritten instruction does not already carry.
    if (NewDesc.ImplicitDefs)
      for (const MCPhysReg *ImpDefs = NewDesc.getImplicitDefs();
           *ImpDefs; ++ImpDefs)
        if (!MI->definesRegister(*ImpDefs))
          MI->addOperand(*MI->getParent()->getParent(),
                         MachineOperand::CreateReg(*ImpDefs, true, true));
    if (NewDesc.ImplicitUses)
      for (const MCPhysReg *ImpUses = NewDesc.getImplicitUses();
           *ImpUses; ++ImpUses)
        if (!MI->readsRegister(*ImpUses))
          MI->addOperand(*MI->getParent()->getParent(),
                         MachineOperand::CreateReg(*ImpUses, false, true));
  }
  assert(MI->definesRegister(PPC::CR0) &&
         "Record-form instruction does not define cr0?");

  // Modify the condition code of operands in OperandsToUpdate.
  // Since we have SUB(r1, r2) and CMP(r2, r1), the condition code needs to
  // be changed from r2 > r1 to r1 < r2, from r2 < r1 to r1 > r2, etc.
  for (unsigned i = 0, e = PredsToUpdate.size(); i < e; i++)
    PredsToUpdate[i].first->setImm(PredsToUpdate[i].second);

  for (unsigned i = 0, e = SubRegsToUpdate.size(); i < e; i++)
    SubRegsToUpdate[i].first->setSubReg(SubRegsToUpdate[i].second);

  return true;
}
2014 | | |
2015 | | /// GetInstSize - Return the number of bytes of code the specified |
2016 | | /// instruction may be. This returns the maximum number of bytes. |
2017 | | /// |
2018 | 111k | unsigned PPCInstrInfo::getInstSizeInBytes(const MachineInstr &MI) const { |
2019 | 111k | unsigned Opcode = MI.getOpcode(); |
2020 | 111k | |
2021 | 111k | if (Opcode == PPC::INLINEASM || Opcode == PPC::INLINEASM_BR111k ) { |
2022 | 420 | const MachineFunction *MF = MI.getParent()->getParent(); |
2023 | 420 | const char *AsmStr = MI.getOperand(0).getSymbolName(); |
2024 | 420 | return getInlineAsmLength(AsmStr, *MF->getTarget().getMCAsmInfo()); |
2025 | 111k | } else if (Opcode == TargetOpcode::STACKMAP) { |
2026 | 17 | StackMapOpers Opers(&MI); |
2027 | 17 | return Opers.getNumPatchBytes(); |
2028 | 111k | } else if (Opcode == TargetOpcode::PATCHPOINT) { |
2029 | 40 | PatchPointOpers Opers(&MI); |
2030 | 40 | return Opers.getNumPatchBytes(); |
2031 | 111k | } else { |
2032 | 111k | return get(Opcode).getSize(); |
2033 | 111k | } |
2034 | 111k | } |
2035 | | |
2036 | | std::pair<unsigned, unsigned> |
2037 | 6 | PPCInstrInfo::decomposeMachineOperandsTargetFlags(unsigned TF) const { |
2038 | 6 | const unsigned Mask = PPCII::MO_ACCESS_MASK; |
2039 | 6 | return std::make_pair(TF & Mask, TF & ~Mask); |
2040 | 6 | } |
2041 | | |
// Map each direct (mutually exclusive) PPC operand target flag to a stable
// string name so the MIR printer/parser can serialize it.
ArrayRef<std::pair<unsigned, const char *>>
PPCInstrInfo::getSerializableDirectMachineOperandTargetFlags() const {
  using namespace PPCII;
  static const std::pair<unsigned, const char *> TargetFlags[] = {
      {MO_LO, "ppc-lo"},
      {MO_HA, "ppc-ha"},
      {MO_TPREL_LO, "ppc-tprel-lo"},
      {MO_TPREL_HA, "ppc-tprel-ha"},
      {MO_DTPREL_LO, "ppc-dtprel-lo"},
      {MO_TLSLD_LO, "ppc-tlsld-lo"},
      {MO_TOC_LO, "ppc-toc-lo"},
      {MO_TLS, "ppc-tls"}};
  return makeArrayRef(TargetFlags);
}
2056 | | |
// Map each bitmask-style (combinable) PPC operand target flag to a stable
// string name for MIR serialization.
ArrayRef<std::pair<unsigned, const char *>>
PPCInstrInfo::getSerializableBitmaskMachineOperandTargetFlags() const {
  using namespace PPCII;
  static const std::pair<unsigned, const char *> TargetFlags[] = {
      {MO_PLT, "ppc-plt"},
      {MO_PIC_FLAG, "ppc-pic"},
      {MO_NLP_FLAG, "ppc-nlp"},
      {MO_NLP_HIDDEN_FLAG, "ppc-nlp-hidden"}};
  return makeArrayRef(TargetFlags);
}
2067 | | |
// Expand VSX Memory Pseudo instruction to either a VSX or a FP instruction.
// The VSX versions have the advantage of a full 64-register target whereas
// the FP ones have the advantage of lower latency and higher throughput. So
// what we are after is using the faster instructions in low register pressure
// situations and using the larger register file in high register pressure
// situations.
bool PPCInstrInfo::expandVSXMemPseudo(MachineInstr &MI) const {
  // For each pseudo, UpperOpcode is the VSX form (upper half of the VSX
  // register file) and LowerOpcode is the classic FP form.
  unsigned UpperOpcode, LowerOpcode;
  switch (MI.getOpcode()) {
  case PPC::DFLOADf32:
    UpperOpcode = PPC::LXSSP;
    LowerOpcode = PPC::LFS;
    break;
  case PPC::DFLOADf64:
    UpperOpcode = PPC::LXSD;
    LowerOpcode = PPC::LFD;
    break;
  case PPC::DFSTOREf32:
    UpperOpcode = PPC::STXSSP;
    LowerOpcode = PPC::STFS;
    break;
  case PPC::DFSTOREf64:
    UpperOpcode = PPC::STXSD;
    LowerOpcode = PPC::STFD;
    break;
  case PPC::XFLOADf32:
    UpperOpcode = PPC::LXSSPX;
    LowerOpcode = PPC::LFSX;
    break;
  case PPC::XFLOADf64:
    UpperOpcode = PPC::LXSDX;
    LowerOpcode = PPC::LFDX;
    break;
  case PPC::XFSTOREf32:
    UpperOpcode = PPC::STXSSPX;
    LowerOpcode = PPC::STFSX;
    break;
  case PPC::XFSTOREf64:
    UpperOpcode = PPC::STXSDX;
    LowerOpcode = PPC::STFDX;
    break;
  case PPC::LIWAX:
    UpperOpcode = PPC::LXSIWAX;
    LowerOpcode = PPC::LFIWAX;
    break;
  case PPC::LIWZX:
    UpperOpcode = PPC::LXSIWZX;
    LowerOpcode = PPC::LFIWZX;
    break;
  case PPC::STIWX:
    UpperOpcode = PPC::STXSIWX;
    LowerOpcode = PPC::STFIWX;
    break;
  default:
    llvm_unreachable("Unknown Operation!");
  }

  // Pick the opcode based on the register actually allocated: FP registers
  // (and the aliasing low VSX registers VSL0-VSL31) take the FP form, the
  // remaining VSX registers take the VSX form.
  unsigned TargetReg = MI.getOperand(0).getReg();
  unsigned Opcode;
  if ((TargetReg >= PPC::F0 && TargetReg <= PPC::F31) ||
      (TargetReg >= PPC::VSL0 && TargetReg <= PPC::VSL31))
    Opcode = LowerOpcode;
  else
    Opcode = UpperOpcode;
  MI.setDesc(get(Opcode));
  return true;
}
2135 | | |
2136 | 1.00k | static bool isAnImmediateOperand(const MachineOperand &MO) { |
2137 | 1.00k | return MO.isCPI() || MO.isGlobal()257 || MO.isImm()144 ; |
2138 | 1.00k | } |
2139 | | |
// Expand PPC pseudo-instructions that survive register allocation into real
// machine instructions. Returns true if MI was expanded (typically rewritten
// in place via setDesc), false if the opcode is not a pseudo handled here.
bool PPCInstrInfo::expandPostRAPseudo(MachineInstr &MI) const {
  auto &MBB = *MI.getParent();
  auto DL = MI.getDebugLoc();

  switch (MI.getOpcode()) {
  case TargetOpcode::LOAD_STACK_GUARD: {
    assert(Subtarget.isTargetLinux() &&
           "Only Linux target is expected to contain LOAD_STACK_GUARD");
    // Rewrite into a load from [Reg + Offset]; the base/offset pairs are the
    // fixed Linux stack-guard locations (X13/R2 — presumably the thread
    // pointer registers; see the stack-protector ABI).
    const int64_t Offset = Subtarget.isPPC64() ? -0x7010 : -0x7008;
    const unsigned Reg = Subtarget.isPPC64() ? PPC::X13 : PPC::R2;
    MI.setDesc(get(Subtarget.isPPC64() ? PPC::LD : PPC::LWZ));
    MachineInstrBuilder(*MI.getParent()->getParent(), MI)
        .addImm(Offset)
        .addReg(Reg);
    return true;
  }
  // D-form (reg + imm) VSX memory pseudos: only valid on Power9.
  case PPC::DFLOADf32:
  case PPC::DFLOADf64:
  case PPC::DFSTOREf32:
  case PPC::DFSTOREf64: {
    assert(Subtarget.hasP9Vector() &&
           "Invalid D-Form Pseudo-ops on Pre-P9 target.");
    assert(MI.getOperand(2).isReg() &&
           isAnImmediateOperand(MI.getOperand(1)) &&
           "D-form op must have register and immediate operands");
    return expandVSXMemPseudo(MI);
  }
  // X-form (reg + reg) single-precision / integer-conversion pseudos:
  // require Power8 vector support.
  case PPC::XFLOADf32:
  case PPC::XFSTOREf32:
  case PPC::LIWAX:
  case PPC::LIWZX:
  case PPC::STIWX: {
    assert(Subtarget.hasP8Vector() &&
           "Invalid X-Form Pseudo-ops on Pre-P8 target.");
    assert(MI.getOperand(2).isReg() && MI.getOperand(1).isReg() &&
           "X-form op must have register and register operands");
    return expandVSXMemPseudo(MI);
  }
  // X-form double-precision pseudos: any VSX-capable target.
  case PPC::XFLOADf64:
  case PPC::XFSTOREf64: {
    assert(Subtarget.hasVSX() &&
           "Invalid X-Form Pseudo-ops on target that has no VSX.");
    assert(MI.getOperand(2).isReg() && MI.getOperand(1).isReg() &&
           "X-form op must have register and register operands");
    return expandVSXMemPseudo(MI);
  }
  // SPILLTOVSR_* pseudos reload/spill a value whose register class was
  // decided during allocation; pick the GPR or VSR opcode accordingly.
  case PPC::SPILLTOVSR_LD: {
    unsigned TargetReg = MI.getOperand(0).getReg();
    if (PPC::VSFRCRegClass.contains(TargetReg)) {
      // Landed in a VSR: become a D-form VSX load pseudo and expand again.
      MI.setDesc(get(PPC::DFLOADf64));
      return expandPostRAPseudo(MI);
    }
    else
      MI.setDesc(get(PPC::LD));
    return true;
  }
  case PPC::SPILLTOVSR_ST: {
    unsigned SrcReg = MI.getOperand(0).getReg();
    if (PPC::VSFRCRegClass.contains(SrcReg)) {
      NumStoreSPILLVSRRCAsVec++;
      MI.setDesc(get(PPC::DFSTOREf64));
      return expandPostRAPseudo(MI);
    } else {
      NumStoreSPILLVSRRCAsGpr++;
      MI.setDesc(get(PPC::STD));
    }
    return true;
  }
  case PPC::SPILLTOVSR_LDX: {
    unsigned TargetReg = MI.getOperand(0).getReg();
    if (PPC::VSFRCRegClass.contains(TargetReg))
      MI.setDesc(get(PPC::LXSDX));
    else
      MI.setDesc(get(PPC::LDX));
    return true;
  }
  case PPC::SPILLTOVSR_STX: {
    unsigned SrcReg = MI.getOperand(0).getReg();
    if (PPC::VSFRCRegClass.contains(SrcReg)) {
      NumStoreSPILLVSRRCAsVec++;
      MI.setDesc(get(PPC::STXSDX));
    } else {
      NumStoreSPILLVSRRCAsGpr++;
      MI.setDesc(get(PPC::STDX));
    }
    return true;
  }

  case PPC::CFENCE8: {
    // Expand CFENCE8: compare the value against itself into CR7, insert a
    // conditional control dependency on that comparison, then recycle MI
    // itself as the trailing isync.
    auto Val = MI.getOperand(0).getReg();
    BuildMI(MBB, MI, DL, get(PPC::CMPD), PPC::CR7).addReg(Val).addReg(Val);
    BuildMI(MBB, MI, DL, get(PPC::CTRL_DEP))
        .addImm(PPC::PRED_NE_MINUS)
        .addReg(PPC::CR7)
        .addImm(1);
    MI.setDesc(get(PPC::ISYNC));
    MI.RemoveOperand(0);
    return true;
  }
  }
  return false;
}
2242 | | |
2243 | | // Essentially a compile-time implementation of a compare->isel sequence. |
2244 | | // It takes two constants to compare, along with the true/false registers |
2245 | | // and the comparison type (as a subreg to a CR field) and returns one |
2246 | | // of the true/false registers, depending on the comparison results. |
2247 | | static unsigned selectReg(int64_t Imm1, int64_t Imm2, unsigned CompareOpc, |
2248 | | unsigned TrueReg, unsigned FalseReg, |
2249 | 8 | unsigned CRSubReg) { |
2250 | 8 | // Signed comparisons. The immediates are assumed to be sign-extended. |
2251 | 8 | if (CompareOpc == PPC::CMPWI || CompareOpc == PPC::CMPDI5 ) { |
2252 | 5 | switch (CRSubReg) { |
2253 | 5 | default: 0 llvm_unreachable0 ("Unknown integer comparison type."); |
2254 | 5 | case PPC::sub_lt: |
2255 | 2 | return Imm1 < Imm2 ? TrueReg0 : FalseReg; |
2256 | 5 | case PPC::sub_gt: |
2257 | 3 | return Imm1 > Imm2 ? TrueReg1 : FalseReg2 ; |
2258 | 5 | case PPC::sub_eq: |
2259 | 0 | return Imm1 == Imm2 ? TrueReg : FalseReg; |
2260 | 3 | } |
2261 | 3 | } |
2262 | 3 | // Unsigned comparisons. |
2263 | 3 | else if (CompareOpc == PPC::CMPLWI || CompareOpc == PPC::CMPLDI2 ) { |
2264 | 3 | switch (CRSubReg) { |
2265 | 3 | default: 0 llvm_unreachable0 ("Unknown integer comparison type."); |
2266 | 3 | case PPC::sub_lt: |
2267 | 0 | return (uint64_t)Imm1 < (uint64_t)Imm2 ? TrueReg : FalseReg; |
2268 | 3 | case PPC::sub_gt: |
2269 | 2 | return (uint64_t)Imm1 > (uint64_t)Imm2 ? TrueReg1 : FalseReg1 ; |
2270 | 3 | case PPC::sub_eq: |
2271 | 1 | return Imm1 == Imm2 ? TrueReg : FalseReg0 ; |
2272 | 0 | } |
2273 | 0 | } |
2274 | 0 | return PPC::NoRegister; |
2275 | 0 | } |
2276 | | |
// Turn the register operand at index OpNo of MI into the immediate Imm,
// and drop any now-stale implicit use of that same register.
void PPCInstrInfo::replaceInstrOperandWithImm(MachineInstr &MI,
                                              unsigned OpNo,
                                              int64_t Imm) const {
  assert(MI.getOperand(OpNo).isReg() && "Operand must be a REG");
  // Replace the REG with the Immediate.
  unsigned InUseReg = MI.getOperand(OpNo).getReg();
  MI.getOperand(OpNo).ChangeToImmediate(Imm);

  // Fast path: nothing implicit to clean up.
  if (empty(MI.implicit_operands()))
    return;

  // We need to make sure that the MI didn't have any implicit use
  // of this REG any more.
  const TargetRegisterInfo *TRI = &getRegisterInfo();
  int UseOpIdx = MI.findRegisterUseOperandIdx(InUseReg, false, TRI);
  if (UseOpIdx >= 0) {
    MachineOperand &MO = MI.getOperand(UseOpIdx);
    if (MO.isImplicit())
      // The operands must always be in the following order:
      // - explicit reg defs,
      // - other explicit operands (reg uses, immediates, etc.),
      // - implicit reg defs
      // - implicit reg uses
      // Therefore, removing the implicit operand won't change the explicit
      // operands layout.
      MI.RemoveOperand(UseOpIdx);
  }
}
2305 | | |
// Replace an instruction with one that materializes a constant (and sets
// CR0 if the original instruction was a record-form instruction).
// All operands beyond the destination (and, for record forms, the source
// register ANDIo needs) are stripped before the opcode swap.
void PPCInstrInfo::replaceInstrWithLI(MachineInstr &MI,
                                      const LoadImmediateInfo &LII) const {
  // Remove existing operands. Record forms keep operand 1 as well, since
  // ANDIo/ANDIo8 take a source register; LI/LI8 keep only the def.
  int OperandToKeep = LII.SetCR ? 1 : 0;
  for (int i = MI.getNumOperands() - 1; i > OperandToKeep; i--)
    MI.RemoveOperand(i);

  // Replace the instruction.
  if (LII.SetCR) {
    MI.setDesc(get(LII.Is64Bit ? PPC::ANDIo8 : PPC::ANDIo));
    // Set the immediate, and add the implicit CR0 def the record form
    // requires.
    MachineInstrBuilder(*MI.getParent()->getParent(), MI)
        .addImm(LII.Imm).addReg(PPC::CR0, RegState::ImplicitDefine);
    return;
  }
  else
    MI.setDesc(get(LII.Is64Bit ? PPC::LI8 : PPC::LI));

  // Set the immediate.
  MachineInstrBuilder(*MI.getParent()->getParent(), MI)
      .addImm(LII.Imm);
}
2330 | | |
// Find a defining instruction whose immediate could be forwarded into MI:
// pre-RA (SSA), an LI/LI8 reached by looking through COPY-like instructions;
// post-RA, an LI/LI8/ADDI/ADDI8/ADDItocL found by scanning backwards in the
// block. On success, OpNoForForwarding is the index of MI's operand that the
// immediate feeds. SeenIntermediateUse (post-RA only) reports whether another
// instruction reads the register between the def and MI.
MachineInstr *PPCInstrInfo::getForwardingDefMI(
  MachineInstr &MI,
  unsigned &OpNoForForwarding,
  bool &SeenIntermediateUse) const {
  OpNoForForwarding = ~0U;
  MachineInstr *DefMI = nullptr;
  MachineRegisterInfo *MRI = &MI.getParent()->getParent()->getRegInfo();
  const TargetRegisterInfo *TRI = &getRegisterInfo();
  // If we're in SSA, get the defs through the MRI. Otherwise, only look
  // within the basic block to see if the register is defined using an LI/LI8.
  if (MRI->isSSA()) {
    for (int i = 1, e = MI.getNumOperands(); i < e; i++) {
      if (!MI.getOperand(i).isReg())
        continue;
      unsigned Reg = MI.getOperand(i).getReg();
      if (!TargetRegisterInfo::isVirtualRegister(Reg))
        continue;
      // Look through COPY-like instructions to the real defining vreg.
      unsigned TrueReg = TRI->lookThruCopyLike(Reg, MRI);
      if (TargetRegisterInfo::isVirtualRegister(TrueReg)) {
        DefMI = MRI->getVRegDef(TrueReg);
        if (DefMI->getOpcode() == PPC::LI || DefMI->getOpcode() == PPC::LI8) {
          OpNoForForwarding = i;
          break;
        }
      }
    }
  } else {
    // Looking back through the definition for each operand could be expensive,
    // so exit early if this isn't an instruction that either has an immediate
    // form or is already an immediate form that we can handle.
    ImmInstrInfo III;
    unsigned Opc = MI.getOpcode();
    bool ConvertibleImmForm =
        Opc == PPC::CMPWI || Opc == PPC::CMPLWI ||
        Opc == PPC::CMPDI || Opc == PPC::CMPLDI ||
        Opc == PPC::ADDI || Opc == PPC::ADDI8 ||
        Opc == PPC::ORI || Opc == PPC::ORI8 ||
        Opc == PPC::XORI || Opc == PPC::XORI8 ||
        Opc == PPC::RLDICL || Opc == PPC::RLDICLo ||
        Opc == PPC::RLDICL_32 || Opc == PPC::RLDICL_32_64 ||
        Opc == PPC::RLWINM || Opc == PPC::RLWINMo ||
        Opc == PPC::RLWINM8 || Opc == PPC::RLWINM8o;
    if (!instrHasImmForm(MI, III, true) && !ConvertibleImmForm)
      return nullptr;

    // Don't convert or %X, %Y, %Y since that's just a register move.
    if ((Opc == PPC::OR || Opc == PPC::OR8) &&
        MI.getOperand(1).getReg() == MI.getOperand(2).getReg())
      return nullptr;
    for (int i = 1, e = MI.getNumOperands(); i < e; i++) {
      MachineOperand &MO = MI.getOperand(i);
      SeenIntermediateUse = false;
      if (MO.isReg() && MO.isUse() && !MO.isImplicit()) {
        MachineBasicBlock::reverse_iterator E = MI.getParent()->rend(), It = MI;
        It++;
        unsigned Reg = MI.getOperand(i).getReg();

        // Is this register defined by some form of add-immediate (including
        // load-immediate) within this basic block?
        for ( ; It != E; ++It) {
          if (It->modifiesRegister(Reg, &getRegisterInfo())) {
            switch (It->getOpcode()) {
            default: break;
            case PPC::LI:
            case PPC::LI8:
            case PPC::ADDItocL:
            case PPC::ADDI:
            case PPC::ADDI8:
              OpNoForForwarding = i;
              return &*It;
            }
            // First modifier wasn't a forwardable def; stop scanning this reg.
            break;
          } else if (It->readsRegister(Reg, &getRegisterInfo()))
            // If we see another use of this reg between the def and the MI,
            // we want to flag it so the def isn't deleted.
            SeenIntermediateUse = true;
        }
      }
    }
  }
  return OpNoForForwarding == ~0U ? nullptr : DefMI;
}
2413 | | |
// Return the per-subtarget table of store opcodes used for spilling,
// indexed by spill-opcode kind (SOK_*). The Power9 row is selected when
// P9 vector instructions are available, otherwise the Power8 row is used.
const unsigned *PPCInstrInfo::getStoreOpcodesForSpillArray() const {
  static const unsigned OpcodesForSpill[2][SOK_LastOpcodeSpill] = {
      // Power 8
      {PPC::STW, PPC::STD, PPC::STFD, PPC::STFS, PPC::SPILL_CR,
       PPC::SPILL_CRBIT, PPC::STVX, PPC::STXVD2X, PPC::STXSDX, PPC::STXSSPX,
       PPC::SPILL_VRSAVE, PPC::QVSTFDX, PPC::QVSTFSXs, PPC::QVSTFDXb,
       PPC::SPILLTOVSR_ST, PPC::EVSTDD, PPC::SPESTW},
      // Power 9
      {PPC::STW, PPC::STD, PPC::STFD, PPC::STFS, PPC::SPILL_CR,
       PPC::SPILL_CRBIT, PPC::STVX, PPC::STXV, PPC::DFSTOREf64, PPC::DFSTOREf32,
       PPC::SPILL_VRSAVE, PPC::QVSTFDX, PPC::QVSTFSXs, PPC::QVSTFDXb,
       PPC::SPILLTOVSR_ST}};

  return OpcodesForSpill[(Subtarget.hasP9Vector()) ? 1 : 0];
}
2429 | | |
// Return the per-subtarget table of load opcodes used for reloading spills,
// indexed by spill-opcode kind (SOK_*); mirrors getStoreOpcodesForSpillArray.
const unsigned *PPCInstrInfo::getLoadOpcodesForSpillArray() const {
  static const unsigned OpcodesForSpill[2][SOK_LastOpcodeSpill] = {
      // Power 8
      {PPC::LWZ, PPC::LD, PPC::LFD, PPC::LFS, PPC::RESTORE_CR,
       PPC::RESTORE_CRBIT, PPC::LVX, PPC::LXVD2X, PPC::LXSDX, PPC::LXSSPX,
       PPC::RESTORE_VRSAVE, PPC::QVLFDX, PPC::QVLFSXs, PPC::QVLFDXb,
       PPC::SPILLTOVSR_LD, PPC::EVLDD, PPC::SPELWZ},
      // Power 9
      {PPC::LWZ, PPC::LD, PPC::LFD, PPC::LFS, PPC::RESTORE_CR,
       PPC::RESTORE_CRBIT, PPC::LVX, PPC::LXV, PPC::DFLOADf64, PPC::DFLOADf32,
       PPC::RESTORE_VRSAVE, PPC::QVLFDX, PPC::QVLFSXs, PPC::QVLFDXb,
       PPC::SPILLTOVSR_LD}};

  return OpcodesForSpill[(Subtarget.hasP9Vector()) ? 1 : 0];
}
2445 | | |
// Fix up kill/dead flags for RegNo over the range (StartMI, EndMI] after a
// transformation has changed where the register's live range ends: the last
// reader (EndMI or an earlier use) gets the kill flag, stale kill flags in
// between are cleared, and if nothing reads RegNo after StartMI's def, that
// def is marked dead. Post-RA only; a no-op while still in SSA form.
void PPCInstrInfo::fixupIsDeadOrKill(MachineInstr &StartMI, MachineInstr &EndMI,
                                     unsigned RegNo) const {
  const MachineRegisterInfo &MRI =
      StartMI.getParent()->getParent()->getRegInfo();
  if (MRI.isSSA())
    return;

  // Instructions between [StartMI, EndMI] should be in same basic block.
  assert((StartMI.getParent() == EndMI.getParent()) &&
         "Instructions are not in same basic block");

  bool IsKillSet = false;

  // Clear the kill flag on MI's operand Index if it is a use overlapping
  // RegNo.
  auto clearOperandKillInfo = [=] (MachineInstr &MI, unsigned Index) {
    MachineOperand &MO = MI.getOperand(Index);
    if (MO.isReg() && MO.isUse() && MO.isKill() &&
        getRegisterInfo().regsOverlap(MO.getReg(), RegNo))
      MO.setIsKill(false);
  };

  // Set killed flag for EndMI.
  // No need to do anything if EndMI defines RegNo.
  int UseIndex =
      EndMI.findRegisterUseOperandIdx(RegNo, false, &getRegisterInfo());
  if (UseIndex != -1) {
    EndMI.getOperand(UseIndex).setIsKill(true);
    IsKillSet = true;
    // Clear killed flag for other EndMI operands related to RegNo. In some
    // unexpected cases, killed may be set multiple times for same register
    // operand in same MI.
    for (int i = 0, e = EndMI.getNumOperands(); i != e; ++i)
      if (i != UseIndex)
        clearOperandKillInfo(EndMI, i);
  }

  // Walking the inst in reverse order (EndMI -> StartMI].
  MachineBasicBlock::reverse_iterator It = EndMI;
  MachineBasicBlock::reverse_iterator E = EndMI.getParent()->rend();
  // EndMI has been handled above, skip it here.
  It++;
  MachineOperand *MO = nullptr;
  for (; It != E; ++It) {
    // Skip instructions which could not be a def/use of RegNo.
    if (It->isDebugInstr() || It->isPosition())
      continue;

    // Clear killed flag for all It operands related to RegNo. In some
    // unexpected cases, killed may be set multiple times for same register
    // operand in same MI.
    for (int i = 0, e = It->getNumOperands(); i != e; ++i)
      clearOperandKillInfo(*It, i);

    // If killed is not set, set killed for its last use or set dead for its def
    // if no use found.
    if (!IsKillSet) {
      if ((MO = It->findRegisterUseOperand(RegNo, false, &getRegisterInfo()))) {
        // Use found, set it killed.
        IsKillSet = true;
        MO->setIsKill(true);
        continue;
      } else if ((MO = It->findRegisterDefOperand(RegNo, false, true,
                                                  &getRegisterInfo()))) {
        // No use found, set dead for its def.
        assert(&*It == &StartMI && "No new def between StartMI and EndMI.");
        MO->setIsDead(true);
        break;
      }
    }

    if ((&*It) == &StartMI)
      break;
  }
  // Ensure RegNo liveness is killed after EndMI.
  assert((IsKillSet || (MO && MO->isDead())) &&
         "RegNo should be killed or dead");
}
2522 | | |
2523 | | // If this instruction has an immediate form and one of its operands is a |
2524 | | // result of a load-immediate or an add-immediate, convert it to |
2525 | | // the immediate form if the constant is in range. |
2526 | | bool PPCInstrInfo::convertToImmediateForm(MachineInstr &MI, |
2527 | 238k | MachineInstr **KilledDef) const { |
2528 | 238k | MachineFunction *MF = MI.getParent()->getParent(); |
2529 | 238k | MachineRegisterInfo *MRI = &MF->getRegInfo(); |
2530 | 238k | bool PostRA = !MRI->isSSA(); |
2531 | 238k | bool SeenIntermediateUse = true; |
2532 | 238k | unsigned ForwardingOperand = ~0U; |
2533 | 238k | MachineInstr *DefMI = getForwardingDefMI(MI, ForwardingOperand, |
2534 | 238k | SeenIntermediateUse); |
2535 | 238k | if (!DefMI) |
2536 | 233k | return false; |
2537 | 5.07k | assert(ForwardingOperand < MI.getNumOperands() && |
2538 | 5.07k | "The forwarding operand needs to be valid at this point"); |
2539 | 5.07k | bool IsForwardingOperandKilled = MI.getOperand(ForwardingOperand).isKill(); |
2540 | 5.07k | bool KillFwdDefMI = !SeenIntermediateUse && IsForwardingOperandKilled1.69k ; |
2541 | 5.07k | unsigned ForwardingOperandReg = MI.getOperand(ForwardingOperand).getReg(); |
2542 | 5.07k | if (KilledDef && KillFwdDefMI1.77k ) |
2543 | 1.47k | *KilledDef = DefMI; |
2544 | 5.07k | |
2545 | 5.07k | ImmInstrInfo III; |
2546 | 5.07k | bool HasImmForm = instrHasImmForm(MI, III, PostRA); |
2547 | 5.07k | // If this is a reg+reg instruction that has a reg+imm form, |
2548 | 5.07k | // and one of the operands is produced by an add-immediate, |
2549 | 5.07k | // try to convert it. |
2550 | 5.07k | if (HasImmForm && |
2551 | 5.07k | transformToImmFormFedByAdd(MI, III, ForwardingOperand, *DefMI, |
2552 | 1.74k | KillFwdDefMI)) |
2553 | 473 | return true; |
2554 | 4.60k | |
2555 | 4.60k | if ((DefMI->getOpcode() != PPC::LI && DefMI->getOpcode() != PPC::LI83.50k ) || |
2556 | 4.60k | !DefMI->getOperand(1).isImm()3.72k ) |
2557 | 903 | return false; |
2558 | 3.69k | |
2559 | 3.69k | int64_t Immediate = DefMI->getOperand(1).getImm(); |
2560 | 3.69k | // Sign-extend to 64-bits. |
2561 | 3.69k | int64_t SExtImm = ((uint64_t)Immediate & ~0x7FFFuLL) != 0 ? |
2562 | 3.33k | (Immediate | 0xFFFFFFFFFFFF0000)365 : Immediate; |
2563 | 3.69k | |
2564 | 3.69k | // If this is a reg+reg instruction that has a reg+imm form, |
2565 | 3.69k | // and one of the operands is produced by LI, convert it now. |
2566 | 3.69k | if (HasImmForm) |
2567 | 564 | return transformToImmFormFedByLI(MI, III, ForwardingOperand, *DefMI, SExtImm); |
2568 | 3.13k | |
2569 | 3.13k | bool ReplaceWithLI = false; |
2570 | 3.13k | bool Is64BitLI = false; |
2571 | 3.13k | int64_t NewImm = 0; |
2572 | 3.13k | bool SetCR = false; |
2573 | 3.13k | unsigned Opc = MI.getOpcode(); |
2574 | 3.13k | switch (Opc) { |
2575 | 3.13k | default: return false2.96k ; |
2576 | 3.13k | |
2577 | 3.13k | // FIXME: Any branches conditional on such a comparison can be made |
2578 | 3.13k | // unconditional. At this time, this happens too infrequently to be worth |
2579 | 3.13k | // the implementation effort, but if that ever changes, we could convert |
2580 | 3.13k | // such a pattern here. |
2581 | 3.13k | case PPC::CMPWI: |
2582 | 52 | case PPC::CMPLWI: |
2583 | 52 | case PPC::CMPDI: |
2584 | 52 | case PPC::CMPLDI: { |
2585 | 52 | // Doing this post-RA would require dataflow analysis to reliably find uses |
2586 | 52 | // of the CR register set by the compare. |
2587 | 52 | // No need to fixup killed/dead flag since this transformation is only valid |
2588 | 52 | // before RA. |
2589 | 52 | if (PostRA) |
2590 | 30 | return false; |
2591 | 22 | // If a compare-immediate is fed by an immediate and is itself an input of |
2592 | 22 | // an ISEL (the most common case) into a COPY of the correct register. |
2593 | 22 | bool Changed = false; |
2594 | 22 | unsigned DefReg = MI.getOperand(0).getReg(); |
2595 | 22 | int64_t Comparand = MI.getOperand(2).getImm(); |
2596 | 22 | int64_t SExtComparand = ((uint64_t)Comparand & ~0x7FFFuLL) != 0 ? |
2597 | 20 | (Comparand | 0xFFFFFFFFFFFF0000)2 : Comparand; |
2598 | 22 | |
2599 | 22 | for (auto &CompareUseMI : MRI->use_instructions(DefReg)) { |
2600 | 18 | unsigned UseOpc = CompareUseMI.getOpcode(); |
2601 | 18 | if (UseOpc != PPC::ISEL && UseOpc != PPC::ISEL816 ) |
2602 | 10 | continue; |
2603 | 8 | unsigned CRSubReg = CompareUseMI.getOperand(3).getSubReg(); |
2604 | 8 | unsigned TrueReg = CompareUseMI.getOperand(1).getReg(); |
2605 | 8 | unsigned FalseReg = CompareUseMI.getOperand(2).getReg(); |
2606 | 8 | unsigned RegToCopy = selectReg(SExtImm, SExtComparand, Opc, TrueReg, |
2607 | 8 | FalseReg, CRSubReg); |
2608 | 8 | if (RegToCopy == PPC::NoRegister) |
2609 | 0 | continue; |
2610 | 8 | // Can't use PPC::COPY to copy PPC::ZERO[8]. Convert it to LI[8] 0. |
2611 | 8 | if (RegToCopy == PPC::ZERO || RegToCopy == PPC::ZERO87 ) { |
2612 | 2 | CompareUseMI.setDesc(get(UseOpc == PPC::ISEL8 ? PPC::LI81 : PPC::LI1 )); |
2613 | 2 | replaceInstrOperandWithImm(CompareUseMI, 1, 0); |
2614 | 2 | CompareUseMI.RemoveOperand(3); |
2615 | 2 | CompareUseMI.RemoveOperand(2); |
2616 | 2 | continue; |
2617 | 2 | } |
2618 | 6 | LLVM_DEBUG( |
2619 | 6 | dbgs() << "Found LI -> CMPI -> ISEL, replacing with a copy.\n"); |
2620 | 6 | LLVM_DEBUG(DefMI->dump(); MI.dump(); CompareUseMI.dump()); |
2621 | 6 | LLVM_DEBUG(dbgs() << "Is converted to:\n"); |
2622 | 6 | // Convert to copy and remove unneeded operands. |
2623 | 6 | CompareUseMI.setDesc(get(PPC::COPY)); |
2624 | 6 | CompareUseMI.RemoveOperand(3); |
2625 | 6 | CompareUseMI.RemoveOperand(RegToCopy == TrueReg ? 21 : 15 ); |
2626 | 6 | CmpIselsConverted++; |
2627 | 6 | Changed = true; |
2628 | 6 | LLVM_DEBUG(CompareUseMI.dump()); |
2629 | 6 | } |
2630 | 22 | if (Changed) |
2631 | 6 | return true; |
2632 | 16 | // This may end up incremented multiple times since this function is called |
2633 | 16 | // during a fixed-point transformation, but it is only meant to indicate the |
2634 | 16 | // presence of this opportunity. |
2635 | 16 | MissedConvertibleImmediateInstrs++; |
2636 | 16 | return false; |
2637 | 16 | } |
2638 | 16 | |
2639 | 16 | // Immediate forms - may simply be convertable to an LI. |
2640 | 16 | case PPC::ADDI: |
2641 | 7 | case PPC::ADDI8: { |
2642 | 7 | // Does the sum fit in a 16-bit signed field? |
2643 | 7 | int64_t Addend = MI.getOperand(2).getImm(); |
2644 | 7 | if (isInt<16>(Addend + SExtImm)) { |
2645 | 7 | ReplaceWithLI = true; |
2646 | 7 | Is64BitLI = Opc == PPC::ADDI8; |
2647 | 7 | NewImm = Addend + SExtImm; |
2648 | 7 | break; |
2649 | 7 | } |
2650 | 0 | return false; |
2651 | 0 | } |
2652 | 28 | case PPC::RLDICL: |
2653 | 28 | case PPC::RLDICLo: |
2654 | 28 | case PPC::RLDICL_32: |
2655 | 28 | case PPC::RLDICL_32_64: { |
2656 | 28 | // Use APInt's rotate function. |
2657 | 28 | int64_t SH = MI.getOperand(2).getImm(); |
2658 | 28 | int64_t MB = MI.getOperand(3).getImm(); |
2659 | 28 | APInt InVal((Opc == PPC::RLDICL || Opc == PPC::RLDICLo20 ) ? |
2660 | 20 | 64 : 328 , SExtImm, true); |
2661 | 28 | InVal = InVal.rotl(SH); |
2662 | 28 | uint64_t Mask = (1LLU << (63 - MB + 1)) - 1; |
2663 | 28 | InVal &= Mask; |
2664 | 28 | // Can't replace negative values with an LI as that will sign-extend |
2665 | 28 | // and not clear the left bits. If we're setting the CR bit, we will use |
2666 | 28 | // ANDIo which won't sign extend, so that's safe. |
2667 | 28 | if (isUInt<15>(InVal.getSExtValue()) || |
2668 | 28 | (2 Opc == PPC::RLDICLo2 && isUInt<16>(InVal.getSExtValue())2 )) { |
2669 | 28 | ReplaceWithLI = true; |
2670 | 28 | Is64BitLI = Opc != PPC::RLDICL_32; |
2671 | 28 | NewImm = InVal.getSExtValue(); |
2672 | 28 | SetCR = Opc == PPC::RLDICLo; |
2673 | 28 | break; |
2674 | 28 | } |
2675 | 0 | return false; |
2676 | 0 | } |
2677 | 20 | case PPC::RLWINM: |
2678 | 20 | case PPC::RLWINM8: |
2679 | 20 | case PPC::RLWINMo: |
2680 | 20 | case PPC::RLWINM8o: { |
2681 | 20 | int64_t SH = MI.getOperand(2).getImm(); |
2682 | 20 | int64_t MB = MI.getOperand(3).getImm(); |
2683 | 20 | int64_t ME = MI.getOperand(4).getImm(); |
2684 | 20 | APInt InVal(32, SExtImm, true); |
2685 | 20 | InVal = InVal.rotl(SH); |
2686 | 20 | // Set the bits ( MB + 32 ) to ( ME + 32 ). |
2687 | 20 | uint64_t Mask = ((1LLU << (32 - MB)) - 1) & ~((1LLU << (31 - ME)) - 1); |
2688 | 20 | InVal &= Mask; |
2689 | 20 | // Can't replace negative values with an LI as that will sign-extend |
2690 | 20 | // and not clear the left bits. If we're setting the CR bit, we will use |
2691 | 20 | // ANDIo which won't sign extend, so that's safe. |
2692 | 20 | bool ValueFits = isUInt<15>(InVal.getSExtValue()); |
2693 | 20 | ValueFits |= ((Opc == PPC::RLWINMo || Opc == PPC::RLWINM8o10 ) && |
2694 | 20 | isUInt<16>(InVal.getSExtValue())12 ); |
2695 | 20 | if (ValueFits) { |
2696 | 18 | ReplaceWithLI = true; |
2697 | 18 | Is64BitLI = Opc == PPC::RLWINM8 || Opc == PPC::RLWINM8o16 ; |
2698 | 18 | NewImm = InVal.getSExtValue(); |
2699 | 18 | SetCR = Opc == PPC::RLWINMo || Opc == PPC::RLWINM8o8 ; |
2700 | 18 | break; |
2701 | 18 | } |
2702 | 2 | return false; |
2703 | 2 | } |
2704 | 68 | case PPC::ORI: |
2705 | 68 | case PPC::ORI8: |
2706 | 68 | case PPC::XORI: |
2707 | 68 | case PPC::XORI8: { |
2708 | 68 | int64_t LogicalImm = MI.getOperand(2).getImm(); |
2709 | 68 | int64_t Result = 0; |
2710 | 68 | if (Opc == PPC::ORI || Opc == PPC::ORI836 ) |
2711 | 63 | Result = LogicalImm | SExtImm; |
2712 | 5 | else |
2713 | 5 | Result = LogicalImm ^ SExtImm; |
2714 | 68 | if (isInt<16>(Result)) { |
2715 | 9 | ReplaceWithLI = true; |
2716 | 9 | Is64BitLI = Opc == PPC::ORI8 || Opc == PPC::XORI87 ; |
2717 | 9 | NewImm = Result; |
2718 | 9 | break; |
2719 | 9 | } |
2720 | 59 | return false; |
2721 | 59 | } |
2722 | 62 | } |
2723 | 62 | |
2724 | 62 | if (ReplaceWithLI) { |
2725 | 62 | // We need to be careful with CR-setting instructions we're replacing. |
2726 | 62 | if (SetCR) { |
2727 | 24 | // We don't know anything about uses when we're out of SSA, so only |
2728 | 24 | // replace if the new immediate will be reproduced. |
2729 | 24 | bool ImmChanged = (SExtImm & NewImm) != NewImm; |
2730 | 24 | if (PostRA && ImmChanged12 ) |
2731 | 6 | return false; |
2732 | 18 | |
2733 | 18 | if (!PostRA) { |
2734 | 12 | // If the defining load-immediate has no other uses, we can just replace |
2735 | 12 | // the immediate with the new immediate. |
2736 | 12 | if (MRI->hasOneUse(DefMI->getOperand(0).getReg())) |
2737 | 6 | DefMI->getOperand(1).setImm(NewImm); |
2738 | 6 | |
2739 | 6 | // If we're not using the GPR result of the CR-setting instruction, we |
2740 | 6 | // just need to and with zero/non-zero depending on the new immediate. |
2741 | 6 | else if (MRI->use_empty(MI.getOperand(0).getReg())) { |
2742 | 6 | if (NewImm) { |
2743 | 4 | assert(Immediate && "Transformation converted zero to non-zero?"); |
2744 | 4 | NewImm = Immediate; |
2745 | 4 | } |
2746 | 6 | } |
2747 | 0 | else if (ImmChanged) |
2748 | 0 | return false; |
2749 | 56 | } |
2750 | 18 | } |
2751 | 56 | |
2752 | 56 | LLVM_DEBUG(dbgs() << "Replacing instruction:\n"); |
2753 | 56 | LLVM_DEBUG(MI.dump()); |
2754 | 56 | LLVM_DEBUG(dbgs() << "Fed by:\n"); |
2755 | 56 | LLVM_DEBUG(DefMI->dump()); |
2756 | 56 | LoadImmediateInfo LII; |
2757 | 56 | LII.Imm = NewImm; |
2758 | 56 | LII.Is64Bit = Is64BitLI; |
2759 | 56 | LII.SetCR = SetCR; |
2760 | 56 | // If we're setting the CR, the original load-immediate must be kept (as an |
2761 | 56 | // operand to ANDIo/ANDI8o). |
2762 | 56 | if (KilledDef && SetCR31 ) |
2763 | 6 | *KilledDef = nullptr; |
2764 | 56 | replaceInstrWithLI(MI, LII); |
2765 | 56 | |
2766 | 56 | // Fixup killed/dead flag after transformation. |
2767 | 56 | // Pattern: |
2768 | 56 | // ForwardingOperandReg = LI imm1 |
2769 | 56 | // y = op2 imm2, ForwardingOperandReg(killed) |
2770 | 56 | if (IsForwardingOperandKilled) |
2771 | 32 | fixupIsDeadOrKill(*DefMI, MI, ForwardingOperandReg); |
2772 | 56 | |
2773 | 56 | LLVM_DEBUG(dbgs() << "With:\n"); |
2774 | 56 | LLVM_DEBUG(MI.dump()); |
2775 | 56 | return true; |
2776 | 56 | } |
2777 | 0 | return false; |
2778 | 0 | } |
2779 | | |
// Determine whether the X-Form (reg+reg) instruction MI has an equivalent
// D-Form (reg+imm) encoding. On success, III is populated with everything
// needed to perform the rewrite: the replacement opcode, which operand slot
// takes the immediate, the legal immediate width / alignment / truncation,
// whether the operands may be commuted, whether the instruction sums its
// operands (i.e. computes reg + reg like a load/store EA), and how the
// "r0 reads as zero" special case maps between the two encodings.
// Returns false when no immediate form exists or the required subtarget
// feature (e.g. P9 vector) is missing. PostRA selects register-class-exact
// opcodes where the D-Form has narrower register access than the X-Form.
bool PPCInstrInfo::instrHasImmForm(const MachineInstr &MI,
                                   ImmInstrInfo &III, bool PostRA) const {
  unsigned Opc = MI.getOpcode();
  // The vast majority of the instructions would need their operand 2 replaced
  // with an immediate when switching to the reg+imm form. A marked exception
  // are the update form loads/stores for which a constant operand 2 would need
  // to turn into a displacement and move operand 1 to the operand 2 position.
  III.ImmOpNo = 2;
  III.OpNoForForwarding = 2;
  III.ImmWidth = 16;
  III.ImmMustBeMultipleOf = 1;
  III.TruncateImmTo = 0;
  III.IsSummingOperands = false;
  switch (Opc) {
  default: return false;
  // Arithmetic: a signed 16-bit immediate replaces one addend.
  case PPC::ADD4:
  case PPC::ADD8:
    III.SignedImm = true;
    III.ZeroIsSpecialOrig = 0;
    III.ZeroIsSpecialNew = 1;
    III.IsCommutative = true;
    III.IsSummingOperands = true;
    III.ImmOpcode = Opc == PPC::ADD4 ? PPC::ADDI : PPC::ADDI8;
    break;
  case PPC::ADDC:
  case PPC::ADDC8:
    III.SignedImm = true;
    III.ZeroIsSpecialOrig = 0;
    III.ZeroIsSpecialNew = 0;
    III.IsCommutative = true;
    III.IsSummingOperands = true;
    III.ImmOpcode = Opc == PPC::ADDC ? PPC::ADDIC : PPC::ADDIC8;
    break;
  case PPC::ADDCo:
    III.SignedImm = true;
    III.ZeroIsSpecialOrig = 0;
    III.ZeroIsSpecialNew = 0;
    III.IsCommutative = true;
    III.IsSummingOperands = true;
    III.ImmOpcode = PPC::ADDICo;
    break;
  case PPC::SUBFC:
  case PPC::SUBFC8:
    III.SignedImm = true;
    III.ZeroIsSpecialOrig = 0;
    III.ZeroIsSpecialNew = 0;
    III.IsCommutative = false;
    III.ImmOpcode = Opc == PPC::SUBFC ? PPC::SUBFIC : PPC::SUBFIC8;
    break;
  // Comparisons: signed compares take a signed immediate, logical (unsigned)
  // compares take an unsigned immediate.
  case PPC::CMPW:
  case PPC::CMPD:
    III.SignedImm = true;
    III.ZeroIsSpecialOrig = 0;
    III.ZeroIsSpecialNew = 0;
    III.IsCommutative = false;
    III.ImmOpcode = Opc == PPC::CMPW ? PPC::CMPWI : PPC::CMPDI;
    break;
  case PPC::CMPLW:
  case PPC::CMPLD:
    III.SignedImm = false;
    III.ZeroIsSpecialOrig = 0;
    III.ZeroIsSpecialNew = 0;
    III.IsCommutative = false;
    III.ImmOpcode = Opc == PPC::CMPLW ? PPC::CMPLWI : PPC::CMPLDI;
    break;
  // Logical ops: the immediate forms take an unsigned 16-bit immediate.
  case PPC::ANDo:
  case PPC::AND8o:
  case PPC::OR:
  case PPC::OR8:
  case PPC::XOR:
  case PPC::XOR8:
    III.SignedImm = false;
    III.ZeroIsSpecialOrig = 0;
    III.ZeroIsSpecialNew = 0;
    III.IsCommutative = true;
    switch(Opc) {
    default: llvm_unreachable("Unknown opcode");
    case PPC::ANDo: III.ImmOpcode = PPC::ANDIo; break;
    case PPC::AND8o: III.ImmOpcode = PPC::ANDIo8; break;
    case PPC::OR: III.ImmOpcode = PPC::ORI; break;
    case PPC::OR8: III.ImmOpcode = PPC::ORI8; break;
    case PPC::XOR: III.ImmOpcode = PPC::XORI; break;
    case PPC::XOR8: III.ImmOpcode = PPC::XORI8; break;
    }
    break;
  // 32-bit rotates/shifts by a register amount.
  case PPC::RLWNM:
  case PPC::RLWNM8:
  case PPC::RLWNMo:
  case PPC::RLWNM8o:
  case PPC::SLW:
  case PPC::SLW8:
  case PPC::SLWo:
  case PPC::SLW8o:
  case PPC::SRW:
  case PPC::SRW8:
  case PPC::SRWo:
  case PPC::SRW8o:
  case PPC::SRAW:
  case PPC::SRAWo:
    III.SignedImm = false;
    III.ZeroIsSpecialOrig = 0;
    III.ZeroIsSpecialNew = 0;
    III.IsCommutative = false;
    // This isn't actually true, but the instructions ignore any of the
    // upper bits, so any immediate loaded with an LI is acceptable.
    // This does not apply to shift right algebraic because a value
    // out of range will produce a -1/0.
    III.ImmWidth = 16;
    if (Opc == PPC::RLWNM || Opc == PPC::RLWNM8 ||
        Opc == PPC::RLWNMo || Opc == PPC::RLWNM8o)
      III.TruncateImmTo = 5;
    else
      III.TruncateImmTo = 6;
    switch(Opc) {
    default: llvm_unreachable("Unknown opcode");
    case PPC::RLWNM: III.ImmOpcode = PPC::RLWINM; break;
    case PPC::RLWNM8: III.ImmOpcode = PPC::RLWINM8; break;
    case PPC::RLWNMo: III.ImmOpcode = PPC::RLWINMo; break;
    case PPC::RLWNM8o: III.ImmOpcode = PPC::RLWINM8o; break;
    case PPC::SLW: III.ImmOpcode = PPC::RLWINM; break;
    case PPC::SLW8: III.ImmOpcode = PPC::RLWINM8; break;
    case PPC::SLWo: III.ImmOpcode = PPC::RLWINMo; break;
    case PPC::SLW8o: III.ImmOpcode = PPC::RLWINM8o; break;
    case PPC::SRW: III.ImmOpcode = PPC::RLWINM; break;
    case PPC::SRW8: III.ImmOpcode = PPC::RLWINM8; break;
    case PPC::SRWo: III.ImmOpcode = PPC::RLWINMo; break;
    case PPC::SRW8o: III.ImmOpcode = PPC::RLWINM8o; break;
    case PPC::SRAW:
      // Algebraic right shift: the immediate really must fit in 5 bits.
      III.ImmWidth = 5;
      III.TruncateImmTo = 0;
      III.ImmOpcode = PPC::SRAWI;
      break;
    case PPC::SRAWo:
      III.ImmWidth = 5;
      III.TruncateImmTo = 0;
      III.ImmOpcode = PPC::SRAWIo;
      break;
    }
    break;
  // 64-bit rotates/shifts by a register amount.
  case PPC::RLDCL:
  case PPC::RLDCLo:
  case PPC::RLDCR:
  case PPC::RLDCRo:
  case PPC::SLD:
  case PPC::SLDo:
  case PPC::SRD:
  case PPC::SRDo:
  case PPC::SRAD:
  case PPC::SRADo:
    III.SignedImm = false;
    III.ZeroIsSpecialOrig = 0;
    III.ZeroIsSpecialNew = 0;
    III.IsCommutative = false;
    // This isn't actually true, but the instructions ignore any of the
    // upper bits, so any immediate loaded with an LI is acceptable.
    // This does not apply to shift right algebraic because a value
    // out of range will produce a -1/0.
    III.ImmWidth = 16;
    if (Opc == PPC::RLDCL || Opc == PPC::RLDCLo ||
        Opc == PPC::RLDCR || Opc == PPC::RLDCRo)
      III.TruncateImmTo = 6;
    else
      III.TruncateImmTo = 7;
    switch(Opc) {
    default: llvm_unreachable("Unknown opcode");
    case PPC::RLDCL: III.ImmOpcode = PPC::RLDICL; break;
    case PPC::RLDCLo: III.ImmOpcode = PPC::RLDICLo; break;
    case PPC::RLDCR: III.ImmOpcode = PPC::RLDICR; break;
    case PPC::RLDCRo: III.ImmOpcode = PPC::RLDICRo; break;
    case PPC::SLD: III.ImmOpcode = PPC::RLDICR; break;
    case PPC::SLDo: III.ImmOpcode = PPC::RLDICRo; break;
    case PPC::SRD: III.ImmOpcode = PPC::RLDICL; break;
    case PPC::SRDo: III.ImmOpcode = PPC::RLDICLo; break;
    case PPC::SRAD:
      // Algebraic right shift: the immediate really must fit in 6 bits.
      III.ImmWidth = 6;
      III.TruncateImmTo = 0;
      III.ImmOpcode = PPC::SRADI;
      break;
    case PPC::SRADo:
      III.ImmWidth = 6;
      III.TruncateImmTo = 0;
      III.ImmOpcode = PPC::SRADIo;
      break;
    }
    break;
  // Loads and stores:
  // X-Form loads/stores sum operands 1 and 2 for the EA; in the D-Form the
  // displacement goes in operand 1 and the base register in operand 2.
  case PPC::LBZX:
  case PPC::LBZX8:
  case PPC::LHZX:
  case PPC::LHZX8:
  case PPC::LHAX:
  case PPC::LHAX8:
  case PPC::LWZX:
  case PPC::LWZX8:
  case PPC::LWAX:
  case PPC::LDX:
  case PPC::LFSX:
  case PPC::LFDX:
  case PPC::STBX:
  case PPC::STBX8:
  case PPC::STHX:
  case PPC::STHX8:
  case PPC::STWX:
  case PPC::STWX8:
  case PPC::STDX:
  case PPC::STFSX:
  case PPC::STFDX:
    III.SignedImm = true;
    III.ZeroIsSpecialOrig = 1;
    III.ZeroIsSpecialNew = 2;
    III.IsCommutative = true;
    III.IsSummingOperands = true;
    III.ImmOpNo = 1;
    III.OpNoForForwarding = 2;
    switch(Opc) {
    default: llvm_unreachable("Unknown opcode");
    case PPC::LBZX: III.ImmOpcode = PPC::LBZ; break;
    case PPC::LBZX8: III.ImmOpcode = PPC::LBZ8; break;
    case PPC::LHZX: III.ImmOpcode = PPC::LHZ; break;
    case PPC::LHZX8: III.ImmOpcode = PPC::LHZ8; break;
    case PPC::LHAX: III.ImmOpcode = PPC::LHA; break;
    case PPC::LHAX8: III.ImmOpcode = PPC::LHA8; break;
    case PPC::LWZX: III.ImmOpcode = PPC::LWZ; break;
    case PPC::LWZX8: III.ImmOpcode = PPC::LWZ8; break;
    case PPC::LWAX:
      // DS-Form: the 16-bit displacement must be a multiple of 4.
      III.ImmOpcode = PPC::LWA;
      III.ImmMustBeMultipleOf = 4;
      break;
    case PPC::LDX: III.ImmOpcode = PPC::LD; III.ImmMustBeMultipleOf = 4; break;
    case PPC::LFSX: III.ImmOpcode = PPC::LFS; break;
    case PPC::LFDX: III.ImmOpcode = PPC::LFD; break;
    case PPC::STBX: III.ImmOpcode = PPC::STB; break;
    case PPC::STBX8: III.ImmOpcode = PPC::STB8; break;
    case PPC::STHX: III.ImmOpcode = PPC::STH; break;
    case PPC::STHX8: III.ImmOpcode = PPC::STH8; break;
    case PPC::STWX: III.ImmOpcode = PPC::STW; break;
    case PPC::STWX8: III.ImmOpcode = PPC::STW8; break;
    case PPC::STDX:
      III.ImmOpcode = PPC::STD;
      III.ImmMustBeMultipleOf = 4;
      break;
    case PPC::STFSX: III.ImmOpcode = PPC::STFS; break;
    case PPC::STFDX: III.ImmOpcode = PPC::STFD; break;
    }
    break;
  // Update-form loads/stores: the EA operands sit one slot later (2 and 3)
  // because operand 1 is the tied updated base register.
  case PPC::LBZUX:
  case PPC::LBZUX8:
  case PPC::LHZUX:
  case PPC::LHZUX8:
  case PPC::LHAUX:
  case PPC::LHAUX8:
  case PPC::LWZUX:
  case PPC::LWZUX8:
  case PPC::LDUX:
  case PPC::LFSUX:
  case PPC::LFDUX:
  case PPC::STBUX:
  case PPC::STBUX8:
  case PPC::STHUX:
  case PPC::STHUX8:
  case PPC::STWUX:
  case PPC::STWUX8:
  case PPC::STDUX:
  case PPC::STFSUX:
  case PPC::STFDUX:
    III.SignedImm = true;
    III.ZeroIsSpecialOrig = 2;
    III.ZeroIsSpecialNew = 3;
    III.IsCommutative = false;
    III.IsSummingOperands = true;
    III.ImmOpNo = 2;
    III.OpNoForForwarding = 3;
    switch(Opc) {
    default: llvm_unreachable("Unknown opcode");
    case PPC::LBZUX: III.ImmOpcode = PPC::LBZU; break;
    case PPC::LBZUX8: III.ImmOpcode = PPC::LBZU8; break;
    case PPC::LHZUX: III.ImmOpcode = PPC::LHZU; break;
    case PPC::LHZUX8: III.ImmOpcode = PPC::LHZU8; break;
    case PPC::LHAUX: III.ImmOpcode = PPC::LHAU; break;
    case PPC::LHAUX8: III.ImmOpcode = PPC::LHAU8; break;
    case PPC::LWZUX: III.ImmOpcode = PPC::LWZU; break;
    case PPC::LWZUX8: III.ImmOpcode = PPC::LWZU8; break;
    case PPC::LDUX:
      III.ImmOpcode = PPC::LDU;
      III.ImmMustBeMultipleOf = 4;
      break;
    case PPC::LFSUX: III.ImmOpcode = PPC::LFSU; break;
    case PPC::LFDUX: III.ImmOpcode = PPC::LFDU; break;
    case PPC::STBUX: III.ImmOpcode = PPC::STBU; break;
    case PPC::STBUX8: III.ImmOpcode = PPC::STBU8; break;
    case PPC::STHUX: III.ImmOpcode = PPC::STHU; break;
    case PPC::STHUX8: III.ImmOpcode = PPC::STHU8; break;
    case PPC::STWUX: III.ImmOpcode = PPC::STWU; break;
    case PPC::STWUX8: III.ImmOpcode = PPC::STWU8; break;
    case PPC::STDUX:
      III.ImmOpcode = PPC::STDU;
      III.ImmMustBeMultipleOf = 4;
      break;
    case PPC::STFSUX: III.ImmOpcode = PPC::STFSU; break;
    case PPC::STFDUX: III.ImmOpcode = PPC::STFDU; break;
    }
    break;
  // Power9 and up only. For some of these, the X-Form version has access to all
  // 64 VSR's whereas the D-Form only has access to the VR's. We replace those
  // with pseudo-ops pre-ra and for post-ra, we check that the register loaded
  // into or stored from is one of the VR registers.
  case PPC::LXVX:
  case PPC::LXSSPX:
  case PPC::LXSDX:
  case PPC::STXVX:
  case PPC::STXSSPX:
  case PPC::STXSDX:
  case PPC::XFLOADf32:
  case PPC::XFLOADf64:
  case PPC::XFSTOREf32:
  case PPC::XFSTOREf64:
    if (!Subtarget.hasP9Vector())
      return false;
    III.SignedImm = true;
    III.ZeroIsSpecialOrig = 1;
    III.ZeroIsSpecialNew = 2;
    III.IsCommutative = true;
    III.IsSummingOperands = true;
    III.ImmOpNo = 1;
    III.OpNoForForwarding = 2;
    III.ImmMustBeMultipleOf = 4;
    switch(Opc) {
    default: llvm_unreachable("Unknown opcode");
    case PPC::LXVX:
      III.ImmOpcode = PPC::LXV;
      III.ImmMustBeMultipleOf = 16;
      break;
    case PPC::LXSSPX:
      if (PostRA) {
        // LXSSP only encodes VR registers; fall back to LFS (any displacement)
        // when the destination landed in an FP register.
        if (isVFRegister(MI.getOperand(0).getReg()))
          III.ImmOpcode = PPC::LXSSP;
        else {
          III.ImmOpcode = PPC::LFS;
          III.ImmMustBeMultipleOf = 1;
        }
        break;
      }
      LLVM_FALLTHROUGH;
    case PPC::XFLOADf32:
      III.ImmOpcode = PPC::DFLOADf32;
      break;
    case PPC::LXSDX:
      if (PostRA) {
        if (isVFRegister(MI.getOperand(0).getReg()))
          III.ImmOpcode = PPC::LXSD;
        else {
          III.ImmOpcode = PPC::LFD;
          III.ImmMustBeMultipleOf = 1;
        }
        break;
      }
      LLVM_FALLTHROUGH;
    case PPC::XFLOADf64:
      III.ImmOpcode = PPC::DFLOADf64;
      break;
    case PPC::STXVX:
      III.ImmOpcode = PPC::STXV;
      III.ImmMustBeMultipleOf = 16;
      break;
    case PPC::STXSSPX:
      if (PostRA) {
        if (isVFRegister(MI.getOperand(0).getReg()))
          III.ImmOpcode = PPC::STXSSP;
        else {
          III.ImmOpcode = PPC::STFS;
          III.ImmMustBeMultipleOf = 1;
        }
        break;
      }
      LLVM_FALLTHROUGH;
    case PPC::XFSTOREf32:
      III.ImmOpcode = PPC::DFSTOREf32;
      break;
    case PPC::STXSDX:
      if (PostRA) {
        if (isVFRegister(MI.getOperand(0).getReg()))
          III.ImmOpcode = PPC::STXSD;
        else {
          III.ImmOpcode = PPC::STFD;
          III.ImmMustBeMultipleOf = 1;
        }
        break;
      }
      LLVM_FALLTHROUGH;
    case PPC::XFSTOREf64:
      III.ImmOpcode = PPC::DFSTOREf64;
      break;
    }
    break;
  }
  return true;
}
3177 | | |
3178 | | // Utility function for swaping two arbitrary operands of an instruction. |
3179 | 292 | static void swapMIOperands(MachineInstr &MI, unsigned Op1, unsigned Op2) { |
3180 | 292 | assert(Op1 != Op2 && "Cannot swap operand with itself."); |
3181 | 292 | |
3182 | 292 | unsigned MaxOp = std::max(Op1, Op2); |
3183 | 292 | unsigned MinOp = std::min(Op1, Op2); |
3184 | 292 | MachineOperand MOp1 = MI.getOperand(MinOp); |
3185 | 292 | MachineOperand MOp2 = MI.getOperand(MaxOp); |
3186 | 292 | MI.RemoveOperand(std::max(Op1, Op2)); |
3187 | 292 | MI.RemoveOperand(std::min(Op1, Op2)); |
3188 | 292 | |
3189 | 292 | // If the operands we are swapping are the two at the end (the common case) |
3190 | 292 | // we can just remove both and add them in the opposite order. |
3191 | 292 | if (MaxOp - MinOp == 1 && MI.getNumOperands() == MinOp) { |
3192 | 172 | MI.addOperand(MOp2); |
3193 | 172 | MI.addOperand(MOp1); |
3194 | 172 | } else { |
3195 | 120 | // Store all operands in a temporary vector, remove them and re-add in the |
3196 | 120 | // right order. |
3197 | 120 | SmallVector<MachineOperand, 2> MOps; |
3198 | 120 | unsigned TotalOps = MI.getNumOperands() + 2; // We've already removed 2 ops. |
3199 | 281 | for (unsigned i = MI.getNumOperands() - 1; i >= MinOp; i--161 ) { |
3200 | 161 | MOps.push_back(MI.getOperand(i)); |
3201 | 161 | MI.RemoveOperand(i); |
3202 | 161 | } |
3203 | 120 | // MOp2 needs to be added next. |
3204 | 120 | MI.addOperand(MOp2); |
3205 | 120 | // Now add the rest. |
3206 | 401 | for (unsigned i = MI.getNumOperands(); i < TotalOps; i++281 ) { |
3207 | 281 | if (i == MaxOp) |
3208 | 120 | MI.addOperand(MOp1); |
3209 | 161 | else { |
3210 | 161 | MI.addOperand(MOps.back()); |
3211 | 161 | MOps.pop_back(); |
3212 | 161 | } |
3213 | 281 | } |
3214 | 120 | } |
3215 | 292 | } |
3216 | | |
3217 | | // Check if the 'MI' that has the index OpNoForForwarding |
3218 | | // meets the requirement described in the ImmInstrInfo. |
3219 | | bool PPCInstrInfo::isUseMIElgibleForForwarding(MachineInstr &MI, |
3220 | | const ImmInstrInfo &III, |
3221 | | unsigned OpNoForForwarding |
3222 | 1.74k | ) const { |
3223 | 1.74k | // As the algorithm of checking for PPC::ZERO/PPC::ZERO8 |
3224 | 1.74k | // would not work pre-RA, we can only do the check post RA. |
3225 | 1.74k | MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo(); |
3226 | 1.74k | if (MRI.isSSA()) |
3227 | 240 | return false; |
3228 | 1.50k | |
3229 | 1.50k | // Cannot do the transform if MI isn't summing the operands. |
3230 | 1.50k | if (!III.IsSummingOperands) |
3231 | 214 | return false; |
3232 | 1.29k | |
3233 | 1.29k | // The instruction we are trying to replace must have the ZeroIsSpecialOrig set. |
3234 | 1.29k | if (!III.ZeroIsSpecialOrig) |
3235 | 18 | return false; |
3236 | 1.27k | |
3237 | 1.27k | // We cannot do the transform if the operand we are trying to replace |
3238 | 1.27k | // isn't the same as the operand the instruction allows. |
3239 | 1.27k | if (OpNoForForwarding != III.OpNoForForwarding) |
3240 | 92 | return false; |
3241 | 1.18k | |
3242 | 1.18k | // Check if the instruction we are trying to transform really has |
3243 | 1.18k | // the special zero register as its operand. |
3244 | 1.18k | if (MI.getOperand(III.ZeroIsSpecialOrig).getReg() != PPC::ZERO && |
3245 | 1.18k | MI.getOperand(III.ZeroIsSpecialOrig).getReg() != PPC::ZERO81.18k ) |
3246 | 172 | return false; |
3247 | 1.00k | |
3248 | 1.00k | // This machine instruction is convertible if it is, |
3249 | 1.00k | // 1. summing the operands. |
3250 | 1.00k | // 2. one of the operands is special zero register. |
3251 | 1.00k | // 3. the operand we are trying to replace is allowed by the MI. |
3252 | 1.00k | return true; |
3253 | 1.00k | } |
3254 | | |
3255 | | // Check if the DefMI is the add inst and set the ImmMO and RegMO |
3256 | | // accordingly. |
3257 | | bool PPCInstrInfo::isDefMIElgibleForForwarding(MachineInstr &DefMI, |
3258 | | const ImmInstrInfo &III, |
3259 | | MachineOperand *&ImmMO, |
3260 | 1.00k | MachineOperand *&RegMO) const { |
3261 | 1.00k | unsigned Opc = DefMI.getOpcode(); |
3262 | 1.00k | if (Opc != PPC::ADDItocL && Opc != PPC::ADDI151 && Opc != PPC::ADDI8150 ) |
3263 | 7 | return false; |
3264 | 1.00k | |
3265 | 1.00k | assert(DefMI.getNumOperands() >= 3 && |
3266 | 1.00k | "Add inst must have at least three operands"); |
3267 | 1.00k | RegMO = &DefMI.getOperand(1); |
3268 | 1.00k | ImmMO = &DefMI.getOperand(2); |
3269 | 1.00k | |
3270 | 1.00k | // This DefMI is elgible for forwarding if it is: |
3271 | 1.00k | // 1. add inst |
3272 | 1.00k | // 2. one of the operands is Imm/CPI/Global. |
3273 | 1.00k | return isAnImmediateOperand(*ImmMO); |
3274 | 1.00k | } |
3275 | | |
3276 | | bool PPCInstrInfo::isRegElgibleForForwarding( |
3277 | | const MachineOperand &RegMO, const MachineInstr &DefMI, |
3278 | | const MachineInstr &MI, bool KillDefMI, |
3279 | 473 | bool &IsFwdFeederRegKilled) const { |
3280 | 473 | // x = addi y, imm |
3281 | 473 | // ... |
3282 | 473 | // z = lfdx 0, x -> z = lfd imm(y) |
3283 | 473 | // The Reg "y" can be forwarded to the MI(z) only when there is no DEF |
3284 | 473 | // of "y" between the DEF of "x" and "z". |
3285 | 473 | // The query is only valid post RA. |
3286 | 473 | const MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo(); |
3287 | 473 | if (MRI.isSSA()) |
3288 | 0 | return false; |
3289 | 473 | |
3290 | 473 | unsigned Reg = RegMO.getReg(); |
3291 | 473 | |
3292 | 473 | // Walking the inst in reverse(MI-->DefMI) to get the last DEF of the Reg. |
3293 | 473 | MachineBasicBlock::const_reverse_iterator It = MI; |
3294 | 473 | MachineBasicBlock::const_reverse_iterator E = MI.getParent()->rend(); |
3295 | 473 | It++; |
3296 | 857 | for (; It != E; ++It384 ) { |
3297 | 857 | if (It->modifiesRegister(Reg, &getRegisterInfo()) && (&*It) != &DefMI341 ) |
3298 | 0 | return false; |
3299 | 857 | else if (It->killsRegister(Reg, &getRegisterInfo()) && (&*It) != &DefMI365 ) |
3300 | 2 | IsFwdFeederRegKilled = true; |
3301 | 857 | // Made it to DefMI without encountering a clobber. |
3302 | 857 | if ((&*It) == &DefMI) |
3303 | 473 | break; |
3304 | 857 | } |
3305 | 473 | assert((&*It) == &DefMI && "DefMI is missing"); |
3306 | 473 | |
3307 | 473 | // If DefMI also defines the register to be forwarded, we can only forward it |
3308 | 473 | // if DefMI is being erased. |
3309 | 473 | if (DefMI.modifiesRegister(Reg, &getRegisterInfo())) |
3310 | 341 | return KillDefMI; |
3311 | 132 | |
3312 | 132 | return true; |
3313 | 132 | } |
3314 | | |
3315 | | bool PPCInstrInfo::isImmElgibleForForwarding(const MachineOperand &ImmMO, |
3316 | | const MachineInstr &DefMI, |
3317 | | const ImmInstrInfo &III, |
3318 | 1.00k | int64_t &Imm) const { |
3319 | 1.00k | assert(isAnImmediateOperand(ImmMO) && "ImmMO is NOT an immediate"); |
3320 | 1.00k | if (DefMI.getOpcode() == PPC::ADDItocL) { |
3321 | 858 | // The operand for ADDItocL is CPI, which isn't imm at compiling time, |
3322 | 858 | // However, we know that, it is 16-bit width, and has the alignment of 4. |
3323 | 858 | // Check if the instruction met the requirement. |
3324 | 858 | if (III.ImmMustBeMultipleOf > 4 || |
3325 | 858 | III.TruncateImmTo342 || III.ImmWidth != 16342 ) |
3326 | 516 | return false; |
3327 | 342 | |
3328 | 342 | // Going from XForm to DForm loads means that the displacement needs to be |
3329 | 342 | // not just an immediate but also a multiple of 4, or 16 depending on the |
3330 | 342 | // load. A DForm load cannot be represented if it is a multiple of say 2. |
3331 | 342 | // XForm loads do not have this restriction. |
3332 | 342 | if (ImmMO.isGlobal() && |
3333 | 342 | ImmMO.getGlobal()->getAlignment() < III.ImmMustBeMultipleOf25 ) |
3334 | 1 | return false; |
3335 | 341 | |
3336 | 341 | return true; |
3337 | 341 | } |
3338 | 144 | |
3339 | 144 | if (ImmMO.isImm()) { |
3340 | 144 | // It is Imm, we need to check if the Imm fit the range. |
3341 | 144 | int64_t Immediate = ImmMO.getImm(); |
3342 | 144 | // Sign-extend to 64-bits. |
3343 | 144 | Imm = ((uint64_t)Immediate & ~0x7FFFuLL) != 0 ? |
3344 | 109 | (Immediate | 0xFFFFFFFFFFFF0000) : Immediate35 ; |
3345 | 144 | |
3346 | 144 | if (Imm % III.ImmMustBeMultipleOf) |
3347 | 12 | return false; |
3348 | 132 | if (III.TruncateImmTo) |
3349 | 0 | Imm &= ((1 << III.TruncateImmTo) - 1); |
3350 | 132 | if (III.SignedImm) { |
3351 | 132 | APInt ActualValue(64, Imm, true); |
3352 | 132 | if (!ActualValue.isSignedIntN(III.ImmWidth)) |
3353 | 0 | return false; |
3354 | 0 | } else { |
3355 | 0 | uint64_t UnsignedMax = (1 << III.ImmWidth) - 1; |
3356 | 0 | if ((uint64_t)Imm > UnsignedMax) |
3357 | 0 | return false; |
3358 | 0 | } |
3359 | 0 | } |
3360 | 0 | else |
3361 | 0 | return false; |
3362 | 132 | |
3363 | 132 | // This ImmMO is forwarded if it meets the requriement describle |
3364 | 132 | // in ImmInstrInfo |
3365 | 132 | return true; |
3366 | 132 | } |
3367 | | |
// If an X-Form instruction is fed by an add-immediate and one of its operands
// is the literal zero, attempt to forward the source of the add-immediate to
// the corresponding D-Form instruction with the displacement coming from
// the immediate being added.
//
// \param MI                 the X-Form instruction to rewrite in place.
// \param III                description of MI's immediate (D-Form) variant.
// \param OpNoForForwarding  index of MI's operand that reads DefMI's result.
// \param DefMI              the feeding add-immediate instruction.
// \param KillDefMI          whether DefMI can be considered killed afterwards.
// \return true iff MI was rewritten to the immediate form.
bool PPCInstrInfo::transformToImmFormFedByAdd(
    MachineInstr &MI, const ImmInstrInfo &III, unsigned OpNoForForwarding,
    MachineInstr &DefMI, bool KillDefMI) const {
  //         RegMO ImmMO
  //           |    |
  // x = addi reg, imm  <----- DefMI
  // y = op    0 ,  x   <----- MI
  //                |
  //         OpNoForForwarding
  // Check if the MI meet the requirement described in the III.
  if (!isUseMIElgibleForForwarding(MI, III, OpNoForForwarding))
    return false;

  // Check if the DefMI meet the requirement
  // described in the III. If yes, set the ImmMO and RegMO accordingly.
  MachineOperand *ImmMO = nullptr;
  MachineOperand *RegMO = nullptr;
  if (!isDefMIElgibleForForwarding(DefMI, III, ImmMO, RegMO))
    return false;
  assert(ImmMO && RegMO && "Imm and Reg operand must have been set");

  // As we get the Imm operand now, we need to check if the ImmMO meet
  // the requirement described in the III. If yes set the Imm.
  int64_t Imm = 0;
  if (!isImmElgibleForForwarding(*ImmMO, DefMI, III, Imm))
    return false;

  bool IsFwdFeederRegKilled = false;
  // Check if the RegMO can be forwarded to MI.
  if (!isRegElgibleForForwarding(*RegMO, DefMI, MI, KillDefMI,
                                 IsFwdFeederRegKilled))
    return false;

  // Get killed info in case fixup needed after transformation.
  // Post-RA there is no SSA form, so kill flags must be repaired by hand
  // once the forwarded register replaces the killed use.
  unsigned ForwardKilledOperandReg = ~0U;
  MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();
  bool PostRA = !MRI.isSSA();
  if (PostRA && MI.getOperand(OpNoForForwarding).isKill())
    ForwardKilledOperandReg = MI.getOperand(OpNoForForwarding).getReg();

  // We know that, the MI and DefMI both meet the pattern, and
  // the Imm also meet the requirement with the new Imm-form.
  // It is safe to do the transformation now.
  LLVM_DEBUG(dbgs() << "Replacing instruction:\n");
  LLVM_DEBUG(MI.dump());
  LLVM_DEBUG(dbgs() << "Fed by:\n");
  LLVM_DEBUG(DefMI.dump());

  // Update the base reg first.
  MI.getOperand(III.OpNoForForwarding).ChangeToRegister(RegMO->getReg(),
                                                        false, false,
                                                        RegMO->isKill());

  // Then, update the imm.
  if (ImmMO->isImm()) {
    // If the ImmMO is Imm, change the operand that has ZERO to that Imm
    // directly.
    replaceInstrOperandWithImm(MI, III.ZeroIsSpecialOrig, Imm);
  }
  else {
    // Otherwise, it is Constant Pool Index(CPI) or Global,
    // which is relocation in fact. We need to replace the special zero
    // register with ImmMO.
    // Before that, we need to fixup the target flags for imm.
    // For some reason, we miss to set the flag for the ImmMO if it is CPI.
    if (DefMI.getOpcode() == PPC::ADDItocL)
      ImmMO->setTargetFlags(PPCII::MO_TOC_LO);

    // MI didn't have the interface such as MI.setOperand(i) though
    // it has MI.getOperand(i). To repalce the ZERO MachineOperand with
    // ImmMO, we need to remove ZERO operand and all the operands behind it,
    // and, add the ImmMO, then, move back all the operands behind ZERO.
    // Iterate from the last operand down so indices stay valid while removing.
    SmallVector<MachineOperand, 2> MOps;
    for (unsigned i = MI.getNumOperands() - 1; i >= III.ZeroIsSpecialOrig; i--) {
      MOps.push_back(MI.getOperand(i));
      MI.RemoveOperand(i);
    }

    // Remove the last MO in the list, which is ZERO operand in fact.
    MOps.pop_back();
    // Add the imm operand.
    MI.addOperand(*ImmMO);
    // Now add the rest back (MOps holds them in reverse order, restoring the
    // original relative order of the trailing operands).
    for (auto &MO : MOps)
      MI.addOperand(MO);
  }

  // Update the opcode.
  MI.setDesc(get(III.ImmOpcode));

  // Fix up killed/dead flag after transformation.
  // Pattern 1:
  //   x = ADD KilledFwdFeederReg, imm
  //   n = opn KilledFwdFeederReg(killed), regn
  //   y = XOP 0, x
  // Pattern 2:
  //   x = ADD reg(killed), imm
  //   y = XOP 0, x
  if (IsFwdFeederRegKilled || RegMO->isKill())
    fixupIsDeadOrKill(DefMI, MI, RegMO->getReg());
  // Pattern 3:
  //   ForwardKilledOperandReg = ADD reg, imm
  //   y = XOP 0, ForwardKilledOperandReg(killed)
  if (ForwardKilledOperandReg != ~0U)
    fixupIsDeadOrKill(DefMI, MI, ForwardKilledOperandReg);

  LLVM_DEBUG(dbgs() << "With:\n");
  LLVM_DEBUG(MI.dump());

  return true;
}
3483 | | |
// Rewrite MI into its immediate (D/MD/MDS-form) variant III.ImmOpcode when it
// is fed by a load-immediate (LI/LI8) producing Imm at operand ConstantOpNo.
// Handles commutative operand swapping, the R0/X0 "zero is special" rules,
// and the conversion of register shifts to rotate-and-mask immediates.
// Returns false (leaving MI untouched) if any precondition fails.
bool PPCInstrInfo::transformToImmFormFedByLI(MachineInstr &MI,
                                             const ImmInstrInfo &III,
                                             unsigned ConstantOpNo,
                                             MachineInstr &DefMI,
                                             int64_t Imm) const {
  MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();
  bool PostRA = !MRI.isSSA();
  // Exit early if we can't convert this.
  if ((ConstantOpNo != III.OpNoForForwarding) && !III.IsCommutative)
    return false;
  if (Imm % III.ImmMustBeMultipleOf)
    return false;
  if (III.TruncateImmTo)
    Imm &= ((1 << III.TruncateImmTo) - 1);
  // Reject immediates that do not fit the new instruction's field.
  if (III.SignedImm) {
    APInt ActualValue(64, Imm, true);
    if (!ActualValue.isSignedIntN(III.ImmWidth))
      return false;
  } else {
    uint64_t UnsignedMax = (1 << III.ImmWidth) - 1;
    if ((uint64_t)Imm > UnsignedMax)
      return false;
  }

  // If we're post-RA, the instructions don't agree on whether register zero is
  // special, we can transform this as long as the register operand that will
  // end up in the location where zero is special isn't R0.
  if (PostRA && III.ZeroIsSpecialOrig != III.ZeroIsSpecialNew) {
    unsigned PosForOrigZero = III.ZeroIsSpecialOrig ? III.ZeroIsSpecialOrig :
      III.ZeroIsSpecialNew + 1;
    unsigned OrigZeroReg = MI.getOperand(PosForOrigZero).getReg();
    unsigned NewZeroReg = MI.getOperand(III.ZeroIsSpecialNew).getReg();
    // If R0 is in the operand where zero is special for the new instruction,
    // it is unsafe to transform if the constant operand isn't that operand.
    if ((NewZeroReg == PPC::R0 || NewZeroReg == PPC::X0) &&
        ConstantOpNo != III.ZeroIsSpecialNew)
      return false;
    if ((OrigZeroReg == PPC::R0 || OrigZeroReg == PPC::X0) &&
        ConstantOpNo != PosForOrigZero)
      return false;
  }

  // Get killed info in case fixup needed after transformation.
  unsigned ForwardKilledOperandReg = ~0U;
  if (PostRA && MI.getOperand(ConstantOpNo).isKill())
    ForwardKilledOperandReg = MI.getOperand(ConstantOpNo).getReg();

  unsigned Opc = MI.getOpcode();
  bool SpecialShift32 =
    Opc == PPC::SLW || Opc == PPC::SLWo || Opc == PPC::SRW || Opc == PPC::SRWo;
  bool SpecialShift64 =
    Opc == PPC::SLD || Opc == PPC::SLDo || Opc == PPC::SRD || Opc == PPC::SRDo;
  bool SetCR = Opc == PPC::SLWo || Opc == PPC::SRWo ||
               Opc == PPC::SLDo || Opc == PPC::SRDo;
  bool RightShift =
    Opc == PPC::SRW || Opc == PPC::SRWo || Opc == PPC::SRD || Opc == PPC::SRDo;

  MI.setDesc(get(III.ImmOpcode));
  if (ConstantOpNo == III.OpNoForForwarding) {
    // Converting shifts to immediate form is a bit tricky since they may do
    // one of three things:
    // 1. If the shift amount is between OpSize and 2*OpSize, the result is zero
    // 2. If the shift amount is zero, the result is unchanged (save for maybe
    //    setting CR0)
    // 3. If the shift amount is in [1, OpSize), it's just a shift
    if (SpecialShift32 || SpecialShift64) {
      LoadImmediateInfo LII;
      LII.Imm = 0;
      LII.SetCR = SetCR;
      LII.Is64Bit = SpecialShift64;
      uint64_t ShAmt = Imm & (SpecialShift32 ? 0x1F : 0x3F);
      // Case 1: the over-wide shift bit is set, so the result is a zero load.
      if (Imm & (SpecialShift32 ? 0x20 : 0x40))
        replaceInstrWithLI(MI, LII);
      // Shifts by zero don't change the value. If we don't need to set CR0,
      // just convert this to a COPY. Can't do this post-RA since we've already
      // cleaned up the copies.
      else if (!SetCR && ShAmt == 0 && !PostRA) {
        MI.RemoveOperand(2);
        MI.setDesc(get(PPC::COPY));
      } else {
        // The 32 bit and 64 bit instructions are quite different.
        if (SpecialShift32) {
          // Left shifts use (N, 0, 31-N), right shifts use (32-N, N, 31).
          uint64_t SH = RightShift ? 32 - ShAmt : ShAmt;
          uint64_t MB = RightShift ? ShAmt : 0;
          uint64_t ME = RightShift ? 31 : 31 - ShAmt;
          replaceInstrOperandWithImm(MI, III.OpNoForForwarding, SH);
          MachineInstrBuilder(*MI.getParent()->getParent(), MI).addImm(MB)
            .addImm(ME);
        } else {
          // Left shifts use (N, 63-N), right shifts use (64-N, N).
          uint64_t SH = RightShift ? 64 - ShAmt : ShAmt;
          uint64_t ME = RightShift ? ShAmt : 63 - ShAmt;
          replaceInstrOperandWithImm(MI, III.OpNoForForwarding, SH);
          MachineInstrBuilder(*MI.getParent()->getParent(), MI).addImm(ME);
        }
      }
    } else
      replaceInstrOperandWithImm(MI, ConstantOpNo, Imm);
  }
  // Convert commutative instructions (switch the operands and convert the
  // desired one to an immediate.
  else if (III.IsCommutative) {
    replaceInstrOperandWithImm(MI, ConstantOpNo, Imm);
    swapMIOperands(MI, ConstantOpNo, III.OpNoForForwarding);
  } else
    llvm_unreachable("Should have exited early!");

  // For instructions for which the constant register replaces a different
  // operand than where the immediate goes, we need to swap them.
  if (III.OpNoForForwarding != III.ImmOpNo)
    swapMIOperands(MI, III.OpNoForForwarding, III.ImmOpNo);

  // If the special R0/X0 register index are different for original instruction
  // and new instruction, we need to fix up the register class in new
  // instruction.
  if (!PostRA && III.ZeroIsSpecialOrig != III.ZeroIsSpecialNew) {
    if (III.ZeroIsSpecialNew) {
      // If operand at III.ZeroIsSpecialNew is physical reg(eg: ZERO/ZERO8), no
      // need to fix up register class.
      unsigned RegToModify = MI.getOperand(III.ZeroIsSpecialNew).getReg();
      if (TargetRegisterInfo::isVirtualRegister(RegToModify)) {
        // Constrain to the NO-R0/NO-X0 class so the allocator can never put
        // R0/X0 in the slot where zero is special.
        const TargetRegisterClass *NewRC =
          MRI.getRegClass(RegToModify)->hasSuperClassEq(&PPC::GPRCRegClass) ?
          &PPC::GPRC_and_GPRC_NOR0RegClass : &PPC::G8RC_and_G8RC_NOX0RegClass;
        MRI.setRegClass(RegToModify, NewRC);
      }
    }
  }

  // Fix up killed/dead flag after transformation.
  // Pattern:
  //   ForwardKilledOperandReg = LI imm
  //   y = XOP reg, ForwardKilledOperandReg(killed)
  if (ForwardKilledOperandReg != ~0U)
    fixupIsDeadOrKill(DefMI, MI, ForwardKilledOperandReg);
  return true;
}
3622 | | |
3623 | | const TargetRegisterClass * |
3624 | 2.74k | PPCInstrInfo::updatedRC(const TargetRegisterClass *RC) const { |
3625 | 2.74k | if (Subtarget.hasVSX() && RC == &PPC::VRRCRegClass1.59k ) |
3626 | 220 | return &PPC::VSRCRegClass; |
3627 | 2.52k | return RC; |
3628 | 2.52k | } |
3629 | | |
// Return the record-form ("dot", CR0-setting) opcode corresponding to
// Opcode, delegating to the TableGen-generated PPC mapping.
int PPCInstrInfo::getRecordFormOpcode(unsigned Opcode) {
  return PPC::getRecordFormOpcode(Opcode);
}
3633 | | |
3634 | | // This function returns true if the machine instruction |
3635 | | // always outputs a value by sign-extending a 32 bit value, |
3636 | | // i.e. 0 to 31-th bits are same as 32-th bit. |
3637 | 849 | static bool isSignExtendingOp(const MachineInstr &MI) { |
3638 | 849 | int Opcode = MI.getOpcode(); |
3639 | 849 | if (Opcode == PPC::LI || Opcode == PPC::LI8840 || |
3640 | 849 | Opcode == PPC::LIS840 || Opcode == PPC::LIS8839 || |
3641 | 849 | Opcode == PPC::SRAW839 || Opcode == PPC::SRAWo839 || |
3642 | 849 | Opcode == PPC::SRAWI839 || Opcode == PPC::SRAWIo838 || |
3643 | 849 | Opcode == PPC::LWA838 || Opcode == PPC::LWAX838 || |
3644 | 849 | Opcode == PPC::LWA_32838 || Opcode == PPC::LWAX_32838 || |
3645 | 849 | Opcode == PPC::LHA838 || Opcode == PPC::LHAX838 || |
3646 | 849 | Opcode == PPC::LHA8838 || Opcode == PPC::LHAX8838 || |
3647 | 849 | Opcode == PPC::LBZ838 || Opcode == PPC::LBZX835 || |
3648 | 849 | Opcode == PPC::LBZ8835 || Opcode == PPC::LBZX8835 || |
3649 | 849 | Opcode == PPC::LBZU835 || Opcode == PPC::LBZUX835 || |
3650 | 849 | Opcode == PPC::LBZU8835 || Opcode == PPC::LBZUX8835 || |
3651 | 849 | Opcode == PPC::LHZ835 || Opcode == PPC::LHZX834 || |
3652 | 849 | Opcode == PPC::LHZ8834 || Opcode == PPC::LHZX8834 || |
3653 | 849 | Opcode == PPC::LHZU834 || Opcode == PPC::LHZUX834 || |
3654 | 849 | Opcode == PPC::LHZU8834 || Opcode == PPC::LHZUX8834 || |
3655 | 849 | Opcode == PPC::EXTSB834 || Opcode == PPC::EXTSBo826 || |
3656 | 849 | Opcode == PPC::EXTSH826 || Opcode == PPC::EXTSHo826 || |
3657 | 849 | Opcode == PPC::EXTSB8826 || Opcode == PPC::EXTSH8826 || |
3658 | 849 | Opcode == PPC::EXTSW826 || Opcode == PPC::EXTSWo826 || |
3659 | 849 | Opcode == PPC::SETB826 || Opcode == PPC::SETB8826 || |
3660 | 849 | Opcode == PPC::EXTSH8_32_64826 || Opcode == PPC::EXTSW_32_64826 || |
3661 | 849 | Opcode == PPC::EXTSB8_32_64823 ) |
3662 | 26 | return true; |
3663 | 823 | |
3664 | 823 | if (Opcode == PPC::RLDICL && MI.getOperand(3).getImm() >= 332 ) |
3665 | 2 | return true; |
3666 | 821 | |
3667 | 821 | if ((Opcode == PPC::RLWINM || Opcode == PPC::RLWINMo776 || |
3668 | 821 | Opcode == PPC::RLWNM776 || Opcode == PPC::RLWNMo776 ) && |
3669 | 821 | MI.getOperand(3).getImm() > 045 && |
3670 | 821 | MI.getOperand(3).getImm() <= MI.getOperand(4).getImm()43 ) |
3671 | 43 | return true; |
3672 | 778 | |
3673 | 778 | return false; |
3674 | 778 | } |
3675 | | |
3676 | | // This function returns true if the machine instruction |
3677 | | // always outputs zeros in higher 32 bits. |
3678 | 733 | static bool isZeroExtendingOp(const MachineInstr &MI) { |
3679 | 733 | int Opcode = MI.getOpcode(); |
3680 | 733 | // The 16-bit immediate is sign-extended in li/lis. |
3681 | 733 | // If the most significant bit is zero, all higher bits are zero. |
3682 | 733 | if (Opcode == PPC::LI || Opcode == PPC::LI8717 || |
3683 | 733 | Opcode == PPC::LIS715 || Opcode == PPC::LIS8715 ) { |
3684 | 18 | int64_t Imm = MI.getOperand(1).getImm(); |
3685 | 18 | if (((uint64_t)Imm & ~0x7FFFuLL) == 0) |
3686 | 17 | return true; |
3687 | 716 | } |
3688 | 716 | |
3689 | 716 | // We have some variations of rotate-and-mask instructions |
3690 | 716 | // that clear higher 32-bits. |
3691 | 716 | if ((Opcode == PPC::RLDICL || Opcode == PPC::RLDICLo713 || |
3692 | 716 | Opcode == PPC::RLDCL713 || Opcode == PPC::RLDCLo713 || |
3693 | 716 | Opcode == PPC::RLDICL_32_64713 ) && |
3694 | 716 | MI.getOperand(3).getImm() >= 323 ) |
3695 | 1 | return true; |
3696 | 715 | |
3697 | 715 | if ((Opcode == PPC::RLDIC || Opcode == PPC::RLDICo) && |
3698 | 715 | MI.getOperand(3).getImm() >= 320 && |
3699 | 715 | MI.getOperand(3).getImm() <= 63 - MI.getOperand(2).getImm()0 ) |
3700 | 0 | return true; |
3701 | 715 | |
3702 | 715 | if ((Opcode == PPC::RLWINM || Opcode == PPC::RLWINMo613 || |
3703 | 715 | Opcode == PPC::RLWNM613 || Opcode == PPC::RLWNMo612 || |
3704 | 715 | Opcode == PPC::RLWINM8612 || Opcode == PPC::RLWNM8611 ) && |
3705 | 715 | MI.getOperand(3).getImm() <= MI.getOperand(4).getImm()104 ) |
3706 | 103 | return true; |
3707 | 612 | |
3708 | 612 | // There are other instructions that clear higher 32-bits. |
3709 | 612 | if (Opcode == PPC::CNTLZW || Opcode == PPC::CNTLZWo || |
3710 | 612 | Opcode == PPC::CNTTZW || Opcode == PPC::CNTTZWo || |
3711 | 612 | Opcode == PPC::CNTLZW8 || Opcode == PPC::CNTTZW8 || |
3712 | 612 | Opcode == PPC::CNTLZD || Opcode == PPC::CNTLZDo || |
3713 | 612 | Opcode == PPC::CNTTZD || Opcode == PPC::CNTTZDo || |
3714 | 612 | Opcode == PPC::POPCNTD || Opcode == PPC::POPCNTW || |
3715 | 612 | Opcode == PPC::SLW || Opcode == PPC::SLWo604 || |
3716 | 612 | Opcode == PPC::SRW604 || Opcode == PPC::SRWo603 || |
3717 | 612 | Opcode == PPC::SLW8603 || Opcode == PPC::SRW8603 || |
3718 | 612 | Opcode == PPC::SLWI603 || Opcode == PPC::SLWIo603 || |
3719 | 612 | Opcode == PPC::SRWI603 || Opcode == PPC::SRWIo603 || |
3720 | 612 | Opcode == PPC::LWZ603 || Opcode == PPC::LWZX553 || |
3721 | 612 | Opcode == PPC::LWZU537 || Opcode == PPC::LWZUX531 || |
3722 | 612 | Opcode == PPC::LWBRX531 || Opcode == PPC::LHBRX531 || |
3723 | 612 | Opcode == PPC::LHZ531 || Opcode == PPC::LHZX524 || |
3724 | 612 | Opcode == PPC::LHZU524 || Opcode == PPC::LHZUX524 || |
3725 | 612 | Opcode == PPC::LBZ524 || Opcode == PPC::LBZX513 || |
3726 | 612 | Opcode == PPC::LBZU513 || Opcode == PPC::LBZUX513 || |
3727 | 612 | Opcode == PPC::LWZ8513 || Opcode == PPC::LWZX8513 || |
3728 | 612 | Opcode == PPC::LWZU8513 || Opcode == PPC::LWZUX8513 || |
3729 | 612 | Opcode == PPC::LWBRX8513 || Opcode == PPC::LHBRX8513 || |
3730 | 612 | Opcode == PPC::LHZ8513 || Opcode == PPC::LHZX8513 || |
3731 | 612 | Opcode == PPC::LHZU8513 || Opcode == PPC::LHZUX8513 || |
3732 | 612 | Opcode == PPC::LBZ8513 || Opcode == PPC::LBZX8513 || |
3733 | 612 | Opcode == PPC::LBZU8513 || Opcode == PPC::LBZUX8513 || |
3734 | 612 | Opcode == PPC::ANDIo513 || Opcode == PPC::ANDISo511 || |
3735 | 612 | Opcode == PPC::ROTRWI511 || Opcode == PPC::ROTRWIo511 || |
3736 | 612 | Opcode == PPC::EXTLWI511 || Opcode == PPC::EXTLWIo511 || |
3737 | 612 | Opcode == PPC::MFVSRWZ511 ) |
3738 | 101 | return true; |
3739 | 511 | |
3740 | 511 | return false; |
3741 | 511 | } |
3742 | | |
3743 | | // This function returns true if the input MachineInstr is a TOC save |
3744 | | // instruction. |
3745 | 355 | bool PPCInstrInfo::isTOCSaveMI(const MachineInstr &MI) const { |
3746 | 355 | if (!MI.getOperand(1).isImm() || !MI.getOperand(2).isReg()260 ) |
3747 | 206 | return false; |
3748 | 149 | unsigned TOCSaveOffset = Subtarget.getFrameLowering()->getTOCSaveOffset(); |
3749 | 149 | unsigned StackOffset = MI.getOperand(1).getImm(); |
3750 | 149 | unsigned StackReg = MI.getOperand(2).getReg(); |
3751 | 149 | if (StackReg == PPC::X1 && StackOffset == TOCSaveOffset80 ) |
3752 | 29 | return true; |
3753 | 120 | |
3754 | 120 | return false; |
3755 | 120 | } |
3756 | | |
// We limit the max depth to track incoming values of PHIs or binary ops
// (e.g. AND) to avoid excessive cost.
const unsigned MAX_DEPTH = 1;

// Return true if the output of MI is known to be sign-extended (when
// SignExt is true) or zero-extended (when SignExt is false) from 32 bits.
// Beyond the per-opcode checks in isSignExtendingOp/isZeroExtendingOp, this
// traces through COPYs (including ABI-extended parameters and return
// values), 16-bit logical immediates, and OR/ISEL/PHI/AND feeders up to
// MAX_DEPTH levels of recursion.
bool
PPCInstrInfo::isSignOrZeroExtended(const MachineInstr &MI, bool SignExt,
                                   const unsigned Depth) const {
  const MachineFunction *MF = MI.getParent()->getParent();
  const MachineRegisterInfo *MRI = &MF->getRegInfo();

  // If we know this instruction returns sign- or zero-extended result,
  // return true.
  if (SignExt ? isSignExtendingOp(MI):
                isZeroExtendingOp(MI))
    return true;

  switch (MI.getOpcode()) {
  case PPC::COPY: {
    unsigned SrcReg = MI.getOperand(1).getReg();

    // In both ELFv1 and v2 ABI, method parameters and the return value
    // are sign- or zero-extended.
    if (MF->getSubtarget<PPCSubtarget>().isSVR4ABI()) {
      const PPCFunctionInfo *FuncInfo = MF->getInfo<PPCFunctionInfo>();
      // We check the ZExt/SExt flags for a method parameter.
      if (MI.getParent()->getBasicBlock() ==
          &MF->getFunction().getEntryBlock()) {
        unsigned VReg = MI.getOperand(0).getReg();
        if (MF->getRegInfo().isLiveIn(VReg))
          return SignExt ? FuncInfo->isLiveInSExt(VReg) :
                           FuncInfo->isLiveInZExt(VReg);
      }

      // For a method return value, we check the ZExt/SExt flags in attribute.
      // We assume the following code sequence for method call.
      //   ADJCALLSTACKDOWN 32, implicit dead %r1, implicit %r1
      //   BL8_NOP @func,...
      //   ADJCALLSTACKUP 32, 0, implicit dead %r1, implicit %r1
      //   %5 = COPY %x3; G8RC:%5
      if (SrcReg == PPC::X3) {
        const MachineBasicBlock *MBB = MI.getParent();
        MachineBasicBlock::const_instr_iterator II =
          MachineBasicBlock::const_instr_iterator(&MI);
        if (II != MBB->instr_begin() &&
            (--II)->getOpcode() == PPC::ADJCALLSTACKUP) {
          const MachineInstr &CallMI = *(--II);
          if (CallMI.isCall() && CallMI.getOperand(0).isGlobal()) {
            const Function *CalleeFn =
              dyn_cast<Function>(CallMI.getOperand(0).getGlobal());
            if (!CalleeFn)
              return false;
            const IntegerType *IntTy =
              dyn_cast<IntegerType>(CalleeFn->getReturnType());
            const AttributeSet &Attrs =
              CalleeFn->getAttributes().getRetAttributes();
            if (IntTy && IntTy->getBitWidth() <= 32)
              return Attrs.hasAttribute(SignExt ? Attribute::SExt :
                                                  Attribute::ZExt);
          }
        }
      }
    }

    // If this is a copy from another register, we recursively check source.
    // Note: a plain COPY does not consume a recursion level (Depth unchanged).
    if (!TargetRegisterInfo::isVirtualRegister(SrcReg))
      return false;
    const MachineInstr *SrcMI = MRI->getVRegDef(SrcReg);
    if (SrcMI != NULL)
      return isSignOrZeroExtended(*SrcMI, SignExt, Depth);

    return false;
  }

  case PPC::ANDIo:
  case PPC::ANDISo:
  case PPC::ORI:
  case PPC::ORIS:
  case PPC::XORI:
  case PPC::XORIS:
  case PPC::ANDIo8:
  case PPC::ANDISo8:
  case PPC::ORI8:
  case PPC::ORIS8:
  case PPC::XORI8:
  case PPC::XORIS8: {
    // logical operation with 16-bit immediate does not change the upper bits.
    // So, we track the operand register as we do for register copy.
    unsigned SrcReg = MI.getOperand(1).getReg();
    if (!TargetRegisterInfo::isVirtualRegister(SrcReg))
      return false;
    const MachineInstr *SrcMI = MRI->getVRegDef(SrcReg);
    if (SrcMI != NULL)
      return isSignOrZeroExtended(*SrcMI, SignExt, Depth);

    return false;
  }

  // If all incoming values are sign-/zero-extended,
  // the output of OR, ISEL or PHI is also sign-/zero-extended.
  case PPC::OR:
  case PPC::OR8:
  case PPC::ISEL:
  case PPC::PHI: {
    if (Depth >= MAX_DEPTH)
      return false;

    // The input registers for PHI are operand 1, 3, ...
    // The input registers for others are operand 1 and 2.
    unsigned E = 3, D = 1;
    if (MI.getOpcode() == PPC::PHI) {
      E = MI.getNumOperands();
      D = 2;
    }

    for (unsigned I = 1; I != E; I += D) {
      if (MI.getOperand(I).isReg()) {
        unsigned SrcReg = MI.getOperand(I).getReg();
        if (!TargetRegisterInfo::isVirtualRegister(SrcReg))
          return false;
        const MachineInstr *SrcMI = MRI->getVRegDef(SrcReg);
        if (SrcMI == NULL || !isSignOrZeroExtended(*SrcMI, SignExt, Depth+1))
          return false;
      }
      else
        return false;
    }
    return true;
  }

  // If at least one of the incoming values of an AND is zero extended
  // then the output is also zero-extended. If both of the incoming values
  // are sign-extended then the output is also sign extended.
  case PPC::AND:
  case PPC::AND8: {
    if (Depth >= MAX_DEPTH)
      return false;

    assert(MI.getOperand(1).isReg() && MI.getOperand(2).isReg());

    unsigned SrcReg1 = MI.getOperand(1).getReg();
    unsigned SrcReg2 = MI.getOperand(2).getReg();

    if (!TargetRegisterInfo::isVirtualRegister(SrcReg1) ||
        !TargetRegisterInfo::isVirtualRegister(SrcReg2))
      return false;

    const MachineInstr *MISrc1 = MRI->getVRegDef(SrcReg1);
    const MachineInstr *MISrc2 = MRI->getVRegDef(SrcReg2);
    if (!MISrc1 || !MISrc2)
      return false;

    if(SignExt)
      return isSignOrZeroExtended(*MISrc1, SignExt, Depth+1) &&
             isSignOrZeroExtended(*MISrc2, SignExt, Depth+1);
    else
      return isSignOrZeroExtended(*MISrc1, SignExt, Depth+1) ||
             isSignOrZeroExtended(*MISrc2, SignExt, Depth+1);
  }

  default:
    break;
  }
  return false;
}
3921 | | |
3922 | 2 | bool PPCInstrInfo::isBDNZ(unsigned Opcode) const { |
3923 | 2 | return (Opcode == (Subtarget.isPPC64() ? PPC::BDNZ8 : PPC::BDNZ0 )); |
3924 | 2 | } |
3925 | | |
3926 | | bool PPCInstrInfo::analyzeLoop(MachineLoop &L, MachineInstr *&IndVarInst, |
3927 | 2 | MachineInstr *&CmpInst) const { |
3928 | 2 | MachineBasicBlock *LoopEnd = L.getBottomBlock(); |
3929 | 2 | MachineBasicBlock::iterator I = LoopEnd->getFirstTerminator(); |
3930 | 2 | // We really "analyze" only CTR loops right now. |
3931 | 2 | if (I != LoopEnd->end() && isBDNZ(I->getOpcode())) { |
3932 | 2 | IndVarInst = nullptr; |
3933 | 2 | CmpInst = &*I; |
3934 | 2 | return false; |
3935 | 2 | } |
3936 | 0 | return true; |
3937 | 0 | } |
3938 | | |
3939 | | MachineInstr * |
3940 | 2 | PPCInstrInfo::findLoopInstr(MachineBasicBlock &PreHeader) const { |
3941 | 2 | |
3942 | 2 | unsigned LOOPi = (Subtarget.isPPC64() ? PPC::MTCTR8loop : PPC::MTCTRloop0 ); |
3943 | 2 | |
3944 | 2 | // The loop set-up instruction should be in preheader |
3945 | 2 | for (auto &I : PreHeader.instrs()) |
3946 | 11 | if (I.getOpcode() == LOOPi) |
3947 | 2 | return &I; |
3948 | 2 | return nullptr0 ; |
3949 | 2 | } |
3950 | | |
// Reduce the trip count of a hardware (CTR) loop by one for pipelining.
// For a compile-time count fed by LI/LI8 the immediate is decremented in
// place (erasing the loop set-up entirely when it drops to <= 1) and the new
// count is returned. For a run-time count, a branch condition on the CTR
// register is pushed into Cond and the count register is returned. Returns 0
// when the set-up or count definition cannot be found, or when the loop is
// erased. (PrevInsts, Iter and MaxIter are not used by this implementation.)
unsigned PPCInstrInfo::reduceLoopCount(
    MachineBasicBlock &MBB, MachineBasicBlock &PreHeader, MachineInstr *IndVar,
    MachineInstr &Cmp, SmallVectorImpl<MachineOperand> &Cond,
    SmallVectorImpl<MachineInstr *> &PrevInsts, unsigned Iter,
    unsigned MaxIter) const {
  // We expect a hardware loop currently. This means that IndVar is set
  // to null, and the compare is the ENDLOOP instruction.
  assert((!IndVar) && isBDNZ(Cmp.getOpcode()) && "Expecting a CTR loop");
  MachineFunction *MF = MBB.getParent();
  // NOTE(review): DL is currently unused in this function.
  DebugLoc DL = Cmp.getDebugLoc();
  MachineInstr *Loop = findLoopInstr(PreHeader);
  if (!Loop)
    return 0;
  unsigned LoopCountReg = Loop->getOperand(0).getReg();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  MachineInstr *LoopCount = MRI.getUniqueVRegDef(LoopCountReg);

  if (!LoopCount)
    return 0;
  // If the loop trip count is a compile-time value, then just change the
  // value.
  if (LoopCount->getOpcode() == PPC::LI8 || LoopCount->getOpcode() == PPC::LI) {
    int64_t Offset = LoopCount->getOperand(1).getImm();
    // A count of <= 1 means no iterations remain after peeling: drop the
    // count definition and the CTR set-up altogether.
    if (Offset <= 1) {
      LoopCount->eraseFromParent();
      Loop->eraseFromParent();
      return 0;
    }
    LoopCount->getOperand(1).setImm(Offset - 1);
    return Offset - 1;
  }

  // The loop trip count is a run-time value.
  // We need to subtract one from the trip count,
  // and insert branch later to check if we're done with the loop.

  // Since BDZ/BDZ8 that we will insert will also decrease the ctr by 1,
  // so we don't need to generate any thing here.
  Cond.push_back(MachineOperand::CreateImm(0));
  Cond.push_back(MachineOperand::CreateReg(
      Subtarget.isPPC64() ? PPC::CTR8 : PPC::CTR, true));
  return LoopCountReg;
}
3994 | | |
3995 | | // Return true if get the base operand, byte offset of an instruction and the |
3996 | | // memory width. Width is the size of memory that is being loaded/stored. |
3997 | | bool PPCInstrInfo::getMemOperandWithOffsetWidth( |
3998 | | const MachineInstr &LdSt, |
3999 | | const MachineOperand *&BaseReg, |
4000 | | int64_t &Offset, |
4001 | | unsigned &Width, |
4002 | 85.2k | const TargetRegisterInfo *TRI) const { |
4003 | 85.2k | assert(LdSt.mayLoadOrStore() && "Expected a memory operation."); |
4004 | 85.2k | |
4005 | 85.2k | // Handle only loads/stores with base register followed by immediate offset. |
4006 | 85.2k | if (LdSt.getNumExplicitOperands() != 3) |
4007 | 702 | return false; |
4008 | 84.5k | if (!LdSt.getOperand(1).isImm() || !LdSt.getOperand(2).isReg()74.4k ) |
4009 | 18.8k | return false; |
4010 | 65.7k | |
4011 | 65.7k | if (!LdSt.hasOneMemOperand()) |
4012 | 0 | return false; |
4013 | 65.7k | |
4014 | 65.7k | Width = (*LdSt.memoperands_begin())->getSize(); |
4015 | 65.7k | Offset = LdSt.getOperand(1).getImm(); |
4016 | 65.7k | BaseReg = &LdSt.getOperand(2); |
4017 | 65.7k | return true; |
4018 | 65.7k | } |
4019 | | |
4020 | | bool PPCInstrInfo::areMemAccessesTriviallyDisjoint( |
4021 | | const MachineInstr &MIa, const MachineInstr &MIb, |
4022 | 51.7k | AliasAnalysis * /*AA*/) const { |
4023 | 51.7k | assert(MIa.mayLoadOrStore() && "MIa must be a load or store."); |
4024 | 51.7k | assert(MIb.mayLoadOrStore() && "MIb must be a load or store."); |
4025 | 51.7k | |
4026 | 51.7k | if (MIa.hasUnmodeledSideEffects() || MIb.hasUnmodeledSideEffects() || |
4027 | 51.7k | MIa.hasOrderedMemoryRef() || MIb.hasOrderedMemoryRef()) |
4028 | 0 | return false; |
4029 | 51.7k | |
4030 | 51.7k | // Retrieve the base register, offset from the base register and width. Width |
4031 | 51.7k | // is the size of memory that is being loaded/stored (e.g. 1, 2, 4). If |
4032 | 51.7k | // base registers are identical, and the offset of a lower memory access + |
4033 | 51.7k | // the width doesn't overlap the offset of a higher memory access, |
4034 | 51.7k | // then the memory accesses are different. |
4035 | 51.7k | const TargetRegisterInfo *TRI = &getRegisterInfo(); |
4036 | 51.7k | const MachineOperand *BaseOpA = nullptr, *BaseOpB = nullptr; |
4037 | 51.7k | int64_t OffsetA = 0, OffsetB = 0; |
4038 | 51.7k | unsigned int WidthA = 0, WidthB = 0; |
4039 | 51.7k | if (getMemOperandWithOffsetWidth(MIa, BaseOpA, OffsetA, WidthA, TRI) && |
4040 | 51.7k | getMemOperandWithOffsetWidth(MIb, BaseOpB, OffsetB, WidthB, TRI)33.5k ) { |
4041 | 32.1k | if (BaseOpA->isIdenticalTo(*BaseOpB)) { |
4042 | 21.6k | int LowOffset = std::min(OffsetA, OffsetB); |
4043 | 21.6k | int HighOffset = std::max(OffsetA, OffsetB); |
4044 | 21.6k | int LowWidth = (LowOffset == OffsetA) ? WidthA8.80k : WidthB12.8k ; |
4045 | 21.6k | if (LowOffset + LowWidth <= HighOffset) |
4046 | 20.1k | return true; |
4047 | 31.5k | } |
4048 | 32.1k | } |
4049 | 31.5k | return false; |
4050 | 31.5k | } |