// Coverage-annotated listing of:
//   llvm/lib/Target/Sparc/SparcInstrInfo.cpp
// (from build: clang-stage2-coverage-R; "Line | Count | Source" columns and
// inline execution counts from the coverage report have been stripped below.)
1 | | //===-- SparcInstrInfo.cpp - Sparc Instruction Information ----------------===// |
2 | | // |
3 | | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. |
4 | | // See https://llvm.org/LICENSE.txt for license information. |
5 | | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception |
6 | | // |
7 | | //===----------------------------------------------------------------------===// |
8 | | // |
9 | | // This file contains the Sparc implementation of the TargetInstrInfo class. |
10 | | // |
11 | | //===----------------------------------------------------------------------===// |
12 | | |
13 | | #include "SparcInstrInfo.h" |
14 | | #include "Sparc.h" |
15 | | #include "SparcMachineFunctionInfo.h" |
16 | | #include "SparcSubtarget.h" |
17 | | #include "llvm/ADT/STLExtras.h" |
18 | | #include "llvm/ADT/SmallVector.h" |
19 | | #include "llvm/CodeGen/MachineFrameInfo.h" |
20 | | #include "llvm/CodeGen/MachineInstrBuilder.h" |
21 | | #include "llvm/CodeGen/MachineMemOperand.h" |
22 | | #include "llvm/CodeGen/MachineRegisterInfo.h" |
23 | | #include "llvm/Support/ErrorHandling.h" |
24 | | #include "llvm/Support/TargetRegistry.h" |
25 | | |
26 | | using namespace llvm; |
27 | | |
28 | | #define GET_INSTRINFO_CTOR_DTOR |
29 | | #include "SparcGenInstrInfo.inc" |
30 | | |
// Pin the vtable to this file: defining this otherwise-empty out-of-line
// member anchors SparcInstrInfo's vtable emission to this translation unit.
void SparcInstrInfo::anchor() {}
33 | | |
// Construct the Sparc instruction info. The two opcodes handed to the
// TableGen-generated base class identify the call-frame setup/destroy
// pseudo-instructions for this target.
SparcInstrInfo::SparcInstrInfo(SparcSubtarget &ST)
    : SparcGenInstrInfo(SP::ADJCALLSTACKDOWN, SP::ADJCALLSTACKUP), RI(),
      Subtarget(ST) {}
37 | | |
38 | | /// isLoadFromStackSlot - If the specified machine instruction is a direct |
39 | | /// load from a stack slot, return the virtual or physical register number of |
40 | | /// the destination along with the FrameIndex of the loaded stack slot. If |
41 | | /// not, return 0. This predicate must return 0 if the instruction has |
42 | | /// any side effects other than loading from the stack slot. |
43 | | unsigned SparcInstrInfo::isLoadFromStackSlot(const MachineInstr &MI, |
44 | 6.57k | int &FrameIndex) const { |
45 | 6.57k | if (MI.getOpcode() == SP::LDri || MI.getOpcode() == SP::LDXri6.56k || |
46 | 6.57k | MI.getOpcode() == SP::LDFri6.55k || MI.getOpcode() == SP::LDDFri6.55k || |
47 | 6.57k | MI.getOpcode() == SP::LDQFri4.53k ) { |
48 | 2.04k | if (MI.getOperand(1).isFI() && MI.getOperand(2).isImm()16 && |
49 | 2.04k | MI.getOperand(2).getImm() == 016 ) { |
50 | 14 | FrameIndex = MI.getOperand(1).getIndex(); |
51 | 14 | return MI.getOperand(0).getReg(); |
52 | 14 | } |
53 | 6.56k | } |
54 | 6.56k | return 0; |
55 | 6.56k | } |
56 | | |
57 | | /// isStoreToStackSlot - If the specified machine instruction is a direct |
58 | | /// store to a stack slot, return the virtual or physical register number of |
59 | | /// the source reg along with the FrameIndex of the loaded stack slot. If |
60 | | /// not, return 0. This predicate must return 0 if the instruction has |
61 | | /// any side effects other than storing to the stack slot. |
62 | | unsigned SparcInstrInfo::isStoreToStackSlot(const MachineInstr &MI, |
63 | 9.62k | int &FrameIndex) const { |
64 | 9.62k | if (MI.getOpcode() == SP::STri || MI.getOpcode() == SP::STXri9.59k || |
65 | 9.62k | MI.getOpcode() == SP::STFri9.58k || MI.getOpcode() == SP::STDFri9.58k || |
66 | 9.62k | MI.getOpcode() == SP::STQFri5.53k ) { |
67 | 4.09k | if (MI.getOperand(0).isFI() && MI.getOperand(1).isImm()46 && |
68 | 4.09k | MI.getOperand(1).getImm() == 046 ) { |
69 | 28 | FrameIndex = MI.getOperand(0).getIndex(); |
70 | 28 | return MI.getOperand(2).getReg(); |
71 | 28 | } |
72 | 9.59k | } |
73 | 9.59k | return 0; |
74 | 9.59k | } |
75 | | |
76 | | static bool IsIntegerCC(unsigned CC) |
77 | 264 | { |
78 | 264 | return (CC <= SPCC::ICC_VC); |
79 | 264 | } |
80 | | |
81 | | static SPCC::CondCodes GetOppositeBranchCondition(SPCC::CondCodes CC) |
82 | 212 | { |
83 | 212 | switch(CC) { |
84 | 212 | case SPCC::ICC_A: return SPCC::ICC_N0 ; |
85 | 212 | case SPCC::ICC_N: return SPCC::ICC_A0 ; |
86 | 212 | case SPCC::ICC_NE: return SPCC::ICC_E70 ; |
87 | 212 | case SPCC::ICC_E: return SPCC::ICC_NE71 ; |
88 | 212 | case SPCC::ICC_G: return SPCC::ICC_LE1 ; |
89 | 212 | case SPCC::ICC_LE: return SPCC::ICC_G0 ; |
90 | 212 | case SPCC::ICC_GE: return SPCC::ICC_L3 ; |
91 | 212 | case SPCC::ICC_L: return SPCC::ICC_GE3 ; |
92 | 212 | case SPCC::ICC_GU: return SPCC::ICC_LEU6 ; |
93 | 212 | case SPCC::ICC_LEU: return SPCC::ICC_GU4 ; |
94 | 212 | case SPCC::ICC_CC: return SPCC::ICC_CS8 ; |
95 | 212 | case SPCC::ICC_CS: return SPCC::ICC_CC8 ; |
96 | 212 | case SPCC::ICC_POS: return SPCC::ICC_NEG0 ; |
97 | 212 | case SPCC::ICC_NEG: return SPCC::ICC_POS0 ; |
98 | 212 | case SPCC::ICC_VC: return SPCC::ICC_VS0 ; |
99 | 212 | case SPCC::ICC_VS: return SPCC::ICC_VC0 ; |
100 | 212 | |
101 | 212 | case SPCC::FCC_A: return SPCC::FCC_N0 ; |
102 | 212 | case SPCC::FCC_N: return SPCC::FCC_A0 ; |
103 | 212 | case SPCC::FCC_U: return SPCC::FCC_O0 ; |
104 | 212 | case SPCC::FCC_O: return SPCC::FCC_U0 ; |
105 | 212 | case SPCC::FCC_G: return SPCC::FCC_ULE6 ; |
106 | 212 | case SPCC::FCC_LE: return SPCC::FCC_UG0 ; |
107 | 212 | case SPCC::FCC_UG: return SPCC::FCC_LE0 ; |
108 | 212 | case SPCC::FCC_ULE: return SPCC::FCC_G3 ; |
109 | 212 | case SPCC::FCC_L: return SPCC::FCC_UGE11 ; |
110 | 212 | case SPCC::FCC_GE: return SPCC::FCC_UL2 ; |
111 | 212 | case SPCC::FCC_UL: return SPCC::FCC_GE2 ; |
112 | 212 | case SPCC::FCC_UGE: return SPCC::FCC_L8 ; |
113 | 212 | case SPCC::FCC_LG: return SPCC::FCC_UE0 ; |
114 | 212 | case SPCC::FCC_UE: return SPCC::FCC_LG0 ; |
115 | 212 | case SPCC::FCC_NE: return SPCC::FCC_E3 ; |
116 | 212 | case SPCC::FCC_E: return SPCC::FCC_NE3 ; |
117 | 212 | |
118 | 212 | case SPCC::CPCC_A: return SPCC::CPCC_N0 ; |
119 | 212 | case SPCC::CPCC_N: return SPCC::CPCC_A0 ; |
120 | 212 | case SPCC::CPCC_3: 0 LLVM_FALLTHROUGH0 ; |
121 | 0 | case SPCC::CPCC_2: LLVM_FALLTHROUGH; |
122 | 0 | case SPCC::CPCC_23: LLVM_FALLTHROUGH; |
123 | 0 | case SPCC::CPCC_1: LLVM_FALLTHROUGH; |
124 | 0 | case SPCC::CPCC_13: LLVM_FALLTHROUGH; |
125 | 0 | case SPCC::CPCC_12: LLVM_FALLTHROUGH; |
126 | 0 | case SPCC::CPCC_123: LLVM_FALLTHROUGH; |
127 | 0 | case SPCC::CPCC_0: LLVM_FALLTHROUGH; |
128 | 0 | case SPCC::CPCC_03: LLVM_FALLTHROUGH; |
129 | 0 | case SPCC::CPCC_02: LLVM_FALLTHROUGH; |
130 | 0 | case SPCC::CPCC_023: LLVM_FALLTHROUGH; |
131 | 0 | case SPCC::CPCC_01: LLVM_FALLTHROUGH; |
132 | 0 | case SPCC::CPCC_013: LLVM_FALLTHROUGH; |
133 | 0 | case SPCC::CPCC_012: |
134 | 0 | // "Opposite" code is not meaningful, as we don't know |
135 | 0 | // what the CoProc condition means here. The cond-code will |
136 | 0 | // only be used in inline assembler, so this code should |
137 | 0 | // not be reached in a normal compilation pass. |
138 | 0 | llvm_unreachable("Meaningless inversion of co-processor cond code"); |
139 | 0 | } |
140 | 0 | llvm_unreachable("Invalid cond code"); |
141 | 0 | } |
142 | | |
143 | 6.68k | static bool isUncondBranchOpcode(int Opc) { return Opc == SP::BA; } |
144 | | |
145 | 5.72k | static bool isCondBranchOpcode(int Opc) { |
146 | 5.72k | return Opc == SP::FBCOND || Opc == SP::BCOND5.41k ; |
147 | 5.72k | } |
148 | | |
149 | 103 | static bool isIndirectBranchOpcode(int Opc) { |
150 | 103 | return Opc == SP::BINDrr || Opc == SP::BINDri; |
151 | 103 | } |
152 | | |
153 | | static void parseCondBranch(MachineInstr *LastInst, MachineBasicBlock *&Target, |
154 | 2.35k | SmallVectorImpl<MachineOperand> &Cond) { |
155 | 2.35k | Cond.push_back(MachineOperand::CreateImm(LastInst->getOperand(1).getImm())); |
156 | 2.35k | Target = LastInst->getOperand(0).getMBB(); |
157 | 2.35k | } |
158 | | |
// Analyze the terminators of MBB per the TargetInstrInfo::analyzeBranch
// contract. Returns false (success) with TBB/FBB/Cond filled in when the
// block ends in a recognized pattern (fallthrough, unconditional branch,
// conditional branch, or conditional + unconditional pair); returns true
// when the terminators cannot be understood (e.g. indirect branches).
// With AllowModify set, dead trailing branches may be erased.
bool SparcInstrInfo::analyzeBranch(MachineBasicBlock &MBB,
                                   MachineBasicBlock *&TBB,
                                   MachineBasicBlock *&FBB,
                                   SmallVectorImpl<MachineOperand> &Cond,
                                   bool AllowModify) const {
  MachineBasicBlock::iterator I = MBB.getLastNonDebugInstr();
  if (I == MBB.end())
    return false;

  if (!isUnpredicatedTerminator(*I))
    return false;

  // Get the last instruction in the block.
  MachineInstr *LastInst = &*I;
  unsigned LastOpc = LastInst->getOpcode();

  // If there is only one terminator instruction, process it.
  if (I == MBB.begin() || !isUnpredicatedTerminator(*--I)) {
    if (isUncondBranchOpcode(LastOpc)) {
      TBB = LastInst->getOperand(0).getMBB();
      return false;
    }
    if (isCondBranchOpcode(LastOpc)) {
      // Block ends with fall-through condbranch.
      parseCondBranch(LastInst, TBB, Cond);
      return false;
    }
    return true; // Can't handle indirect branch.
  }

  // Get the instruction before it if it is a terminator.
  MachineInstr *SecondLastInst = &*I;
  unsigned SecondLastOpc = SecondLastInst->getOpcode();

  // If AllowModify is true and the block ends with two or more unconditional
  // branches, delete all but the first unconditional branch.
  if (AllowModify && isUncondBranchOpcode(LastOpc)) {
    while (isUncondBranchOpcode(SecondLastOpc)) {
      LastInst->eraseFromParent();
      LastInst = SecondLastInst;
      LastOpc = LastInst->getOpcode();
      if (I == MBB.begin() || !isUnpredicatedTerminator(*--I)) {
        // Return now the only terminator is an unconditional branch.
        TBB = LastInst->getOperand(0).getMBB();
        return false;
      } else {
        SecondLastInst = &*I;
        SecondLastOpc = SecondLastInst->getOpcode();
      }
    }
  }

  // If there are three terminators, we don't know what sort of block this is.
  if (SecondLastInst && I != MBB.begin() && isUnpredicatedTerminator(*--I))
    return true;

  // If the block ends with a B and a Bcc, handle it.
  if (isCondBranchOpcode(SecondLastOpc) && isUncondBranchOpcode(LastOpc)) {
    parseCondBranch(SecondLastInst, TBB, Cond);
    FBB = LastInst->getOperand(0).getMBB();
    return false;
  }

  // If the block ends with two unconditional branches, handle it. The second
  // one is not executed.
  if (isUncondBranchOpcode(SecondLastOpc) && isUncondBranchOpcode(LastOpc)) {
    TBB = SecondLastInst->getOperand(0).getMBB();
    return false;
  }

  // ...likewise if it ends with an indirect branch followed by an unconditional
  // branch.
  if (isIndirectBranchOpcode(SecondLastOpc) && isUncondBranchOpcode(LastOpc)) {
    I = LastInst;
    if (AllowModify)
      I->eraseFromParent();
    return true;
  }

  // Otherwise, can't handle this.
  return true;
}
241 | | |
242 | | unsigned SparcInstrInfo::insertBranch(MachineBasicBlock &MBB, |
243 | | MachineBasicBlock *TBB, |
244 | | MachineBasicBlock *FBB, |
245 | | ArrayRef<MachineOperand> Cond, |
246 | | const DebugLoc &DL, |
247 | 343 | int *BytesAdded) const { |
248 | 343 | assert(TBB && "insertBranch must not be told to insert a fallthrough"); |
249 | 343 | assert((Cond.size() == 1 || Cond.size() == 0) && |
250 | 343 | "Sparc branch conditions should have one component!"); |
251 | 343 | assert(!BytesAdded && "code size not handled"); |
252 | 343 | |
253 | 343 | if (Cond.empty()) { |
254 | 79 | assert(!FBB && "Unconditional branch with multiple successors!"); |
255 | 79 | BuildMI(&MBB, DL, get(SP::BA)).addMBB(TBB); |
256 | 79 | return 1; |
257 | 79 | } |
258 | 264 | |
259 | 264 | // Conditional branch |
260 | 264 | unsigned CC = Cond[0].getImm(); |
261 | 264 | |
262 | 264 | if (IsIntegerCC(CC)) |
263 | 218 | BuildMI(&MBB, DL, get(SP::BCOND)).addMBB(TBB).addImm(CC); |
264 | 46 | else |
265 | 46 | BuildMI(&MBB, DL, get(SP::FBCOND)).addMBB(TBB).addImm(CC); |
266 | 264 | if (!FBB) |
267 | 258 | return 1; |
268 | 6 | |
269 | 6 | BuildMI(&MBB, DL, get(SP::BA)).addMBB(FBB); |
270 | 6 | return 2; |
271 | 6 | } |
272 | | |
273 | | unsigned SparcInstrInfo::removeBranch(MachineBasicBlock &MBB, |
274 | 377 | int *BytesRemoved) const { |
275 | 377 | assert(!BytesRemoved && "code size not handled"); |
276 | 377 | |
277 | 377 | MachineBasicBlock::iterator I = MBB.end(); |
278 | 377 | unsigned Count = 0; |
279 | 799 | while (I != MBB.begin()) { |
280 | 791 | --I; |
281 | 791 | |
282 | 791 | if (I->isDebugInstr()) |
283 | 0 | continue; |
284 | 791 | |
285 | 791 | if (I->getOpcode() != SP::BA |
286 | 791 | && I->getOpcode() != SP::BCOND634 |
287 | 791 | && I->getOpcode() != SP::FBCOND415 ) |
288 | 369 | break; // Not a branch |
289 | 422 | |
290 | 422 | I->eraseFromParent(); |
291 | 422 | I = MBB.end(); |
292 | 422 | ++Count; |
293 | 422 | } |
294 | 377 | return Count; |
295 | 377 | } |
296 | | |
297 | | bool SparcInstrInfo::reverseBranchCondition( |
298 | 212 | SmallVectorImpl<MachineOperand> &Cond) const { |
299 | 212 | assert(Cond.size() == 1); |
300 | 212 | SPCC::CondCodes CC = static_cast<SPCC::CondCodes>(Cond[0].getImm()); |
301 | 212 | Cond[0].setImm(GetOppositeBranchCondition(CC)); |
302 | 212 | return false; |
303 | 212 | } |
304 | | |
// Emit instructions that copy SrcReg into DestReg at the insertion point I.
// Simple cases (integer, single FP, V9 double/quad with hardware support)
// map to one instruction; otherwise the copy is split into per-subregister
// moves in the loop at the bottom.
void SparcInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
                                 MachineBasicBlock::iterator I,
                                 const DebugLoc &DL, unsigned DestReg,
                                 unsigned SrcReg, bool KillSrc) const {
  unsigned numSubRegs = 0;           // 0 means "already emitted, no expansion".
  unsigned movOpc = 0;               // Per-subregister move opcode.
  const unsigned *subRegIdx = nullptr;
  bool ExtraG0 = false;              // True when movOpc needs %g0 as first src.

  // Sub-register index tables for the expansion cases below.
  const unsigned DW_SubRegsIdx[] = { SP::sub_even, SP::sub_odd };
  const unsigned DFP_FP_SubRegsIdx[] = { SP::sub_even, SP::sub_odd };
  const unsigned QFP_DFP_SubRegsIdx[] = { SP::sub_even64, SP::sub_odd64 };
  const unsigned QFP_FP_SubRegsIdx[] = { SP::sub_even, SP::sub_odd,
                                         SP::sub_odd64_then_sub_even,
                                         SP::sub_odd64_then_sub_odd };

  if (SP::IntRegsRegClass.contains(DestReg, SrcReg))
    // Integer move: "or %g0, src, dst".
    BuildMI(MBB, I, DL, get(SP::ORrr), DestReg).addReg(SP::G0)
      .addReg(SrcReg, getKillRegState(KillSrc));
  else if (SP::IntPairRegClass.contains(DestReg, SrcReg)) {
    // 64-bit integer pair: expand into two ORrr moves (even/odd halves).
    subRegIdx  = DW_SubRegsIdx;
    numSubRegs = 2;
    movOpc     = SP::ORrr;
    ExtraG0 = true;
  } else if (SP::FPRegsRegClass.contains(DestReg, SrcReg))
    BuildMI(MBB, I, DL, get(SP::FMOVS), DestReg)
      .addReg(SrcReg, getKillRegState(KillSrc));
  else if (SP::DFPRegsRegClass.contains(DestReg, SrcReg)) {
    if (Subtarget.isV9()) {
      // V9 has a native double-precision register move.
      BuildMI(MBB, I, DL, get(SP::FMOVD), DestReg)
        .addReg(SrcReg, getKillRegState(KillSrc));
    } else {
      // Use two FMOVS instructions.
      subRegIdx  = DFP_FP_SubRegsIdx;
      numSubRegs = 2;
      movOpc     = SP::FMOVS;
    }
  } else if (SP::QFPRegsRegClass.contains(DestReg, SrcReg)) {
    if (Subtarget.isV9()) {
      if (Subtarget.hasHardQuad()) {
        // Hardware quad support: single FMOVQ.
        BuildMI(MBB, I, DL, get(SP::FMOVQ), DestReg)
          .addReg(SrcReg, getKillRegState(KillSrc));
      } else {
        // Use two FMOVD instructions.
        subRegIdx  = QFP_DFP_SubRegsIdx;
        numSubRegs = 2;
        movOpc     = SP::FMOVD;
      }
    } else {
      // Use four FMOVS instructions.
      subRegIdx  = QFP_FP_SubRegsIdx;
      numSubRegs = 4;
      movOpc     = SP::FMOVS;
    }
  } else if (SP::ASRRegsRegClass.contains(DestReg) &&
             SP::IntRegsRegClass.contains(SrcReg)) {
    // Write an ancillary state register from an integer register.
    BuildMI(MBB, I, DL, get(SP::WRASRrr), DestReg)
        .addReg(SP::G0)
        .addReg(SrcReg, getKillRegState(KillSrc));
  } else if (SP::IntRegsRegClass.contains(DestReg) &&
             SP::ASRRegsRegClass.contains(SrcReg)) {
    // Read an ancillary state register into an integer register.
    BuildMI(MBB, I, DL, get(SP::RDASR), DestReg)
        .addReg(SrcReg, getKillRegState(KillSrc));
  } else
    llvm_unreachable("Impossible reg-to-reg copy");

  // Done unless one of the expansion cases above was selected.
  if (numSubRegs == 0 || subRegIdx == nullptr || movOpc == 0)
    return;

  const TargetRegisterInfo *TRI = &getRegisterInfo();
  MachineInstr *MovMI = nullptr;

  // Emit one move per sub-register.
  for (unsigned i = 0; i != numSubRegs; ++i) {
    unsigned Dst = TRI->getSubReg(DestReg, subRegIdx[i]);
    unsigned Src = TRI->getSubReg(SrcReg, subRegIdx[i]);
    assert(Dst && Src && "Bad sub-register");

    MachineInstrBuilder MIB = BuildMI(MBB, I, DL, get(movOpc), Dst);
    if (ExtraG0)
      MIB.addReg(SP::G0);
    MIB.addReg(Src);
    MovMI = MIB.getInstr();
  }
  // Add implicit super-register defs and kills to the last MovMI.
  MovMI->addRegisterDefined(DestReg, TRI);
  if (KillSrc)
    MovMI->addRegisterKilled(SrcReg, TRI);
}
393 | | |
394 | | void SparcInstrInfo:: |
395 | | storeRegToStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I, |
396 | | unsigned SrcReg, bool isKill, int FI, |
397 | | const TargetRegisterClass *RC, |
398 | 1.33k | const TargetRegisterInfo *TRI) const { |
399 | 1.33k | DebugLoc DL; |
400 | 1.33k | if (I != MBB.end()) DL = I->getDebugLoc()1.31k ; |
401 | 1.33k | |
402 | 1.33k | MachineFunction *MF = MBB.getParent(); |
403 | 1.33k | const MachineFrameInfo &MFI = MF->getFrameInfo(); |
404 | 1.33k | MachineMemOperand *MMO = MF->getMachineMemOperand( |
405 | 1.33k | MachinePointerInfo::getFixedStack(*MF, FI), MachineMemOperand::MOStore, |
406 | 1.33k | MFI.getObjectSize(FI), MFI.getObjectAlignment(FI)); |
407 | 1.33k | |
408 | 1.33k | // On the order of operands here: think "[FrameIdx + 0] = SrcReg". |
409 | 1.33k | if (RC == &SP::I64RegsRegClass) |
410 | 17 | BuildMI(MBB, I, DL, get(SP::STXri)).addFrameIndex(FI).addImm(0) |
411 | 17 | .addReg(SrcReg, getKillRegState(isKill)).addMemOperand(MMO); |
412 | 1.32k | else if (RC == &SP::IntRegsRegClass) |
413 | 284 | BuildMI(MBB, I, DL, get(SP::STri)).addFrameIndex(FI).addImm(0) |
414 | 284 | .addReg(SrcReg, getKillRegState(isKill)).addMemOperand(MMO); |
415 | 1.03k | else if (RC == &SP::IntPairRegClass) |
416 | 8 | BuildMI(MBB, I, DL, get(SP::STDri)).addFrameIndex(FI).addImm(0) |
417 | 8 | .addReg(SrcReg, getKillRegState(isKill)).addMemOperand(MMO); |
418 | 1.02k | else if (RC == &SP::FPRegsRegClass) |
419 | 3 | BuildMI(MBB, I, DL, get(SP::STFri)).addFrameIndex(FI).addImm(0) |
420 | 3 | .addReg(SrcReg, getKillRegState(isKill)).addMemOperand(MMO); |
421 | 1.02k | else if (SP::DFPRegsRegClass.hasSubClassEq(RC)) |
422 | 9 | BuildMI(MBB, I, DL, get(SP::STDFri)).addFrameIndex(FI).addImm(0) |
423 | 9 | .addReg(SrcReg, getKillRegState(isKill)).addMemOperand(MMO); |
424 | 1.01k | else if (SP::QFPRegsRegClass.hasSubClassEq(RC)) |
425 | 1.01k | // Use STQFri irrespective of its legality. If STQ is not legal, it will be |
426 | 1.01k | // lowered into two STDs in eliminateFrameIndex. |
427 | 1.01k | BuildMI(MBB, I, DL, get(SP::STQFri)).addFrameIndex(FI).addImm(0) |
428 | 1.01k | .addReg(SrcReg, getKillRegState(isKill)).addMemOperand(MMO); |
429 | 1.01k | else |
430 | 1.01k | llvm_unreachable0 ("Can't store this register to stack slot"); |
431 | 1.33k | } |
432 | | |
433 | | void SparcInstrInfo:: |
434 | | loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I, |
435 | | unsigned DestReg, int FI, |
436 | | const TargetRegisterClass *RC, |
437 | 1.15k | const TargetRegisterInfo *TRI) const { |
438 | 1.15k | DebugLoc DL; |
439 | 1.15k | if (I != MBB.end()) DL = I->getDebugLoc(); |
440 | 1.15k | |
441 | 1.15k | MachineFunction *MF = MBB.getParent(); |
442 | 1.15k | const MachineFrameInfo &MFI = MF->getFrameInfo(); |
443 | 1.15k | MachineMemOperand *MMO = MF->getMachineMemOperand( |
444 | 1.15k | MachinePointerInfo::getFixedStack(*MF, FI), MachineMemOperand::MOLoad, |
445 | 1.15k | MFI.getObjectSize(FI), MFI.getObjectAlignment(FI)); |
446 | 1.15k | |
447 | 1.15k | if (RC == &SP::I64RegsRegClass) |
448 | 27 | BuildMI(MBB, I, DL, get(SP::LDXri), DestReg).addFrameIndex(FI).addImm(0) |
449 | 27 | .addMemOperand(MMO); |
450 | 1.13k | else if (RC == &SP::IntRegsRegClass) |
451 | 96 | BuildMI(MBB, I, DL, get(SP::LDri), DestReg).addFrameIndex(FI).addImm(0) |
452 | 96 | .addMemOperand(MMO); |
453 | 1.03k | else if (RC == &SP::IntPairRegClass) |
454 | 8 | BuildMI(MBB, I, DL, get(SP::LDDri), DestReg).addFrameIndex(FI).addImm(0) |
455 | 8 | .addMemOperand(MMO); |
456 | 1.02k | else if (RC == &SP::FPRegsRegClass) |
457 | 3 | BuildMI(MBB, I, DL, get(SP::LDFri), DestReg).addFrameIndex(FI).addImm(0) |
458 | 3 | .addMemOperand(MMO); |
459 | 1.02k | else if (SP::DFPRegsRegClass.hasSubClassEq(RC)) |
460 | 9 | BuildMI(MBB, I, DL, get(SP::LDDFri), DestReg).addFrameIndex(FI).addImm(0) |
461 | 9 | .addMemOperand(MMO); |
462 | 1.01k | else if (SP::QFPRegsRegClass.hasSubClassEq(RC)) |
463 | 1.01k | // Use LDQFri irrespective of its legality. If LDQ is not legal, it will be |
464 | 1.01k | // lowered into two LDDs in eliminateFrameIndex. |
465 | 1.01k | BuildMI(MBB, I, DL, get(SP::LDQFri), DestReg).addFrameIndex(FI).addImm(0) |
466 | 1.01k | .addMemOperand(MMO); |
467 | 1.01k | else |
468 | 1.01k | llvm_unreachable0 ("Can't load this register from stack slot"); |
469 | 1.15k | } |
470 | | |
471 | | unsigned SparcInstrInfo::getGlobalBaseReg(MachineFunction *MF) const |
472 | 42 | { |
473 | 42 | SparcMachineFunctionInfo *SparcFI = MF->getInfo<SparcMachineFunctionInfo>(); |
474 | 42 | unsigned GlobalBaseReg = SparcFI->getGlobalBaseReg(); |
475 | 42 | if (GlobalBaseReg != 0) |
476 | 10 | return GlobalBaseReg; |
477 | 32 | |
478 | 32 | // Insert the set of GlobalBaseReg into the first MBB of the function |
479 | 32 | MachineBasicBlock &FirstMBB = MF->front(); |
480 | 32 | MachineBasicBlock::iterator MBBI = FirstMBB.begin(); |
481 | 32 | MachineRegisterInfo &RegInfo = MF->getRegInfo(); |
482 | 32 | |
483 | 32 | const TargetRegisterClass *PtrRC = |
484 | 32 | Subtarget.is64Bit() ? &SP::I64RegsRegClass16 : &SP::IntRegsRegClass16 ; |
485 | 32 | GlobalBaseReg = RegInfo.createVirtualRegister(PtrRC); |
486 | 32 | |
487 | 32 | DebugLoc dl; |
488 | 32 | |
489 | 32 | BuildMI(FirstMBB, MBBI, dl, get(SP::GETPCX), GlobalBaseReg); |
490 | 32 | SparcFI->setGlobalBaseReg(GlobalBaseReg); |
491 | 32 | return GlobalBaseReg; |
492 | 32 | } |
493 | | |
494 | 2.53k | bool SparcInstrInfo::expandPostRAPseudo(MachineInstr &MI) const { |
495 | 2.53k | switch (MI.getOpcode()) { |
496 | 2.53k | case TargetOpcode::LOAD_STACK_GUARD: { |
497 | 4 | assert(Subtarget.isTargetLinux() && |
498 | 4 | "Only Linux target is expected to contain LOAD_STACK_GUARD"); |
499 | 4 | // offsetof(tcbhead_t, stack_guard) from sysdeps/sparc/nptl/tls.h in glibc. |
500 | 4 | const int64_t Offset = Subtarget.is64Bit() ? 0x282 : 0x142 ; |
501 | 4 | MI.setDesc(get(Subtarget.is64Bit() ? SP::LDXri2 : SP::LDri2 )); |
502 | 4 | MachineInstrBuilder(*MI.getParent()->getParent(), MI) |
503 | 4 | .addReg(SP::G7) |
504 | 4 | .addImm(Offset); |
505 | 4 | return true; |
506 | 2.52k | } |
507 | 2.52k | } |
508 | 2.52k | return false; |
509 | 2.52k | } |