/Users/buildslave/jenkins/workspace/clang-stage2-coverage-R/llvm/lib/Target/X86/X86FastISel.cpp
Line | Count | Source
1 | | //===-- X86FastISel.cpp - X86 FastISel implementation ---------------------===// |
2 | | // |
3 | | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. |
4 | | // See https://llvm.org/LICENSE.txt for license information. |
5 | | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception |
6 | | // |
7 | | //===----------------------------------------------------------------------===// |
8 | | // |
9 | | // This file defines the X86-specific support for the FastISel class. Much |
10 | | // of the target-specific code is generated by tablegen in the file |
11 | | // X86GenFastISel.inc, which is #included here. |
12 | | // |
13 | | //===----------------------------------------------------------------------===// |
14 | | |
15 | | #include "X86.h" |
16 | | #include "X86CallingConv.h" |
17 | | #include "X86InstrBuilder.h" |
18 | | #include "X86InstrInfo.h" |
19 | | #include "X86MachineFunctionInfo.h" |
20 | | #include "X86RegisterInfo.h" |
21 | | #include "X86Subtarget.h" |
22 | | #include "X86TargetMachine.h" |
23 | | #include "llvm/Analysis/BranchProbabilityInfo.h" |
24 | | #include "llvm/CodeGen/FastISel.h" |
25 | | #include "llvm/CodeGen/FunctionLoweringInfo.h" |
26 | | #include "llvm/CodeGen/MachineConstantPool.h" |
27 | | #include "llvm/CodeGen/MachineFrameInfo.h" |
28 | | #include "llvm/CodeGen/MachineRegisterInfo.h" |
29 | | #include "llvm/IR/CallSite.h" |
30 | | #include "llvm/IR/CallingConv.h" |
31 | | #include "llvm/IR/DebugInfo.h" |
32 | | #include "llvm/IR/DerivedTypes.h" |
33 | | #include "llvm/IR/GetElementPtrTypeIterator.h" |
34 | | #include "llvm/IR/GlobalAlias.h" |
35 | | #include "llvm/IR/GlobalVariable.h" |
36 | | #include "llvm/IR/Instructions.h" |
37 | | #include "llvm/IR/IntrinsicInst.h" |
38 | | #include "llvm/IR/Operator.h" |
39 | | #include "llvm/MC/MCAsmInfo.h" |
40 | | #include "llvm/MC/MCSymbol.h" |
41 | | #include "llvm/Support/ErrorHandling.h" |
42 | | #include "llvm/Target/TargetOptions.h" |
43 | | using namespace llvm; |
44 | | |
45 | | namespace { |
46 | | |
47 | | class X86FastISel final : public FastISel { |
48 | | /// Subtarget - Keep a pointer to the X86Subtarget around so that we can |
49 | | /// make the right decision when generating code for different targets. |
50 | | const X86Subtarget *Subtarget; |
51 | | |
52 | | /// X86ScalarSSEf32, X86ScalarSSEf64 - Select between SSE or x87 |
53 | | /// floating point ops. |
54 | | /// When SSE is available, use it for f32 operations. |
55 | | /// When SSE2 is available, use it for f64 operations. |
56 | | bool X86ScalarSSEf64; |
57 | | bool X86ScalarSSEf32; |
58 | | |
59 | | public: |
60 | | explicit X86FastISel(FunctionLoweringInfo &funcInfo, |
61 | | const TargetLibraryInfo *libInfo) |
62 | 9.56k | : FastISel(funcInfo, libInfo) { |
63 | 9.56k | Subtarget = &funcInfo.MF->getSubtarget<X86Subtarget>(); |
64 | 9.56k | X86ScalarSSEf64 = Subtarget->hasSSE2(); |
65 | 9.56k | X86ScalarSSEf32 = Subtarget->hasSSE1(); |
66 | 9.56k | } |
67 | | |
68 | | bool fastSelectInstruction(const Instruction *I) override; |
69 | | |
70 | | /// The specified machine instr operand is a vreg, and that |
71 | | /// vreg is being provided by the specified load instruction. If possible, |
72 | | /// try to fold the load as an operand to the instruction, returning true
73 | | /// on success.
74 | | bool tryToFoldLoadIntoMI(MachineInstr *MI, unsigned OpNo, |
75 | | const LoadInst *LI) override; |
76 | | |
77 | | bool fastLowerArguments() override; |
78 | | bool fastLowerCall(CallLoweringInfo &CLI) override; |
79 | | bool fastLowerIntrinsicCall(const IntrinsicInst *II) override; |
80 | | |
81 | | #include "X86GenFastISel.inc" |
82 | | |
83 | | private: |
84 | | bool X86FastEmitCompare(const Value *LHS, const Value *RHS, EVT VT, |
85 | | const DebugLoc &DL); |
86 | | |
87 | | bool X86FastEmitLoad(MVT VT, X86AddressMode &AM, MachineMemOperand *MMO, |
88 | | unsigned &ResultReg, unsigned Alignment = 1); |
89 | | |
90 | | bool X86FastEmitStore(EVT VT, const Value *Val, X86AddressMode &AM, |
91 | | MachineMemOperand *MMO = nullptr, bool Aligned = false); |
92 | | bool X86FastEmitStore(EVT VT, unsigned ValReg, bool ValIsKill, |
93 | | X86AddressMode &AM, |
94 | | MachineMemOperand *MMO = nullptr, bool Aligned = false); |
95 | | |
96 | | bool X86FastEmitExtend(ISD::NodeType Opc, EVT DstVT, unsigned Src, EVT SrcVT, |
97 | | unsigned &ResultReg); |
98 | | |
99 | | bool X86SelectAddress(const Value *V, X86AddressMode &AM); |
100 | | bool X86SelectCallAddress(const Value *V, X86AddressMode &AM); |
101 | | |
102 | | bool X86SelectLoad(const Instruction *I); |
103 | | |
104 | | bool X86SelectStore(const Instruction *I); |
105 | | |
106 | | bool X86SelectRet(const Instruction *I); |
107 | | |
108 | | bool X86SelectCmp(const Instruction *I); |
109 | | |
110 | | bool X86SelectZExt(const Instruction *I); |
111 | | |
112 | | bool X86SelectSExt(const Instruction *I); |
113 | | |
114 | | bool X86SelectBranch(const Instruction *I); |
115 | | |
116 | | bool X86SelectShift(const Instruction *I); |
117 | | |
118 | | bool X86SelectDivRem(const Instruction *I); |
119 | | |
120 | | bool X86FastEmitCMoveSelect(MVT RetVT, const Instruction *I); |
121 | | |
122 | | bool X86FastEmitSSESelect(MVT RetVT, const Instruction *I); |
123 | | |
124 | | bool X86FastEmitPseudoSelect(MVT RetVT, const Instruction *I); |
125 | | |
126 | | bool X86SelectSelect(const Instruction *I); |
127 | | |
128 | | bool X86SelectTrunc(const Instruction *I); |
129 | | |
130 | | bool X86SelectFPExtOrFPTrunc(const Instruction *I, unsigned Opc, |
131 | | const TargetRegisterClass *RC); |
132 | | |
133 | | bool X86SelectFPExt(const Instruction *I); |
134 | | bool X86SelectFPTrunc(const Instruction *I); |
135 | | bool X86SelectSIToFP(const Instruction *I); |
136 | | bool X86SelectUIToFP(const Instruction *I); |
137 | | bool X86SelectIntToFP(const Instruction *I, bool IsSigned); |
138 | | |
139 | 28 | const X86InstrInfo *getInstrInfo() const { |
140 | 28 | return Subtarget->getInstrInfo(); |
141 | 28 | } |
142 | 0 | const X86TargetMachine *getTargetMachine() const { |
143 | 0 | return static_cast<const X86TargetMachine *>(&TM); |
144 | 0 | } |
145 | | |
146 | | bool handleConstantAddresses(const Value *V, X86AddressMode &AM); |
147 | | |
148 | | unsigned X86MaterializeInt(const ConstantInt *CI, MVT VT); |
149 | | unsigned X86MaterializeFP(const ConstantFP *CFP, MVT VT); |
150 | | unsigned X86MaterializeGV(const GlobalValue *GV, MVT VT); |
151 | | unsigned fastMaterializeConstant(const Constant *C) override; |
152 | | |
153 | | unsigned fastMaterializeAlloca(const AllocaInst *C) override; |
154 | | |
155 | | unsigned fastMaterializeFloatZero(const ConstantFP *CF) override; |
156 | | |
157 | | /// isScalarFPTypeInSSEReg - Return true if the specified scalar FP type is |
158 | | /// computed in an SSE register, not on the X87 floating point stack. |
159 | 1 | bool isScalarFPTypeInSSEReg(EVT VT) const { |
160 | 1 | return (VT == MVT::f64 && X86ScalarSSEf64) || // f64 when SSE2 is available
161 | 1 | (VT == MVT::f32 && X86ScalarSSEf32); // f32 when SSE1 is available
162 | 1 | } |
163 | | |
164 | | bool isTypeLegal(Type *Ty, MVT &VT, bool AllowI1 = false); |
165 | | |
166 | | bool IsMemcpySmall(uint64_t Len); |
167 | | |
168 | | bool TryEmitSmallMemcpy(X86AddressMode DestAM, |
169 | | X86AddressMode SrcAM, uint64_t Len); |
170 | | |
171 | | bool foldX86XALUIntrinsic(X86::CondCode &CC, const Instruction *I, |
172 | | const Value *Cond); |
173 | | |
174 | | const MachineInstrBuilder &addFullAddress(const MachineInstrBuilder &MIB, |
175 | | X86AddressMode &AM); |
176 | | |
177 | | unsigned fastEmitInst_rrrr(unsigned MachineInstOpcode, |
178 | | const TargetRegisterClass *RC, unsigned Op0, |
179 | | bool Op0IsKill, unsigned Op1, bool Op1IsKill, |
180 | | unsigned Op2, bool Op2IsKill, unsigned Op3, |
181 | | bool Op3IsKill); |
182 | | }; |
183 | | |
184 | | } // end anonymous namespace. |
185 | | |
186 | | static std::pair<unsigned, bool> |
187 | 78 | getX86SSEConditionCode(CmpInst::Predicate Predicate) { |
188 | 78 | unsigned CC; |
189 | 78 | bool NeedSwap = false; |
190 | 78 | |
191 | 78 | // SSE Condition code mapping: |
192 | 78 | // 0 - EQ |
193 | 78 | // 1 - LT |
194 | 78 | // 2 - LE |
195 | 78 | // 3 - UNORD |
196 | 78 | // 4 - NEQ |
197 | 78 | // 5 - NLT |
198 | 78 | // 6 - NLE |
199 | 78 | // 7 - ORD |
200 | 78 | switch (Predicate) { |
201 | 78 | default: llvm_unreachable("Unexpected predicate");
202 | 78 | case CmpInst::FCMP_OEQ: CC = 0; break;
203 | 78 | case CmpInst::FCMP_OGT: NeedSwap = true; LLVM_FALLTHROUGH;
204 | 12 | case CmpInst::FCMP_OLT: CC = 1; break; |
205 | 6 | case CmpInst::FCMP_OGE: NeedSwap = true; LLVM_FALLTHROUGH; |
206 | 12 | case CmpInst::FCMP_OLE: CC = 2; break; |
207 | 6 | case CmpInst::FCMP_UNO: CC = 3; break; |
208 | 6 | case CmpInst::FCMP_UNE: CC = 4; break; |
209 | 6 | case CmpInst::FCMP_ULE: NeedSwap = true; LLVM_FALLTHROUGH; |
210 | 12 | case CmpInst::FCMP_UGE: CC = 5; break; |
211 | 6 | case CmpInst::FCMP_ULT: NeedSwap = true; LLVM_FALLTHROUGH; |
212 | 12 | case CmpInst::FCMP_UGT: CC = 6; break; |
213 | 6 | case CmpInst::FCMP_ORD: CC = 7; break; |
214 | 6 | case CmpInst::FCMP_UEQ: CC = 8; break;
215 | 6 | case CmpInst::FCMP_ONE: CC = 12; break; |
216 | 78 | } |
217 | 78 | |
218 | 78 | return std::make_pair(CC, NeedSwap); |
219 | 78 | } |
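 | | // Illustrative sketch (not in the original source): for IR like
 | | //   %c = fcmp ogt float %a, %b
 | | // the table above yields CC = 1 (LT) with NeedSwap = true, so a caller
 | | // would emit the compare with the operands swapped, roughly
 | | //   cmpltss %xmm_a, %xmm_b   ; computes b < a, i.e. a > b
 | | // where the register names are placeholders.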
220 | | |
221 | | /// Adds a complex addressing mode to the given machine instr builder. |
222 | | /// Note, this will constrain the index register. If it's not possible to
223 | | /// constrain the given index register, then a new one will be created. The |
224 | | /// IndexReg field of the addressing mode will be updated to match in this case. |
225 | | const MachineInstrBuilder & |
226 | | X86FastISel::addFullAddress(const MachineInstrBuilder &MIB, |
227 | 4.03k | X86AddressMode &AM) { |
228 | 4.03k | // First constrain the index register. It needs to be a GR64_NOSP. |
229 | 4.03k | AM.IndexReg = constrainOperandRegClass(MIB->getDesc(), AM.IndexReg, |
230 | 4.03k | MIB->getNumOperands() + |
231 | 4.03k | X86::AddrIndexReg); |
232 | 4.03k | return ::addFullAddress(MIB, AM); |
233 | 4.03k | } |
234 | | |
235 | | /// Check if it is possible to fold the condition from the XALU intrinsic |
236 | | /// into the user. The condition code will only be updated on success. |
237 | | bool X86FastISel::foldX86XALUIntrinsic(X86::CondCode &CC, const Instruction *I, |
238 | 218 | const Value *Cond) { |
239 | 218 | if (!isa<ExtractValueInst>(Cond)) |
240 | 116 | return false; |
241 | 102 | |
242 | 102 | const auto *EV = cast<ExtractValueInst>(Cond); |
243 | 102 | if (!isa<IntrinsicInst>(EV->getAggregateOperand())) |
244 | 66 | return false; |
245 | 36 | |
246 | 36 | const auto *II = cast<IntrinsicInst>(EV->getAggregateOperand()); |
247 | 36 | MVT RetVT; |
248 | 36 | const Function *Callee = II->getCalledFunction(); |
249 | 36 | Type *RetTy = |
250 | 36 | cast<StructType>(Callee->getReturnType())->getTypeAtIndex(0U); |
251 | 36 | if (!isTypeLegal(RetTy, RetVT)) |
252 | 0 | return false; |
253 | 36 | |
254 | 36 | if (RetVT != MVT::i32 && RetVT != MVT::i64)
255 | 4 | return false; |
256 | 32 | |
257 | 32 | X86::CondCode TmpCC; |
258 | 32 | switch (II->getIntrinsicID()) { |
259 | 32 | default: return false;
260 | 32 | case Intrinsic::sadd_with_overflow: |
261 | 23 | case Intrinsic::ssub_with_overflow: |
262 | 23 | case Intrinsic::smul_with_overflow: |
263 | 23 | case Intrinsic::umul_with_overflow: TmpCC = X86::COND_O; break; |
264 | 23 | case Intrinsic::uadd_with_overflow: |
265 | 9 | case Intrinsic::usub_with_overflow: TmpCC = X86::COND_B; break; |
266 | 32 | } |
267 | 32 | |
268 | 32 | // Check if both instructions are in the same basic block. |
269 | 32 | if (II->getParent() != I->getParent()) |
270 | 0 | return false; |
271 | 32 | |
272 | 32 | // Make sure nothing is in the way |
273 | 32 | BasicBlock::const_iterator Start(I); |
274 | 32 | BasicBlock::const_iterator End(II); |
275 | 78 | for (auto Itr = std::prev(Start); Itr != End; --Itr) {
276 | 48 | // We only expect extractvalue instructions between the intrinsic and the |
277 | 48 | // instruction to be selected. |
278 | 48 | if (!isa<ExtractValueInst>(Itr)) |
279 | 2 | return false; |
280 | 46 | |
281 | 46 | // Check that the extractvalue operand comes from the intrinsic. |
282 | 46 | const auto *EVI = cast<ExtractValueInst>(Itr); |
283 | 46 | if (EVI->getAggregateOperand() != II) |
284 | 0 | return false; |
285 | 46 | } |
286 | 32 | |
287 | 32 | CC = TmpCC; |
288 | 30 | return true; |
289 | 32 | } |
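 | | // The folded pattern looks like this in IR (illustrative sketch with
 | | // hypothetical value names):
 | | //   %res = call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 %a, i32 %b)
 | | //   %sum = extractvalue { i32, i1 } %res, 0
 | | //   %obit = extractvalue { i32, i1 } %res, 1
 | | //   br i1 %obit, label %overflow, label %cont
 | | // On success, the user of %obit can test COND_O directly from the
 | | // EFLAGS still live after the add, instead of materializing the i1.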
290 | | |
291 | 14.6k | bool X86FastISel::isTypeLegal(Type *Ty, MVT &VT, bool AllowI1) { |
292 | 14.6k | EVT evt = TLI.getValueType(DL, Ty, /*AllowUnknown=*/true); |
293 | 14.6k | if (evt == MVT::Other || !evt.isSimple())
294 | 9 | // Unhandled type. Halt "fast" selection and bail. |
295 | 9 | return false; |
296 | 14.6k | |
297 | 14.6k | VT = evt.getSimpleVT(); |
298 | 14.6k | // For now, require SSE/SSE2 for performing floating-point operations, |
299 | 14.6k | // since x87 requires additional work. |
300 | 14.6k | if (VT == MVT::f64 && !X86ScalarSSEf64)
301 | 13 | return false;
302 | 14.6k | if (VT == MVT::f32 && !X86ScalarSSEf32)
303 | 9 | return false; |
304 | 14.5k | // Similarly, no f80 support yet. |
305 | 14.5k | if (VT == MVT::f80) |
306 | 4 | return false; |
307 | 14.5k | // We only handle legal types. For example, on x86-32 the instruction |
308 | 14.5k | // selector contains all of the 64-bit instructions from x86-64, |
309 | 14.5k | // under the assumption that i64 won't be used if the target doesn't |
310 | 14.5k | // support it. |
311 | 14.5k | return (AllowI1 && VT == MVT::i1) || TLI.isTypeLegal(VT);
312 | 14.5k | } |
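 | | // For instance (illustrative): on x86-64 with SSE2, i32, i64, f32, and
 | | // f64 all pass; f80 is rejected above, and i1 is accepted only when
 | | // the caller passes AllowI1 = true.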
313 | | |
314 | | /// X86FastEmitLoad - Emit a machine instruction to load a value of type VT. |
315 | | /// The address is given by the pre-computed addressing mode AM.
316 | | /// Return true and the result register by reference on success.
317 | | bool X86FastISel::X86FastEmitLoad(MVT VT, X86AddressMode &AM, |
318 | | MachineMemOperand *MMO, unsigned &ResultReg, |
319 | 1.39k | unsigned Alignment) { |
320 | 1.39k | bool HasSSE41 = Subtarget->hasSSE41(); |
321 | 1.39k | bool HasAVX = Subtarget->hasAVX(); |
322 | 1.39k | bool HasAVX2 = Subtarget->hasAVX2(); |
323 | 1.39k | bool HasAVX512 = Subtarget->hasAVX512(); |
324 | 1.39k | bool HasVLX = Subtarget->hasVLX(); |
325 | 1.39k | bool IsNonTemporal = MMO && MMO->isNonTemporal();
326 | 1.39k | |
327 | 1.39k | // Treat i1 loads the same as i8 loads. Masking will be done when storing. |
328 | 1.39k | if (VT == MVT::i1) |
329 | 2 | VT = MVT::i8; |
330 | 1.39k | |
331 | 1.39k | // Get opcode and regclass of the output for the given load instruction. |
332 | 1.39k | unsigned Opc = 0; |
333 | 1.39k | switch (VT.SimpleTy) { |
334 | 1.39k | default: return false;
335 | 1.39k | case MVT::i8: |
336 | 58 | Opc = X86::MOV8rm; |
337 | 58 | break; |
338 | 1.39k | case MVT::i16: |
339 | 25 | Opc = X86::MOV16rm; |
340 | 25 | break; |
341 | 1.39k | case MVT::i32: |
342 | 650 | Opc = X86::MOV32rm; |
343 | 650 | break; |
344 | 1.39k | case MVT::i64: |
345 | 316 | // Must be in x86-64 mode. |
346 | 316 | Opc = X86::MOV64rm; |
347 | 316 | break; |
348 | 1.39k | case MVT::f32: |
349 | 17 | if (X86ScalarSSEf32) |
350 | 17 | Opc = HasAVX512 ? X86::VMOVSSZrm_alt :
351 | 17 | HasAVX ? X86::VMOVSSrm_alt :
352 | 16 | X86::MOVSSrm_alt;
353 | 0 | else |
354 | 0 | Opc = X86::LD_Fp32m; |
355 | 17 | break; |
356 | 1.39k | case MVT::f64: |
357 | 17 | if (X86ScalarSSEf64) |
358 | 17 | Opc = HasAVX512 ? X86::VMOVSDZrm_alt :
359 | 17 | HasAVX ? X86::VMOVSDrm_alt :
360 | 16 | X86::MOVSDrm_alt;
361 | 0 | else |
362 | 0 | Opc = X86::LD_Fp64m; |
363 | 17 | break; |
364 | 1.39k | case MVT::f80: |
365 | 0 | // No f80 support yet. |
366 | 0 | return false; |
367 | 1.39k | case MVT::v4f32: |
368 | 48 | if (IsNonTemporal && Alignment >= 16 && HasSSE41)
369 | 6 | Opc = HasVLX ? X86::VMOVNTDQAZ128rm :
370 | 6 | HasAVX ? X86::VMOVNTDQArm : X86::MOVNTDQArm;
371 | 42 | else if (Alignment >= 16)
372 | 32 | Opc = HasVLX ? X86::VMOVAPSZ128rm :
373 | 32 | HasAVX ? X86::VMOVAPSrm : X86::MOVAPSrm;
374 | 10 | else
375 | 10 | Opc = HasVLX ? X86::VMOVUPSZ128rm :
376 | 10 | HasAVX ? X86::VMOVUPSrm : X86::MOVUPSrm;
377 | 48 | break; |
378 | 1.39k | case MVT::v2f64: |
379 | 38 | if (IsNonTemporal && Alignment >= 16 && HasSSE41)
380 | 6 | Opc = HasVLX ? X86::VMOVNTDQAZ128rm :
381 | 6 | HasAVX ? X86::VMOVNTDQArm : X86::MOVNTDQArm;
382 | 32 | else if (Alignment >= 16)
383 | 22 | Opc = HasVLX ? X86::VMOVAPDZ128rm :
384 | 22 | HasAVX ? X86::VMOVAPDrm : X86::MOVAPDrm;
385 | 10 | else
386 | 10 | Opc = HasVLX ? X86::VMOVUPDZ128rm :
387 | 10 | HasAVX ? X86::VMOVUPDrm : X86::MOVUPDrm;
388 | 38 | break; |
389 | 1.39k | case MVT::v4i32: |
390 | 98 | case MVT::v2i64: |
391 | 98 | case MVT::v8i16: |
392 | 98 | case MVT::v16i8: |
393 | 98 | if (IsNonTemporal && Alignment >= 16 && HasSSE41)
394 | 30 | Opc = HasVLX ? X86::VMOVNTDQAZ128rm :
395 | 30 | HasAVX ? X86::VMOVNTDQArm : X86::MOVNTDQArm;
396 | 68 | else if (Alignment >= 16)
397 | 46 | Opc = HasVLX ? X86::VMOVDQA64Z128rm :
398 | 46 | HasAVX ? X86::VMOVDQArm : X86::MOVDQArm;
399 | 22 | else
400 | 22 | Opc = HasVLX ? X86::VMOVDQU64Z128rm :
401 | 22 | HasAVX ? X86::VMOVDQUrm : X86::MOVDQUrm;
402 | 98 | break; |
403 | 98 | case MVT::v8f32: |
404 | 19 | assert(HasAVX); |
405 | 19 | if (IsNonTemporal && Alignment >= 32 && HasAVX2)
406 | 4 | Opc = HasVLX ? X86::VMOVNTDQAZ256rm : X86::VMOVNTDQAYrm;
407 | 15 | else if (IsNonTemporal && Alignment >= 16)
408 | 1 | return false; // Force split for X86::VMOVNTDQArm
409 | 14 | else if (Alignment >= 32)
410 | 9 | Opc = HasVLX ? X86::VMOVAPSZ256rm : X86::VMOVAPSYrm;
411 | 5 | else
412 | 5 | Opc = HasVLX ? X86::VMOVUPSZ256rm : X86::VMOVUPSYrm;
413 | 19 | break;
414 | 19 | case MVT::v4f64: |
415 | 16 | assert(HasAVX); |
416 | 16 | if (IsNonTemporal && Alignment >= 32 && HasAVX2)
417 | 4 | Opc = HasVLX ? X86::VMOVNTDQAZ256rm : X86::VMOVNTDQAYrm;
418 | 12 | else if (IsNonTemporal && Alignment >= 16)
419 | 1 | return false; // Force split for X86::VMOVNTDQArm
420 | 11 | else if (Alignment >= 32)
421 | 6 | Opc = HasVLX ? X86::VMOVAPDZ256rm : X86::VMOVAPDYrm;
422 | 5 | else
423 | 5 | Opc = HasVLX ? X86::VMOVUPDZ256rm : X86::VMOVUPDYrm;
424 | 16 | break;
425 | 51 | case MVT::v8i32: |
426 | 51 | case MVT::v4i64: |
427 | 51 | case MVT::v16i16: |
428 | 51 | case MVT::v32i8: |
429 | 51 | assert(HasAVX); |
430 | 51 | if (IsNonTemporal && Alignment >= 32 && HasAVX2)
431 | 18 | Opc = HasVLX ? X86::VMOVNTDQAZ256rm : X86::VMOVNTDQAYrm;
432 | 33 | else if (IsNonTemporal && Alignment >= 16)
433 | 4 | return false; // Force split for X86::VMOVNTDQArm
434 | 29 | else if (Alignment >= 32)
435 | 15 | Opc = HasVLX ? X86::VMOVDQA64Z256rm : X86::VMOVDQAYrm;
436 | 14 | else
437 | 14 | Opc = HasVLX ? X86::VMOVDQU64Z256rm : X86::VMOVDQUYrm;
438 | 51 | break;
439 | 51 | case MVT::v16f32: |
440 | 6 | assert(HasAVX512); |
441 | 6 | if (IsNonTemporal && Alignment >= 64)
442 | 3 | Opc = X86::VMOVNTDQAZrm;
443 | 3 | else
444 | 3 | Opc = (Alignment >= 64) ? X86::VMOVAPSZrm : X86::VMOVUPSZrm;
445 | 6 | break; |
446 | 51 | case MVT::v8f64: |
447 | 7 | assert(HasAVX512); |
448 | 7 | if (IsNonTemporal && Alignment >= 64)
449 | 3 | Opc = X86::VMOVNTDQAZrm;
450 | 4 | else
451 | 4 | Opc = (Alignment >= 64) ? X86::VMOVAPDZrm : X86::VMOVUPDZrm;
452 | 7 | break; |
453 | 51 | case MVT::v8i64: |
454 | 20 | case MVT::v16i32: |
455 | 20 | case MVT::v32i16: |
456 | 20 | case MVT::v64i8: |
457 | 20 | assert(HasAVX512); |
458 | 20 | // Note: There are a lot more choices based on type with AVX-512, but |
459 | 20 | // there's really no advantage when the load isn't masked. |
460 | 20 | if (IsNonTemporal && Alignment >= 64)
461 | 8 | Opc = X86::VMOVNTDQAZrm;
462 | 12 | else
463 | 12 | Opc = (Alignment >= 64) ? X86::VMOVDQA64Zrm : X86::VMOVDQU64Zrm;
464 | 20 | break; |
465 | 1.38k | } |
466 | 1.38k | |
467 | 1.38k | const TargetRegisterClass *RC = TLI.getRegClassFor(VT); |
468 | 1.38k | |
469 | 1.38k | ResultReg = createResultReg(RC); |
470 | 1.38k | MachineInstrBuilder MIB = |
471 | 1.38k | BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), ResultReg); |
472 | 1.38k | addFullAddress(MIB, AM); |
473 | 1.38k | if (MMO) |
474 | 1.33k | MIB->addMemOperand(*FuncInfo.MF, MMO); |
475 | 1.38k | return true; |
476 | 1.38k | } |
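 | | // Hypothetical caller sketch (mirrors X86SelectLoad further down):
 | | // select the address first, then let this helper pick the opcode.
 | | //   X86AddressMode AM;
 | | //   unsigned Reg = 0;
 | | //   if (X86SelectAddress(Ptr, AM) &&
 | | //       X86FastEmitLoad(MVT::i32, AM, MMO, Reg, /*Alignment=*/4))
 | | //     updateValueMap(I, Reg); // Reg now holds the loaded value.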
477 | | |
478 | | /// X86FastEmitStore - Emit a machine instruction to store a value Val of |
479 | | /// type VT. The address is either pre-computed, consisting of a base
480 | | /// pointer and a displacement offset, or a GlobalAddress, i.e. V.
481 | | /// Return true if it is possible.
482 | | bool X86FastISel::X86FastEmitStore(EVT VT, unsigned ValReg, bool ValIsKill, |
483 | | X86AddressMode &AM, |
484 | 1.76k | MachineMemOperand *MMO, bool Aligned) { |
485 | 1.76k | bool HasSSE1 = Subtarget->hasSSE1(); |
486 | 1.76k | bool HasSSE2 = Subtarget->hasSSE2(); |
487 | 1.76k | bool HasSSE4A = Subtarget->hasSSE4A(); |
488 | 1.76k | bool HasAVX = Subtarget->hasAVX(); |
489 | 1.76k | bool HasAVX512 = Subtarget->hasAVX512(); |
490 | 1.76k | bool HasVLX = Subtarget->hasVLX(); |
491 | 1.76k | bool IsNonTemporal = MMO && MMO->isNonTemporal();
492 | 1.76k | |
493 | 1.76k | // Get opcode and regclass of the output for the given store instruction. |
494 | 1.76k | unsigned Opc = 0; |
495 | 1.76k | switch (VT.getSimpleVT().SimpleTy) { |
496 | 1.76k | case MVT::f80: // No f80 support yet. |
497 | 0 | default: return false; |
498 | 4 | case MVT::i1: { |
499 | 4 | // Mask out all but lowest bit. |
500 | 4 | unsigned AndResult = createResultReg(&X86::GR8RegClass); |
501 | 4 | BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, |
502 | 4 | TII.get(X86::AND8ri), AndResult) |
503 | 4 | .addReg(ValReg, getKillRegState(ValIsKill)).addImm(1); |
504 | 4 | ValReg = AndResult; |
505 | 4 | LLVM_FALLTHROUGH; // handle i1 as i8. |
506 | 4 | } |
507 | 58 | case MVT::i8: Opc = X86::MOV8mr; break; |
508 | 40 | case MVT::i16: Opc = X86::MOV16mr; break; |
509 | 719 | case MVT::i32: |
510 | 719 | Opc = (IsNonTemporal && HasSSE2) ? X86::MOVNTImr : X86::MOV32mr;
511 | 719 | break; |
512 | 489 | case MVT::i64: |
513 | 489 | // Must be in x86-64 mode. |
514 | 489 | Opc = (IsNonTemporal && HasSSE2) ? X86::MOVNTI_64mr : X86::MOV64mr;
515 | 489 | break; |
516 | 38 | case MVT::f32: |
517 | 38 | if (X86ScalarSSEf32) { |
518 | 38 | if (IsNonTemporal && HasSSE4A)
519 | 5 | Opc = X86::MOVNTSS;
520 | 33 | else
521 | 33 | Opc = HasAVX512 ? X86::VMOVSSZmr :
522 | 33 | HasAVX ? X86::VMOVSSmr : X86::MOVSSmr;
523 | 38 | } else |
524 | 0 | Opc = X86::ST_Fp32m; |
525 | 38 | break; |
526 | 44 | case MVT::f64: |
527 | 44 | if (X86ScalarSSEf64) {
528 | 44 | if (IsNonTemporal && HasSSE4A)
529 | 5 | Opc = X86::MOVNTSD;
530 | 39 | else
531 | 39 | Opc = HasAVX512 ? X86::VMOVSDZmr :
532 | 39 | HasAVX ? X86::VMOVSDmr : X86::MOVSDmr;
533 | 44 | } else |
534 | 0 | Opc = X86::ST_Fp64m; |
535 | 44 | break; |
536 | 8 | case MVT::x86mmx: |
537 | 8 | Opc = (IsNonTemporal && HasSSE1) ? X86::MMX_MOVNTQmr : X86::MMX_MOVQ64mr;
538 | 8 | break; |
539 | 98 | case MVT::v4f32: |
540 | 98 | if (Aligned) { |
541 | 81 | if (IsNonTemporal) |
542 | 14 | Opc = HasVLX ? X86::VMOVNTPSZ128mr :
543 | 14 | HasAVX ? X86::VMOVNTPSmr : X86::MOVNTPSmr;
544 | 67 | else
545 | 67 | Opc = HasVLX ? X86::VMOVAPSZ128mr :
546 | 67 | HasAVX ? X86::VMOVAPSmr : X86::MOVAPSmr;
547 | 81 | } else
548 | 17 | Opc = HasVLX ? X86::VMOVUPSZ128mr :
549 | 17 | HasAVX ? X86::VMOVUPSmr : X86::MOVUPSmr;
550 | 98 | break; |
551 | 62 | case MVT::v2f64: |
552 | 62 | if (Aligned) { |
553 | 46 | if (IsNonTemporal) |
554 | 14 | Opc = HasVLX ? X86::VMOVNTPDZ128mr :
555 | 14 | HasAVX ? X86::VMOVNTPDmr : X86::MOVNTPDmr;
556 | 32 | else
557 | 32 | Opc = HasVLX ? X86::VMOVAPDZ128mr :
558 | 32 | HasAVX ? X86::VMOVAPDmr : X86::MOVAPDmr;
559 | 46 | } else
560 | 16 | Opc = HasVLX ? X86::VMOVUPDZ128mr :
561 | 16 | HasAVX ? X86::VMOVUPDmr : X86::MOVUPDmr;
562 | 62 | break; |
563 | 69 | case MVT::v4i32: |
564 | 69 | case MVT::v2i64: |
565 | 69 | case MVT::v8i16: |
566 | 69 | case MVT::v16i8: |
567 | 69 | if (Aligned) { |
568 | 52 | if (IsNonTemporal) |
569 | 38 | Opc = HasVLX ? X86::VMOVNTDQZ128mr :
570 | 38 | HasAVX ? X86::VMOVNTDQmr : X86::MOVNTDQmr;
571 | 14 | else
572 | 14 | Opc = HasVLX ? X86::VMOVDQA64Z128mr :
573 | 14 | HasAVX ? X86::VMOVDQAmr : X86::MOVDQAmr;
574 | 52 | } else
575 | 17 | Opc = HasVLX ? X86::VMOVDQU64Z128mr :
576 | 17 | HasAVX ? X86::VMOVDQUmr : X86::MOVDQUmr;
577 | 69 | break; |
578 | 69 | case MVT::v8f32: |
579 | 24 | assert(HasAVX); |
580 | 24 | if (Aligned) { |
581 | 16 | if (IsNonTemporal) |
582 | 7 | Opc = HasVLX ? X86::VMOVNTPSZ256mr : X86::VMOVNTPSYmr;
583 | 9 | else
584 | 9 | Opc = HasVLX ? X86::VMOVAPSZ256mr : X86::VMOVAPSYmr;
585 | 16 | } else
586 | 8 | Opc = HasVLX ? X86::VMOVUPSZ256mr : X86::VMOVUPSYmr;
587 | 24 | break; |
588 | 69 | case MVT::v4f64: |
589 | 24 | assert(HasAVX); |
590 | 24 | if (Aligned) { |
591 | 16 | if (IsNonTemporal) |
592 | 7 | Opc = HasVLX ? X86::VMOVNTPDZ256mr : X86::VMOVNTPDYmr;
593 | 9 | else
594 | 9 | Opc = HasVLX ? X86::VMOVAPDZ256mr : X86::VMOVAPDYmr;
595 | 16 | } else
596 | 8 | Opc = HasVLX ? X86::VMOVUPDZ256mr : X86::VMOVUPDYmr;
597 | 24 | break; |
598 | 69 | case MVT::v8i32: |
599 | 49 | case MVT::v4i64: |
600 | 49 | case MVT::v16i16: |
601 | 49 | case MVT::v32i8: |
602 | 49 | assert(HasAVX); |
603 | 49 | if (Aligned) { |
604 | 35 | if (IsNonTemporal) |
605 | 22 | Opc = HasVLX ? X86::VMOVNTDQZ256mr : X86::VMOVNTDQYmr;
606 | 13 | else
607 | 13 | Opc = HasVLX ? X86::VMOVDQA64Z256mr : X86::VMOVDQAYmr;
608 | 35 | } else
609 | 14 | Opc = HasVLX ? X86::VMOVDQU64Z256mr : X86::VMOVDQUYmr;
610 | 49 | break; |
611 | 49 | case MVT::v16f32: |
612 | 12 | assert(HasAVX512); |
613 | 12 | if (Aligned) |
614 | 8 | Opc = IsNonTemporal ? X86::VMOVNTPSZmr : X86::VMOVAPSZmr;
615 | 4 | else |
616 | 4 | Opc = X86::VMOVUPSZmr; |
617 | 12 | break; |
618 | 49 | case MVT::v8f64: |
619 | 11 | assert(HasAVX512); |
620 | 11 | if (Aligned) { |
621 | 7 | Opc = IsNonTemporal ? X86::VMOVNTPDZmr : X86::VMOVAPDZmr;
622 | 7 | } else |
623 | 4 | Opc = X86::VMOVUPDZmr; |
624 | 11 | break; |
625 | 49 | case MVT::v8i64: |
626 | 17 | case MVT::v16i32: |
627 | 17 | case MVT::v32i16: |
628 | 17 | case MVT::v64i8: |
629 | 17 | assert(HasAVX512); |
630 | 17 | // Note: There are a lot more choices based on type with AVX-512, but |
631 | 17 | // there's really no advantage when the store isn't masked. |
632 | 17 | if (Aligned) |
633 | 13 | Opc = IsNonTemporal ? X86::VMOVNTDQZmr : X86::VMOVDQA64Zmr;
634 | 4 | else |
635 | 4 | Opc = X86::VMOVDQU64Zmr; |
636 | 17 | break; |
637 | 1.76k | } |
638 | 1.76k | |
639 | 1.76k | const MCInstrDesc &Desc = TII.get(Opc); |
640 | 1.76k | // Some of the instructions in the previous switch use FR128 instead |
641 | 1.76k | // of FR32 for ValReg. Make sure the register we feed the instruction |
642 | 1.76k | // matches its register class constraints. |
643 | 1.76k | // Note: This is fine to do a copy from FR32 to FR128, this is the |
644 | 1.76k | // same registers behind the scene and actually why it did not trigger |
645 | 1.76k | // any bugs before. |
646 | 1.76k | ValReg = constrainOperandRegClass(Desc, ValReg, Desc.getNumOperands() - 1); |
647 | 1.76k | MachineInstrBuilder MIB = |
648 | 1.76k | BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, Desc); |
649 | 1.76k | addFullAddress(MIB, AM).addReg(ValReg, getKillRegState(ValIsKill)); |
650 | 1.76k | if (MMO) |
651 | 1.68k | MIB->addMemOperand(*FuncInfo.MF, MMO); |
652 | 1.76k | |
653 | 1.76k | return true; |
654 | 1.76k | } |
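 | | // E.g. (illustrative): a 16-byte-aligned non-temporal v4f32 store picks
 | | // MOVNTPS (or its VEX/EVEX variant), while the same store without the
 | | // alignment guarantee falls back to MOVUPS, since the aligned and
 | | // non-temporal forms fault on misaligned addresses.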
655 | | |
656 | | bool X86FastISel::X86FastEmitStore(EVT VT, const Value *Val, |
657 | | X86AddressMode &AM, |
658 | 2.06k | MachineMemOperand *MMO, bool Aligned) { |
659 | 2.06k | // Handle 'null' like i32/i64 0. |
660 | 2.06k | if (isa<ConstantPointerNull>(Val)) |
661 | 29 | Val = Constant::getNullValue(DL.getIntPtrType(Val->getContext())); |
662 | 2.06k | |
663 | 2.06k | // If this is a store of a simple constant, fold the constant into the store. |
664 | 2.06k | if (const ConstantInt *CI = dyn_cast<ConstantInt>(Val)) { |
665 | 486 | unsigned Opc = 0; |
666 | 486 | bool Signed = true; |
667 | 486 | switch (VT.getSimpleVT().SimpleTy) { |
668 | 486 | default: break;
669 | 486 | case MVT::i1: |
670 | 10 | Signed = false; |
671 | 10 | LLVM_FALLTHROUGH; // Handle as i8. |
672 | 27 | case MVT::i8: Opc = X86::MOV8mi; break; |
673 | 10 | case MVT::i16: Opc = X86::MOV16mi; break;
674 | 368 | case MVT::i32: Opc = X86::MOV32mi; break; |
675 | 82 | case MVT::i64: |
676 | 82 | // Must be a 32-bit sign extended value. |
677 | 82 | if (isInt<32>(CI->getSExtValue())) |
678 | 81 | Opc = X86::MOV64mi32; |
679 | 82 | break; |
680 | 486 | } |
681 | 486 | |
682 | 486 | if (Opc) { |
683 | 485 | MachineInstrBuilder MIB = |
684 | 485 | BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc)); |
685 | 485 | addFullAddress(MIB, AM).addImm(Signed ? (uint64_t) CI->getSExtValue()
686 | 485 | : CI->getZExtValue());
687 | 485 | if (MMO) |
688 | 485 | MIB->addMemOperand(*FuncInfo.MF, MMO); |
689 | 485 | return true; |
690 | 485 | } |
691 | 1.58k | } |
692 | 1.58k | |
693 | 1.58k | unsigned ValReg = getRegForValue(Val); |
694 | 1.58k | if (ValReg == 0) |
695 | 3 | return false; |
696 | 1.58k | |
697 | 1.58k | bool ValKill = hasTrivialKill(Val); |
698 | 1.58k | return X86FastEmitStore(VT, ValReg, ValKill, AM, MMO, Aligned); |
699 | 1.58k | } |
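 | | // E.g. (illustrative): "store i32 42, i32* %p" folds the constant and
 | | // becomes a single MOV32mi ("movl $42, (%reg)") with no register
 | | // materialized for the value; i64 constants get the same treatment
 | | // only when they fit a sign-extended 32-bit immediate (MOV64mi32).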
700 | | |
701 | | /// X86FastEmitExtend - Emit a machine instruction to extend a value Src of |
702 | | /// type SrcVT to type DstVT using the specified extension opcode Opc (e.g. |
703 | | /// ISD::SIGN_EXTEND). |
704 | | bool X86FastISel::X86FastEmitExtend(ISD::NodeType Opc, EVT DstVT, |
705 | | unsigned Src, EVT SrcVT, |
706 | 14 | unsigned &ResultReg) { |
707 | 14 | unsigned RR = fastEmit_r(SrcVT.getSimpleVT(), DstVT.getSimpleVT(), Opc, |
708 | 14 | Src, /*TODO: Kill=*/false); |
709 | 14 | if (RR == 0) |
710 | 3 | return false; |
711 | 11 | |
712 | 11 | ResultReg = RR; |
713 | 11 | return true; |
714 | 11 | } |
715 | | |
716 | 2.49k | bool X86FastISel::handleConstantAddresses(const Value *V, X86AddressMode &AM) { |
717 | 2.49k | // Handle constant address. |
718 | 2.49k | if (const GlobalValue *GV = dyn_cast<GlobalValue>(V)) { |
719 | 699 | // Can't handle alternate code models yet. |
720 | 699 | if (TM.getCodeModel() != CodeModel::Small) |
721 | 59 | return false; |
722 | 640 | |
723 | 640 | // Can't handle TLS yet. |
724 | 640 | if (GV->isThreadLocal()) |
725 | 79 | return false; |
726 | 561 | |
727 | 561 | // Can't handle !absolute_symbol references yet. |
728 | 561 | if (GV->isAbsoluteSymbolRef()) |
729 | 2 | return false; |
730 | 559 | |
731 | 559 | // RIP-relative addresses can't have additional register operands, so if |
732 | 559 | // we've already folded stuff into the addressing mode, just force the |
733 | 559 | // global value into its own register, which we can use as the basereg. |
734 | 559 | if (!Subtarget->isPICStyleRIPRel() || |
735 | 559 | (AM.Base.Reg == 0 && AM.IndexReg == 0)) {
736 | 542 | // Okay, we've committed to selecting this global. Set up the address. |
737 | 542 | AM.GV = GV; |
738 | 542 | |
739 | 542 | // Allow the subtarget to classify the global. |
740 | 542 | unsigned char GVFlags = Subtarget->classifyGlobalReference(GV); |
741 | 542 | |
742 | 542 | // If this reference is relative to the pic base, set it now. |
743 | 542 | if (isGlobalRelativeToPICBase(GVFlags)) { |
744 | 16 | // FIXME: How do we know Base.Reg is free?? |
745 | 16 | AM.Base.Reg = getInstrInfo()->getGlobalBaseReg(FuncInfo.MF); |
746 | 16 | } |
747 | 542 | |
748 | 542 | // Unless the ABI requires an extra load, return a direct reference to |
749 | 542 | // the global. |
750 | 542 | if (!isGlobalStubReference(GVFlags)) { |
751 | 448 | if (Subtarget->isPICStyleRIPRel()) { |
752 | 148 | // Use rip-relative addressing if we can. Above we verified that the |
753 | 148 | // base and index registers are unused. |
754 | 148 | assert(AM.Base.Reg == 0 && AM.IndexReg == 0); |
755 | 148 | AM.Base.Reg = X86::RIP; |
756 | 148 | } |
757 | 448 | AM.GVOpFlags = GVFlags; |
758 | 448 | return true; |
759 | 448 | } |
760 | 94 | |
761 | 94 | // Ok, we need to do a load from a stub. If we've already loaded from |
762 | 94 | // this stub, reuse the loaded pointer, otherwise emit the load now. |
763 | 94 | DenseMap<const Value *, unsigned>::iterator I = LocalValueMap.find(V); |
764 | 94 | unsigned LoadReg; |
765 | 94 | if (I != LocalValueMap.end() && I->second != 0) {
766 | 25 | LoadReg = I->second; |
767 | 69 | } else { |
768 | 69 | // Issue load from stub. |
769 | 69 | unsigned Opc = 0; |
770 | 69 | const TargetRegisterClass *RC = nullptr; |
771 | 69 | X86AddressMode StubAM; |
772 | 69 | StubAM.Base.Reg = AM.Base.Reg; |
773 | 69 | StubAM.GV = GV; |
774 | 69 | StubAM.GVOpFlags = GVFlags; |
775 | 69 | |
776 | 69 | // Prepare for inserting code in the local-value area. |
777 | 69 | SavePoint SaveInsertPt = enterLocalValueArea(); |
778 | 69 | |
779 | 69 | if (TLI.getPointerTy(DL) == MVT::i64) { |
780 | 47 | Opc = X86::MOV64rm; |
781 | 47 | RC = &X86::GR64RegClass; |
782 | 47 | |
783 | 47 | if (Subtarget->isPICStyleRIPRel()) |
784 | 47 | StubAM.Base.Reg = X86::RIP; |
785 | 47 | } else { |
786 | 22 | Opc = X86::MOV32rm; |
787 | 22 | RC = &X86::GR32RegClass; |
788 | 22 | } |
789 | 69 | |
790 | 69 | LoadReg = createResultReg(RC); |
791 | 69 | MachineInstrBuilder LoadMI = |
792 | 69 | BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), LoadReg); |
793 | 69 | addFullAddress(LoadMI, StubAM); |
794 | 69 | |
795 | 69 | // Ok, back to normal mode. |
796 | 69 | leaveLocalValueArea(SaveInsertPt); |
797 | 69 | |
798 | 69 | // Prevent loading GV stub multiple times in same MBB. |
799 | 69 | LocalValueMap[V] = LoadReg; |
800 | 69 | } |
801 | 94 | |
802 | 94 | // Now construct the final address. Note that the Disp, Scale, |
803 | 94 | // and Index values may already be set here. |
804 | 94 | AM.Base.Reg = LoadReg; |
805 | 94 | AM.GV = nullptr; |
806 | 94 | return true; |
807 | 94 | } |
808 | 559 | } |
809 | 1.81k | |
810 | 1.81k | // If all else fails, try to materialize the value in a register. |
811 | 1.81k | if (!AM.GV || !Subtarget->isPICStyleRIPRel()) {
812 | 1.81k | if (AM.Base.Reg == 0) { |
813 | 1.81k | AM.Base.Reg = getRegForValue(V); |
814 | 1.81k | return AM.Base.Reg != 0; |
815 | 1.81k | } |
816 | 0 | if (AM.IndexReg == 0) { |
817 | 0 | assert(AM.Scale == 1 && "Scale with no index!"); |
818 | 0 | AM.IndexReg = getRegForValue(V); |
819 | 0 | return AM.IndexReg != 0; |
820 | 0 | } |
821 | 0 | } |
822 | 0 | |
823 | 0 | return false; |
824 | 0 | } |
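 | | // E.g. (illustrative): on x86-64 PIC, a global @g that needs a GOT
 | | // stub is handled above as roughly
 | | //   movq g@GOTPCREL(%rip), %reg   ; MOV64rm, issued once
 | | // and %reg is cached in LocalValueMap so later uses of @g in the same
 | | // MBB reuse it as the base register.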
825 | | |
826 | | /// X86SelectAddress - Attempt to fill in an address from the given value. |
827 | | /// |
828 | 6.05k | bool X86FastISel::X86SelectAddress(const Value *V, X86AddressMode &AM) { |
829 | 6.05k | SmallVector<const Value *, 32> GEPs; |
830 | 6.07k | redo_gep: |
831 | 6.07k | const User *U = nullptr; |
832 | 6.07k | unsigned Opcode = Instruction::UserOp1; |
833 | 6.07k | if (const Instruction *I = dyn_cast<Instruction>(V)) { |
834 | 3.86k | // Don't walk into other basic blocks; it's possible we haven't |
835 | 3.86k | // visited them yet, so the instructions may not yet be assigned |
836 | 3.86k | // virtual registers. |
837 | 3.86k | if (FuncInfo.StaticAllocaMap.count(static_cast<const AllocaInst *>(V)) || |
838 | 3.86k | FuncInfo.MBBMap[I->getParent()] == FuncInfo.MBB) {
839 | 3.81k | Opcode = I->getOpcode(); |
840 | 3.81k | U = I; |
841 | 3.81k | } |
842 | 3.86k | } else if (const ConstantExpr *C = dyn_cast<ConstantExpr>(V)) {
843 | 33 | Opcode = C->getOpcode(); |
844 | 33 | U = C; |
845 | 33 | } |
846 | 6.07k | |
847 | 6.07k | if (PointerType *Ty = dyn_cast<PointerType>(V->getType())) |
848 | 6.05k | if (Ty->getAddressSpace() > 255) |
849 | 2 | // Fast instruction selection doesn't support the special |
850 | 2 | // address spaces. |
851 | 2 | return false; |
852 | 6.07k | |
853 | 6.07k | switch (Opcode) { |
854 | 6.07k | default: break;
855 | 6.07k | case Instruction::BitCast: |
856 | 440 | // Look past bitcasts. |
857 | 440 | return X86SelectAddress(U->getOperand(0), AM); |
858 | 6.07k | |
859 | 6.07k | case Instruction::IntToPtr: |
860 | 15 | // Look past no-op inttoptrs. |
861 | 15 | if (TLI.getValueType(DL, U->getOperand(0)->getType()) == |
862 | 15 | TLI.getPointerTy(DL)) |
863 | 15 | return X86SelectAddress(U->getOperand(0), AM); |
864 | 0 | break; |
865 | 0 |
866 | 3 | case Instruction::PtrToInt: |
867 | 3 | // Look past no-op ptrtoints. |
868 | 3 | if (TLI.getValueType(DL, U->getType()) == TLI.getPointerTy(DL)) |
869 | 3 | return X86SelectAddress(U->getOperand(0), AM); |
870 | 0 | break; |
871 | 0 |
872 | 2.62k | case Instruction::Alloca: { |
873 | 2.62k | // Do static allocas. |
874 | 2.62k | const AllocaInst *A = cast<AllocaInst>(V); |
875 | 2.62k | DenseMap<const AllocaInst *, int>::iterator SI = |
876 | 2.62k | FuncInfo.StaticAllocaMap.find(A); |
877 | 2.62k | if (SI != FuncInfo.StaticAllocaMap.end()) { |
878 | 2.61k | AM.BaseType = X86AddressMode::FrameIndexBase; |
879 | 2.61k | AM.Base.FrameIndex = SI->second; |
880 | 2.61k | return true; |
881 | 2.61k | } |
882 | 4 | break; |
883 | 4 | } |
884 | 4 | |
885 | 12 | case Instruction::Add: { |
886 | 12 | // Adds of constants are common and easy enough. |
887 | 12 | if (const ConstantInt *CI = dyn_cast<ConstantInt>(U->getOperand(1))) { |
888 | 12 | uint64_t Disp = (int32_t)AM.Disp + (uint64_t)CI->getSExtValue(); |
889 | 12 | // They have to fit in the 32-bit signed displacement field though. |
890 | 12 | if (isInt<32>(Disp)) { |
891 | 12 | AM.Disp = (uint32_t)Disp; |
892 | 12 | return X86SelectAddress(U->getOperand(0), AM); |
893 | 12 | } |
894 | 0 | } |
895 | 0 | break; |
896 | 0 | } |
897 | 0 |
898 | 519 | case Instruction::GetElementPtr: { |
899 | 519 | X86AddressMode SavedAM = AM; |
900 | 519 | |
901 | 519 | // Pattern-match simple GEPs. |
902 | 519 | uint64_t Disp = (int32_t)AM.Disp; |
903 | 519 | unsigned IndexReg = AM.IndexReg; |
904 | 519 | unsigned Scale = AM.Scale; |
905 | 519 | gep_type_iterator GTI = gep_type_begin(U); |
906 | 519 | // Iterate through the indices, folding what we can. Constants can be |
907 | 519 | // folded, and one dynamic index can be handled, if the scale is supported. |
908 | 519 | for (User::const_op_iterator i = U->op_begin() + 1, e = U->op_end(); |
909 | 1.39k | i != e; ++i, ++GTI) {
910 | 882 | const Value *Op = *i; |
911 | 882 | if (StructType *STy = GTI.getStructTypeOrNull()) { |
912 | 279 | const StructLayout *SL = DL.getStructLayout(STy); |
913 | 279 | Disp += SL->getElementOffset(cast<ConstantInt>(Op)->getZExtValue()); |
914 | 279 | continue; |
915 | 279 | } |
916 | 603 | |
917 | 603 | // An array/variable index is always of the form i*S, where S is the
918 | 603 | // constant scale size. See if we can push the scale into immediates. |
919 | 603 | uint64_t S = DL.getTypeAllocSize(GTI.getIndexedType()); |
920 | 607 | for (;;) { |
921 | 607 | if (const ConstantInt *CI = dyn_cast<ConstantInt>(Op)) { |
922 | 519 | // Constant-offset addressing. |
923 | 519 | Disp += CI->getSExtValue() * S; |
924 | 519 | break; |
925 | 519 | } |
926 | 88 | if (canFoldAddIntoGEP(U, Op)) { |
927 | 4 | // A compatible add with a constant operand. Fold the constant. |
928 | 4 | ConstantInt *CI = |
929 | 4 | cast<ConstantInt>(cast<AddOperator>(Op)->getOperand(1)); |
930 | 4 | Disp += CI->getSExtValue() * S; |
931 | 4 | // Iterate on the other operand. |
932 | 4 | Op = cast<AddOperator>(Op)->getOperand(0); |
933 | 4 | continue; |
934 | 4 | } |
935 | 84 | if (IndexReg == 0 && |
936 | 84 | (!AM.GV || !Subtarget->isPICStyleRIPRel()) &&
937 | 84 | (S == 1 || S == 2 || S == 4 || S == 8)) {
938 | 84 | // Scaled-index addressing. |
939 | 84 | Scale = S; |
940 | 84 | IndexReg = getRegForGEPIndex(Op).first; |
941 | 84 | if (IndexReg == 0) |
942 | 5 | return false; |
943 | 79 | break; |
944 | 79 | } |
945 | 0 | // Unsupported. |
946 | 0 | goto unsupported_gep; |
947 | 0 | } |
948 | 603 | } |
949 | 519 | |
950 | 519 | // Check for displacement overflow. |
951 | 519 | if (!isInt<32>(Disp))
952 | 0 | break; |
953 | 514 | |
954 | 514 | AM.IndexReg = IndexReg; |
955 | 514 | AM.Scale = Scale; |
956 | 514 | AM.Disp = (uint32_t)Disp; |
957 | 514 | GEPs.push_back(V); |
958 | 514 | |
959 | 514 | if (const GetElementPtrInst *GEP = |
960 | 20 | dyn_cast<GetElementPtrInst>(U->getOperand(0))) { |
961 | 20 | // Ok, the GEP indices were covered by constant-offset and scaled-index |
962 | 20 | // addressing. Update the address state and move on to examining the base. |
963 | 20 | V = GEP; |
964 | 20 | goto redo_gep; |
965 | 494 | } else if (X86SelectAddress(U->getOperand(0), AM)) { |
966 | 470 | return true; |
967 | 470 | } |
968 | 24 | |
969 | 24 | // If we couldn't merge the gep value into this addr mode, revert back to |
970 | 24 | // our address and just match the value instead of completely failing. |
971 | 24 | AM = SavedAM; |
972 | 24 | |
973 | 24 | for (const Value *I : reverse(GEPs)) |
974 | 24 | if (handleConstantAddresses(I, AM)) |
975 | 12 | return true; |
976 | 24 | |
977 | 24 | return false;
978 | 0 | unsupported_gep: |
979 | 0 | // Ok, the GEP indices weren't all covered. |
980 | 0 | break; |
981 | 2.46k | } |
982 | 2.46k | } |
983 | 2.46k | |
984 | 2.46k | return handleConstantAddresses(V, AM); |
985 | 2.46k | } |
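 | | // E.g. (illustrative): for
 | | //   getelementptr [16 x i32], ptr %p, i64 0, i64 %i
 | | // the GEP case above folds everything into one addressing mode,
 | | // Base = %p, IndexReg = vreg(%i), Scale = 4, Disp = 0, so the access
 | | // becomes a single [base + index*4] memory operand.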
986 | | |
987 | | /// X86SelectCallAddress - Attempt to fill in an address from the given value. |
988 | | /// |
989 | 1.04k | bool X86FastISel::X86SelectCallAddress(const Value *V, X86AddressMode &AM) { |
990 | 1.04k | const User *U = nullptr; |
991 | 1.04k | unsigned Opcode = Instruction::UserOp1; |
992 | 1.04k | const Instruction *I = dyn_cast<Instruction>(V); |
993 | 1.04k | // Record if the value is defined in the same basic block. |
994 | 1.04k | // |
995 | 1.04k | // This information is crucial to know whether or not folding an |
996 | 1.04k | // operand is valid. |
997 | 1.04k | // Indeed, FastISel generates or reuses a virtual register for all |
998 | 1.04k | // operands of all instructions it selects. Obviously, the definition and |
999 | 1.04k | // its uses must use the same virtual register otherwise the produced |
1000 | 1.04k | // code is incorrect. |
1001 | 1.04k | // Before instruction selection, FunctionLoweringInfo::set sets the virtual |
1002 | 1.04k | // registers for values that are alive across basic blocks. This ensures |
1003 | 1.04k | // that the values are set consistently across basic blocks, even
1004 | 1.04k | // if different instruction selection mechanisms are used (e.g., a mix of |
1005 | 1.04k | // SDISel and FastISel). |
1006 | 1.04k | // For values local to a basic block, the instruction selection process |
1007 | 1.04k | // generates these virtual registers with whatever method is appropriate |
1008 | 1.04k | // for its needs. In particular, FastISel and SDISel do not share the way |
1009 | 1.04k | // local virtual registers are set. |
1010 | 1.04k | // Therefore, it is impossible (or at least unsafe) to share values
1011 | 1.04k | // between basic blocks unless they use the same instruction selection
1012 | 1.04k | // method, which is not guaranteed for X86.
1013 | 1.04k | // Moreover, properties like hasOneUse cannot be used accurately if we
1014 | 1.04k | // allow references to values across basic blocks that were not
1015 | 1.04k | // originally live across basic blocks.
1016 | 1.04k | bool InMBB = true; |
1017 | 1.04k | if (I) { |
1018 | 28 | Opcode = I->getOpcode(); |
1019 | 28 | U = I; |
1020 | 28 | InMBB = I->getParent() == FuncInfo.MBB->getBasicBlock(); |
1021 | 1.01k | } else if (const ConstantExpr *C = dyn_cast<ConstantExpr>(V)) { |
1022 | 29 | Opcode = C->getOpcode(); |
1023 | 29 | U = C; |
1024 | 29 | } |
1025 | 1.04k | |
1026 | 1.04k | switch (Opcode) { |
1027 | 1.04k | default: break1.00k ; |
1028 | 1.04k | case Instruction::BitCast: |
1029 | 35 | // Look past bitcasts if its operand is in the same BB. |
1030 | 35 | if (InMBB) |
1031 | 33 | return X86SelectCallAddress(U->getOperand(0), AM); |
1032 | 2 | break; |
1033 | 2 | |
1034 | 9 | case Instruction::IntToPtr: |
1035 | 9 | // Look past no-op inttoptrs if its operand is in the same BB. |
1036 | 9 | if (InMBB && |
1037 | 9 | TLI.getValueType(DL, U->getOperand(0)->getType()) == |
1038 | 9 | TLI.getPointerTy(DL)) |
1039 | 9 | return X86SelectCallAddress(U->getOperand(0), AM); |
1040 | 0 | break; |
1041 | 0 |
1042 | 2 | case Instruction::PtrToInt: |
1043 | 2 | // Look past no-op ptrtoints if its operand is in the same BB. |
1044 | 2 | if (InMBB && TLI.getValueType(DL, U->getType()) == TLI.getPointerTy(DL))
1045 | 1 | return X86SelectCallAddress(U->getOperand(0), AM); |
1046 | 1 | break; |
1047 | 1.00k | } |
1048 | 1.00k | |
1049 | 1.00k | // Handle constant address. |
1050 | 1.00k | if (const GlobalValue *GV = dyn_cast<GlobalValue>(V)) { |
1051 | 979 | // Can't handle alternate code models yet. |
1052 | 979 | if (TM.getCodeModel() != CodeModel::Small) |
1053 | 56 | return false; |
1054 | 923 | |
1055 | 923 | // RIP-relative addresses can't have additional register operands. |
1056 | 923 | if (Subtarget->isPICStyleRIPRel() && |
1057 | 923 | (AM.Base.Reg != 0 || AM.IndexReg != 0))
1058 | 0 | return false; |
1059 | 923 | |
1060 | 923 | // Can't handle TLS. |
1061 | 923 | if (const GlobalVariable *GVar = dyn_cast<GlobalVariable>(GV)) |
1062 | 0 | if (GVar->isThreadLocal()) |
1063 | 0 | return false; |
1064 | 923 | |
1065 | 923 | // Okay, we've committed to selecting this global. Set up the basic address. |
1066 | 923 | AM.GV = GV; |
1067 | 923 | |
1068 | 923 | // Return a direct reference to the global. FastISel can handle calls to
1069 | 923 | // functions that require loads, such as dllimport and nonlazybind |
1070 | 923 | // functions. |
1071 | 923 | if (Subtarget->isPICStyleRIPRel()) { |
1072 | 409 | // Use rip-relative addressing if we can. Above we verified that the |
1073 | 409 | // base and index registers are unused. |
1074 | 409 | assert(AM.Base.Reg == 0 && AM.IndexReg == 0); |
1075 | 409 | AM.Base.Reg = X86::RIP; |
1076 | 514 | } else { |
1077 | 514 | AM.GVOpFlags = Subtarget->classifyLocalReference(nullptr); |
1078 | 514 | } |
1079 | 923 | |
1080 | 923 | return true; |
1081 | 923 | } |
1082 | 25 | |
1083 | 25 | // If all else fails, try to materialize the value in a register. |
1084 | 25 | if (!AM.GV || !Subtarget->isPICStyleRIPRel()) {
1085 | 25 | if (AM.Base.Reg == 0) { |
1086 | 25 | AM.Base.Reg = getRegForValue(V); |
1087 | 25 | return AM.Base.Reg != 0; |
1088 | 25 | } |
1089 | 0 | if (AM.IndexReg == 0) { |
1090 | 0 | assert(AM.Scale == 1 && "Scale with no index!"); |
1091 | 0 | AM.IndexReg = getRegForValue(V); |
1092 | 0 | return AM.IndexReg != 0; |
1093 | 0 | } |
1094 | 0 | } |
1095 | 0 | |
1096 | 0 | return false; |
1097 | 0 | } |
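 | | // E.g. (illustrative): "call void @foo()" under RIP-relative PIC takes
 | | // the GlobalValue path above, producing AM.GV = @foo and
 | | // AM.Base.Reg = X86::RIP, which call lowering can turn into a direct
 | | // pc-relative call with no extra register operands.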
1098 | | |
1099 | | |
1100 | | /// X86SelectStore - Select and emit code to implement store instructions. |
1101 | 2.17k | bool X86FastISel::X86SelectStore(const Instruction *I) { |
1102 | 2.17k | // Atomic stores need special handling. |
1103 | 2.17k | const StoreInst *S = cast<StoreInst>(I); |
1104 | 2.17k | |
1105 | 2.17k | if (S->isAtomic()) |
1106 | 66 | return false; |
1107 | 2.10k | |
1108 | 2.10k | const Value *PtrV = I->getOperand(1); |
1109 | 2.10k | if (TLI.supportSwiftError()) { |
1110 | 1.63k | // Swifterror values can come from either a function parameter with |
1111 | 1.63k | // swifterror attribute or an alloca with swifterror attribute. |
1112 | 1.63k | if (const Argument *Arg = dyn_cast<Argument>(PtrV)) { |
1113 | 417 | if (Arg->hasSwiftErrorAttr()) |
1114 | 2 | return false; |
1115 | 1.63k | } |
1116 | 1.63k | |
1117 | 1.63k | if (const AllocaInst *Alloca = dyn_cast<AllocaInst>(PtrV)) { |
1118 | 922 | if (Alloca->isSwiftError()) |
1119 | 2 | return false; |
1120 | 2.10k | } |
1121 | 1.63k | } |
1122 | 2.10k | |
1123 | 2.10k | const Value *Val = S->getValueOperand(); |
1124 | 2.10k | const Value *Ptr = S->getPointerOperand(); |
1125 | 2.10k | |
1126 | 2.10k | MVT VT; |
1127 | 2.10k | if (!isTypeLegal(Val->getType(), VT, /*AllowI1=*/true)) |
1128 | 127 | return false; |
1129 | 1.97k | |
1130 | 1.97k | unsigned Alignment = S->getAlignment(); |
1131 | 1.97k | unsigned ABIAlignment = DL.getABITypeAlignment(Val->getType()); |
1132 | 1.97k | if (Alignment == 0) // Ensure that codegen never sees alignment 0 |
1133 | 340 | Alignment = ABIAlignment; |
1134 | 1.97k | bool Aligned = Alignment >= ABIAlignment; |
1135 | 1.97k | |
1136 | 1.97k | X86AddressMode AM; |
1137 | 1.97k | if (!X86SelectAddress(Ptr, AM)) |
1138 | 4 | return false; |
1139 | 1.97k | |
1140 | 1.97k | return X86FastEmitStore(VT, Val, AM, createMachineMemOperandFor(I), Aligned); |
1141 | 1.97k | } |
1142 | | |
1143 | | /// X86SelectRet - Select and emit code to implement ret instructions. |
1144 | 9.65k | bool X86FastISel::X86SelectRet(const Instruction *I) { |
1145 | 9.65k | const ReturnInst *Ret = cast<ReturnInst>(I); |
1146 | 9.65k | const Function &F = *I->getParent()->getParent(); |
1147 | 9.65k | const X86MachineFunctionInfo *X86MFInfo = |
1148 | 9.65k | FuncInfo.MF->getInfo<X86MachineFunctionInfo>(); |
1149 | 9.65k | |
1150 | 9.65k | if (!FuncInfo.CanLowerReturn) |
1151 | 3 | return false; |
1152 | 9.65k | |
1153 | 9.65k | if (TLI.supportSwiftError() && |
1154 | 9.65k | F.getAttributes().hasAttrSomewhere(Attribute::SwiftError))
1155 | 20 | return false; |
1156 | 9.63k | |
1157 | 9.63k | if (TLI.supportSplitCSR(FuncInfo.MF)) |
1158 | 5 | return false; |
1159 | 9.63k | |
1160 | 9.63k | CallingConv::ID CC = F.getCallingConv(); |
1161 | 9.63k | if (CC != CallingConv::C && |
1162 | 9.63k | CC != CallingConv::Fast &&
1163 | 9.63k | CC != CallingConv::X86_FastCall &&
1164 | 9.63k | CC != CallingConv::X86_StdCall &&
1165 | 9.63k | CC != CallingConv::X86_ThisCall &&
1166 | 9.63k | CC != CallingConv::X86_64_SysV &&
1167 | 9.63k | CC != CallingConv::Win64)
1168 | 33 | return false; |
1169 | 9.59k | |
1170 | 9.59k | // Don't handle popping bytes if they don't fit the ret's immediate. |
1171 | 9.59k | if (!isUInt<16>(X86MFInfo->getBytesToPopOnReturn())) |
1172 | 2 | return false; |
1173 | 9.59k | |
1174 | 9.59k | // fastcc with -tailcallopt is intended to provide a guaranteed |
1175 | 9.59k | // tail call optimization. Fastisel doesn't know how to do that. |
1176 | 9.59k | if (CC == CallingConv::Fast && TM.Options.GuaranteedTailCallOpt)
1177 | 1 | return false; |
1178 | 9.59k | |
1179 | 9.59k | // Let SDISel handle vararg functions. |
1180 | 9.59k | if (F.isVarArg()) |
1181 | 9 | return false; |
1182 | 9.58k | |
1183 | 9.58k | // Build a list of return value registers. |
1184 | 9.58k | SmallVector<unsigned, 4> RetRegs; |
1185 | 9.58k | |
1186 | 9.58k | if (Ret->getNumOperands() > 0) { |
1187 | 7.94k | SmallVector<ISD::OutputArg, 4> Outs; |
1188 | 7.94k | GetReturnInfo(CC, F.getReturnType(), F.getAttributes(), Outs, TLI, DL); |
1189 | 7.94k | |
1190 | 7.94k | // Analyze operands of the call, assigning locations to each operand. |
1191 | 7.94k | SmallVector<CCValAssign, 16> ValLocs; |
1192 | 7.94k | CCState CCInfo(CC, F.isVarArg(), *FuncInfo.MF, ValLocs, I->getContext()); |
1193 | 7.94k | CCInfo.AnalyzeReturn(Outs, RetCC_X86); |
1194 | 7.94k | |
1195 | 7.94k | const Value *RV = Ret->getOperand(0); |
1196 | 7.94k | unsigned Reg = getRegForValue(RV); |
1197 | 7.94k | if (Reg == 0) |
1198 | 227 | return false; |
1199 | 7.72k | |
1200 | 7.72k | // Only handle a single return value for now. |
1201 | 7.72k | if (ValLocs.size() != 1) |
1202 | 0 | return false; |
1203 | 7.72k | |
1204 | 7.72k | CCValAssign &VA = ValLocs[0]; |
1205 | 7.72k | |
1206 | 7.72k | // Don't bother handling odd stuff for now. |
1207 | 7.72k | if (VA.getLocInfo() != CCValAssign::Full) |
1208 | 0 | return false; |
1209 | 7.72k | // Only handle register returns for now. |
1210 | 7.72k | if (!VA.isRegLoc()) |
1211 | 0 | return false; |
1212 | 7.72k | |
1213 | 7.72k | // The calling-convention tables for x87 returns don't tell |
1214 | 7.72k | // the whole story. |
1215 | 7.72k | if (VA.getLocReg() == X86::FP0 || VA.getLocReg() == X86::FP1)
1216 | 62 | return false; |
1217 | 7.65k | |
1218 | 7.65k | unsigned SrcReg = Reg + VA.getValNo(); |
1219 | 7.65k | EVT SrcVT = TLI.getValueType(DL, RV->getType()); |
1220 | 7.65k | EVT DstVT = VA.getValVT(); |
1221 | 7.65k | // Special handling for extended integers. |
1222 | 7.65k | if (SrcVT != DstVT) { |
1223 | 386 | if (SrcVT != MVT::i1 && SrcVT != MVT::i8 && SrcVT != MVT::i16)
1224 | 0 | return false; |
1225 | 386 | |
1226 | 386 | if (!Outs[0].Flags.isZExt() && !Outs[0].Flags.isSExt())
1227 | 20 | return false; |
1228 | 366 | |
1229 | 366 | assert(DstVT == MVT::i32 && "X86 should always ext to i32"); |
1230 | 366 | |
1231 | 366 | if (SrcVT == MVT::i1) { |
1232 | 276 | if (Outs[0].Flags.isSExt()) |
1233 | 0 | return false; |
1234 | 276 | SrcReg = fastEmitZExtFromI1(MVT::i8, SrcReg, /*TODO: Kill=*/false); |
1235 | 276 | SrcVT = MVT::i8; |
1236 | 276 | } |
1237 | 366 | unsigned Op = Outs[0].Flags.isZExt() ? ISD::ZERO_EXTEND :
1238 | 366 | ISD::SIGN_EXTEND;
1239 | 366 | SrcReg = fastEmit_r(SrcVT.getSimpleVT(), DstVT.getSimpleVT(), Op, |
1240 | 366 | SrcReg, /*TODO: Kill=*/false); |
1241 | 366 | } |
1242 | 7.65k | |
1243 | 7.65k | // Make the copy. |
1244 | 7.65k | unsigned DstReg = VA.getLocReg(); |
1245 | 7.63k | const TargetRegisterClass *SrcRC = MRI.getRegClass(SrcReg); |
1246 | 7.63k | // Avoid a cross-class copy. This is very unlikely. |
1247 | 7.63k | if (!SrcRC->contains(DstReg)) |
1248 | 1 | return false; |
1249 | 7.63k | BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, |
1250 | 7.63k | TII.get(TargetOpcode::COPY), DstReg).addReg(SrcReg); |
1251 | 7.63k | |
1252 | 7.63k | // Add register to return instruction. |
1253 | 7.63k | RetRegs.push_back(VA.getLocReg()); |
1254 | 7.63k | } |
1255 | 9.58k | |
1256 | 9.58k | // Swift calling convention does not require we copy the sret argument |
1257 | 9.58k | // into %rax/%eax for the return, and SRetReturnReg is not set for Swift. |
1258 | 9.58k | |
1259 | 9.58k | // All x86 ABIs require that for returning structs by value we copy |
1260 | 9.58k | // the sret argument into %rax/%eax (depending on ABI) for the return. |
1261 | 9.58k | // We saved the argument into a virtual register in the entry block, |
1262 | 9.58k | // so now we copy the value out and into %rax/%eax. |
1263 | 9.27k | if (F.hasStructRetAttr() && CC != CallingConv::Swift) {
1264 | 73 | unsigned Reg = X86MFInfo->getSRetReturnReg(); |
1265 | 73 | assert(Reg && |
1266 | 73 | "SRetReturnReg should have been set in LowerFormalArguments()!"); |
1267 | 73 | unsigned RetReg = Subtarget->isTarget64BitLP64() ? X86::RAX : X86::EAX;
1268 | 73 | BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, |
1269 | 73 | TII.get(TargetOpcode::COPY), RetReg).addReg(Reg); |
1270 | 73 | RetRegs.push_back(RetReg); |
1271 | 73 | } |
1272 | 9.27k | |
1273 | 9.27k | // Now emit the RET. |
1274 | 9.27k | MachineInstrBuilder MIB; |
1275 | 9.27k | if (X86MFInfo->getBytesToPopOnReturn()) { |
1276 | 54 | MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, |
1277 | 54 | TII.get(Subtarget->is64Bit() ? X86::RETIQ : X86::RETIL))
1278 | 54 | .addImm(X86MFInfo->getBytesToPopOnReturn()); |
1279 | 9.22k | } else { |
1280 | 9.22k | MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, |
1281 | 9.22k | TII.get(Subtarget->is64Bit() ? X86::RETQ : X86::RETL));
1282 | 9.22k | } |
1283 | 16.9k | for (unsigned i = 0, e = RetRegs.size(); i != e; ++i)
1284 | 7.71k | MIB.addReg(RetRegs[i], RegState::Implicit); |
1285 | 9.27k | return true; |
1286 | 9.58k | } |
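 | | // E.g. (illustrative): "ret i1 %b" on x86-64 takes the extension path
 | | // above: %b is zero-extended i1 -> i8 -> i32, copied into EAX, and the
 | | // RET is emitted with EAX as an implicit use so the value stays live
 | | // into the return.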
1287 | | |
1288 | | /// X86SelectLoad - Select and emit code to implement load instructions. |
1289 | | /// |
1290 | 1.47k | bool X86FastISel::X86SelectLoad(const Instruction *I) { |
1291 | 1.47k | const LoadInst *LI = cast<LoadInst>(I); |
1292 | 1.47k | |
1293 | 1.47k | // Atomic loads need special handling. |
1294 | 1.47k | if (LI->isAtomic()) |
1295 | 59 | return false; |
1296 | 1.41k | |
1297 | 1.41k | const Value *SV = I->getOperand(0); |
1298 | 1.41k | if (TLI.supportSwiftError()) { |
1299 | 1.03k | // Swifterror values can come from either a function parameter with |
1300 | 1.03k | // swifterror attribute or an alloca with swifterror attribute. |
1301 | 1.03k | if (const Argument *Arg = dyn_cast<Argument>(SV)) { |
1302 | 274 | if (Arg->hasSwiftErrorAttr()) |
1303 | 0 | return false; |
1304 | 1.03k | } |
1305 | 1.03k | |
1306 | 1.03k | if (const AllocaInst *Alloca = dyn_cast<AllocaInst>(SV)) { |
1307 | 448 | if (Alloca->isSwiftError()) |
1308 | 5 | return false; |
1309 | 1.40k | } |
1310 | 1.03k | } |
1311 | 1.40k | |
1312 | 1.40k | MVT VT; |
1313 | 1.40k | if (!isTypeLegal(LI->getType(), VT, /*AllowI1=*/true)) |
1314 | 3 | return false; |
1315 | 1.40k | |
1316 | 1.40k | const Value *Ptr = LI->getPointerOperand(); |
1317 | 1.40k | |
1318 | 1.40k | X86AddressMode AM; |
1319 | 1.40k | if (!X86SelectAddress(Ptr, AM)) |
1320 | 58 | return false; |
1321 | 1.34k | |
1322 | 1.34k | unsigned Alignment = LI->getAlignment(); |
1323 | 1.34k | unsigned ABIAlignment = DL.getABITypeAlignment(LI->getType()); |
1324 | 1.34k | if (Alignment == 0) // Ensure that codegen never sees alignment 0 |
1325 | 239 | Alignment = ABIAlignment; |
1326 | 1.34k | |
1327 | 1.34k | unsigned ResultReg = 0; |
1328 | 1.34k | if (!X86FastEmitLoad(VT, AM, createMachineMemOperandFor(LI), ResultReg, |
1329 | 1.34k | Alignment)) |
1330 | 14 | return false; |
1331 | 1.33k | |
1332 | 1.33k | updateValueMap(I, ResultReg); |
1333 | 1.33k | return true; |
1334 | 1.33k | } |
1335 | | |
1336 | 440 | static unsigned X86ChooseCmpOpcode(EVT VT, const X86Subtarget *Subtarget) { |
1337 | 440 | bool HasAVX512 = Subtarget->hasAVX512(); |
1338 | 440 | bool HasAVX = Subtarget->hasAVX(); |
1339 | 440 | bool X86ScalarSSEf32 = Subtarget->hasSSE1(); |
1340 | 440 | bool X86ScalarSSEf64 = Subtarget->hasSSE2(); |
1341 | 440 | |
1342 | 440 | switch (VT.getSimpleVT().SimpleTy) { |
1343 | 440 | default: return 074 ; |
1344 | 440 | case MVT::i8: return X86::CMP8rr4 ; |
1345 | 440 | case MVT::i16: return X86::CMP16rr2 ; |
1346 | 440 | case MVT::i32: return X86::CMP32rr67 ; |
1347 | 440 | case MVT::i64: return X86::CMP64rr92 ; |
1348 | 440 | case MVT::f32: |
1349 | 152 | return X86ScalarSSEf32 |
1350 | 152 | ? (HasAVX512 ? X86::VUCOMISSZrr37 |
1351 | 152 | : HasAVX 115 ? X86::VUCOMISSrr36 : X86::UCOMISSrr79 ) |
1352 | 152 | : 00 ; |
1353 | 440 | case MVT::f64: |
1354 | 49 | return X86ScalarSSEf64 |
1355 | 49 | ? (HasAVX512 ? X86::VUCOMISDZrr14 |
1356 | 49 | : HasAVX 35 ? X86::VUCOMISDrr14 : X86::UCOMISDrr21 ) |
1357 | 49 | : 00 ; |
1358 | 440 | } |
1359 | 440 | } |
1360 | | |
1361 | | /// If the RHS of the comparison is a constant we can fold into an immediate
1362 | | /// operand, return a compare opcode that works (e.g. CMP32ri); else return 0.
1363 | 152 | static unsigned X86ChooseCmpImmediateOpcode(EVT VT, const ConstantInt *RHSC) { |
1364 | 152 | int64_t Val = RHSC->getSExtValue(); |
1365 | 152 | switch (VT.getSimpleVT().SimpleTy) { |
1366 | 152 | // Otherwise, we can't fold the immediate into this comparison. |
1367 | 152 | default: |
1368 | 0 | return 0; |
1369 | 152 | case MVT::i8: |
1370 | 10 | return X86::CMP8ri; |
1371 | 152 | case MVT::i16: |
1372 | 8 | if (isInt<8>(Val)) |
1373 | 8 | return X86::CMP16ri8; |
1374 | 0 | return X86::CMP16ri; |
1375 | 112 | case MVT::i32: |
1376 | 112 | if (isInt<8>(Val)) |
1377 | 99 | return X86::CMP32ri8; |
1378 | 13 | return X86::CMP32ri; |
1379 | 22 | case MVT::i64: |
1380 | 22 | if (isInt<8>(Val)) |
1381 | 22 | return X86::CMP64ri8; |
1382 | 0 | // 64-bit comparisons are only valid if the immediate fits in a 32-bit sext |
1383 | 0 | // field. |
1384 | 0 | if (isInt<32>(Val)) |
1385 | 0 | return X86::CMP64ri32; |
1386 | 0 | return 0; |
1387 | 152 | } |
1388 | 152 | } |
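
As a standalone sketch of the width tests above (fitsInSignedBits is a hypothetical helper mirroring what llvm::isInt<N> checks; it is not part of this file):

#include <cstdint>

// True when Val survives a round trip through an N-bit signed immediate.
template <unsigned N> bool fitsInSignedBits(int64_t Val) {
  return Val >= -(INT64_C(1) << (N - 1)) && Val < (INT64_C(1) << (N - 1));
}

// fitsInSignedBits<8>(100)        -> true:  CMP32ri8, one-byte immediate
// fitsInSignedBits<8>(1000)       -> false: fall back to CMP32ri
// fitsInSignedBits<32>(1LL << 40) -> false: x86 has no 64-bit-immediate compare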
1389 | | |
1390 | | bool X86FastISel::X86FastEmitCompare(const Value *Op0, const Value *Op1, EVT VT, |
1391 | 593 | const DebugLoc &CurDbgLoc) { |
1392 | 593 | unsigned Op0Reg = getRegForValue(Op0); |
1393 | 593 | if (Op0Reg == 0) return false1 ; |
1394 | 592 | |
1395 | 592 | // Handle 'null' like i32/i64 0. |
1396 | 592 | if (isa<ConstantPointerNull>(Op1)) |
1397 | 13 | Op1 = Constant::getNullValue(DL.getIntPtrType(Op0->getContext())); |
1398 | 592 | |
1399 | 592 | // We have two options: compare with register or immediate. If the RHS of |
1400 | 592 | // the compare is an immediate that we can fold into this compare, use |
1401 | 592 | // CMPri, otherwise use CMPrr. |
1402 | 592 | if (const ConstantInt *Op1C = dyn_cast<ConstantInt>(Op1)) { |
1403 | 152 | if (unsigned CompareImmOpc = X86ChooseCmpImmediateOpcode(VT, Op1C)) { |
1404 | 152 | BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, CurDbgLoc, TII.get(CompareImmOpc)) |
1405 | 152 | .addReg(Op0Reg) |
1406 | 152 | .addImm(Op1C->getSExtValue()); |
1407 | 152 | return true; |
1408 | 152 | } |
1409 | 440 | } |
1410 | 440 | |
1411 | 440 | unsigned CompareOpc = X86ChooseCmpOpcode(VT, Subtarget); |
1412 | 440 | if (CompareOpc == 0) return false74 ; |
1413 | 366 | |
1414 | 366 | unsigned Op1Reg = getRegForValue(Op1); |
1415 | 366 | if (Op1Reg == 0) return false0 ; |
1416 | 366 | BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, CurDbgLoc, TII.get(CompareOpc)) |
1417 | 366 | .addReg(Op0Reg) |
1418 | 366 | .addReg(Op1Reg); |
1419 | 366 | |
1420 | 366 | return true; |
1421 | 366 | } |
1422 | | |
1423 | 309 | bool X86FastISel::X86SelectCmp(const Instruction *I) { |
1424 | 309 | const CmpInst *CI = cast<CmpInst>(I); |
1425 | 309 | |
1426 | 309 | MVT VT; |
1427 | 309 | if (!isTypeLegal(I->getOperand(0)->getType(), VT)) |
1428 | 1 | return false; |
1429 | 308 | |
1430 | 308 | // Try to optimize or fold the cmp. |
1431 | 308 | CmpInst::Predicate Predicate = optimizeCmpPredicate(CI); |
1432 | 308 | unsigned ResultReg = 0; |
1433 | 308 | switch (Predicate) { |
1434 | 308 | default: break260 ; |
1435 | 308 | case CmpInst::FCMP_FALSE: { |
1436 | 24 | ResultReg = createResultReg(&X86::GR32RegClass); |
1437 | 24 | BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::MOV32r0), |
1438 | 24 | ResultReg); |
1439 | 24 | ResultReg = fastEmitInst_extractsubreg(MVT::i8, ResultReg, /*Kill=*/true, |
1440 | 24 | X86::sub_8bit); |
1441 | 24 | if (!ResultReg) |
1442 | 0 | return false; |
1443 | 24 | break; |
1444 | 24 | } |
1445 | 24 | case CmpInst::FCMP_TRUE: { |
1446 | 24 | ResultReg = createResultReg(&X86::GR8RegClass); |
1447 | 24 | BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::MOV8ri), |
1448 | 24 | ResultReg).addImm(1); |
1449 | 24 | break; |
1450 | 308 | } |
1451 | 308 | } |
1452 | 308 | |
1453 | 308 | if (ResultReg) { |
1454 | 48 | updateValueMap(I, ResultReg); |
1455 | 48 | return true; |
1456 | 48 | } |
1457 | 260 | |
1458 | 260 | const Value *LHS = CI->getOperand(0); |
1459 | 260 | const Value *RHS = CI->getOperand(1); |
1460 | 260 | |
1461 | 260 | // The optimizer might have replaced fcmp oeq %x, %x with fcmp ord %x, 0.0. |
1462 | 260 | // We don't have to materialize a zero constant for this case and can just use |
1463 | 260 | // %x again on the RHS. |
1464 | 260 | if (Predicate == CmpInst::FCMP_ORD || Predicate == CmpInst::FCMP_UNO238 ) { |
1465 | 44 | const auto *RHSC = dyn_cast<ConstantFP>(RHS); |
1466 | 44 | if (RHSC && RHSC->isNullValue()6 ) |
1467 | 6 | RHS = LHS; |
1468 | 44 | } |
1469 | 260 | |
1470 | 260 | // FCMP_OEQ and FCMP_UNE cannot be checked with a single instruction. |
1471 | 260 | static const uint16_t SETFOpcTable[2][3] = { |
1472 | 260 | { X86::COND_E, X86::COND_NP, X86::AND8rr }, |
1473 | 260 | { X86::COND_NE, X86::COND_P, X86::OR8rr } |
1474 | 260 | }; |
1475 | 260 | const uint16_t *SETFOpc = nullptr; |
1476 | 260 | switch (Predicate) { |
1477 | 260 | default: break237 ; |
1478 | 260 | case CmpInst::FCMP_OEQ: SETFOpc = &SETFOpcTable[0][0]; break13 ; |
1479 | 260 | case CmpInst::FCMP_UNE: SETFOpc = &SETFOpcTable[1][0]; break10 ; |
1480 | 260 | } |
1481 | 260 | |
1482 | 260 | ResultReg = createResultReg(&X86::GR8RegClass); |
1483 | 260 | if (SETFOpc) { |
1484 | 23 | if (!X86FastEmitCompare(LHS, RHS, VT, I->getDebugLoc())) |
1485 | 8 | return false; |
1486 | 15 | |
1487 | 15 | unsigned FlagReg1 = createResultReg(&X86::GR8RegClass); |
1488 | 15 | unsigned FlagReg2 = createResultReg(&X86::GR8RegClass); |
1489 | 15 | BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::SETCCr), |
1490 | 15 | FlagReg1).addImm(SETFOpc[0]); |
1491 | 15 | BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::SETCCr), |
1492 | 15 | FlagReg2).addImm(SETFOpc[1]); |
1493 | 15 | BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(SETFOpc[2]), |
1494 | 15 | ResultReg).addReg(FlagReg1).addReg(FlagReg2); |
1495 | 15 | updateValueMap(I, ResultReg); |
1496 | 15 | return true; |
1497 | 15 | } |
1498 | 237 | |
1499 | 237 | X86::CondCode CC; |
1500 | 237 | bool SwapArgs; |
1501 | 237 | std::tie(CC, SwapArgs) = X86::getX86ConditionCode(Predicate); |
1502 | 237 | assert(CC <= X86::LAST_VALID_COND && "Unexpected condition code."); |
1503 | 237 | |
1504 | 237 | if (SwapArgs) |
1505 | 56 | std::swap(LHS, RHS); |
1506 | 237 | |
1507 | 237 | // Emit a compare of LHS/RHS. |
1508 | 237 | if (!X86FastEmitCompare(LHS, RHS, VT, I->getDebugLoc())) |
1509 | 63 | return false; |
1510 | 174 | |
1511 | 174 | BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::SETCCr), |
1512 | 174 | ResultReg).addImm(CC); |
1513 | 174 | updateValueMap(I, ResultReg); |
1514 | 174 | return true; |
1515 | 174 | } |
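
For reference, a scalar model of the SETE/SETNP/AND8rr sequence above, assuming UCOMISS flag semantics (unordered sets ZF=PF=CF=1; ordered-equal sets ZF=1, PF=0). This is an illustrative sketch, not an LLVM API:

#include <cmath>

bool fcmpOEQ(float A, float B) {
  bool PF = std::isunordered(A, B); // parity flag: set when an operand is NaN
  bool ZF = PF || (A == B);         // zero flag: equal or unordered
  return ZF && !PF;                 // AND8rr of the SETE and SETNP results
}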
1516 | | |
1517 | 133 | bool X86FastISel::X86SelectZExt(const Instruction *I) { |
1518 | 133 | EVT DstVT = TLI.getValueType(DL, I->getType()); |
1519 | 133 | if (!TLI.isTypeLegal(DstVT)) |
1520 | 0 | return false; |
1521 | 133 | |
1522 | 133 | unsigned ResultReg = getRegForValue(I->getOperand(0)); |
1523 | 133 | if (ResultReg == 0) |
1524 | 42 | return false; |
1525 | 91 | |
1526 | 91 | // Handle zero-extension from i1 to i8, which is common. |
1527 | 91 | MVT SrcVT = TLI.getSimpleValueType(DL, I->getOperand(0)->getType()); |
1528 | 91 | if (SrcVT == MVT::i1) { |
1529 | 71 | // Set the high bits to zero. |
1530 | 71 | ResultReg = fastEmitZExtFromI1(MVT::i8, ResultReg, /*TODO: Kill=*/false); |
1531 | 71 | SrcVT = MVT::i8; |
1532 | 71 | |
1533 | 71 | if (ResultReg == 0) |
1534 | 0 | return false; |
1535 | 91 | } |
1536 | 91 | |
1537 | 91 | if (DstVT == MVT::i64) { |
1538 | 21 | // Handle extension to 64-bits via sub-register shenanigans. |
1539 | 21 | unsigned MovInst; |
1540 | 21 | |
1541 | 21 | switch (SrcVT.SimpleTy) { |
1542 | 21 | case MVT::i8: MovInst = X86::MOVZX32rr8; break7 ; |
1543 | 21 | case MVT::i16: MovInst = X86::MOVZX32rr16; break2 ; |
1544 | 21 | case MVT::i32: MovInst = X86::MOV32rr; break12 ; |
1545 | 21 | default: 0 llvm_unreachable0 ("Unexpected zext to i64 source type"); |
1546 | 21 | } |
1547 | 21 | |
1548 | 21 | unsigned Result32 = createResultReg(&X86::GR32RegClass); |
1549 | 21 | BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(MovInst), Result32) |
1550 | 21 | .addReg(ResultReg); |
1551 | 21 | |
1552 | 21 | ResultReg = createResultReg(&X86::GR64RegClass); |
1553 | 21 | BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(TargetOpcode::SUBREG_TO_REG), |
1554 | 21 | ResultReg) |
1555 | 21 | .addImm(0).addReg(Result32).addImm(X86::sub_32bit); |
1556 | 70 | } else if (DstVT == MVT::i16) { |
1557 | 9 | // i8->i16 doesn't exist in the autogenerated isel table. Need to zero |
1558 | 9 | // extend to 32-bits and then extract down to 16-bits. |
1559 | 9 | unsigned Result32 = createResultReg(&X86::GR32RegClass); |
1560 | 9 | BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::MOVZX32rr8), |
1561 | 9 | Result32).addReg(ResultReg); |
1562 | 9 | |
1563 | 9 | ResultReg = fastEmitInst_extractsubreg(MVT::i16, Result32, /*Kill=*/true, |
1564 | 9 | X86::sub_16bit); |
1565 | 61 | } else if (DstVT != MVT::i8) { |
1566 | 50 | ResultReg = fastEmit_r(MVT::i8, DstVT.getSimpleVT(), ISD::ZERO_EXTEND, |
1567 | 50 | ResultReg, /*Kill=*/true); |
1568 | 50 | if (ResultReg == 0) |
1569 | 0 | return false; |
1570 | 91 | } |
1571 | 91 | |
1572 | 91 | updateValueMap(I, ResultReg); |
1573 | 91 | return true; |
1574 | 91 | } |
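
A scalar model of the zext-to-i64 path above (standalone sketch, assuming standard x86-64 semantics where any 32-bit register write zeroes bits 63:32, which is why MOV32rr plus SUBREG_TO_REG suffices):

#include <cstdint>

uint64_t zextI1toI64(uint8_t CondByte) {
  uint8_t  I8  = CondByte & 1; // fastEmitZExtFromI1: keep only the low bit
  uint32_t I32 = I8;           // MOVZX32rr8
  return I32;                  // SUBREG_TO_REG: bits 63:32 are already zero
}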
1575 | | |
1576 | 211 | bool X86FastISel::X86SelectSExt(const Instruction *I) { |
1577 | 211 | EVT DstVT = TLI.getValueType(DL, I->getType()); |
1578 | 211 | if (!TLI.isTypeLegal(DstVT)) |
1579 | 0 | return false; |
1580 | 211 | |
1581 | 211 | unsigned ResultReg = getRegForValue(I->getOperand(0)); |
1582 | 211 | if (ResultReg == 0) |
1583 | 200 | return false; |
1584 | 11 | |
1585 | 11 | // Handle sign-extension from i1 to i8. |
1586 | 11 | MVT SrcVT = TLI.getSimpleValueType(DL, I->getOperand(0)->getType()); |
1587 | 11 | if (SrcVT == MVT::i1) { |
1588 | 8 | // Set the high bits to zero. |
1589 | 8 | unsigned ZExtReg = fastEmitZExtFromI1(MVT::i8, ResultReg, |
1590 | 8 | /*TODO: Kill=*/false); |
1591 | 8 | if (ZExtReg == 0) |
1592 | 0 | return false; |
1593 | 8 | |
1594 | 8 | // Negate the result to make an 8-bit sign extended value. |
1595 | 8 | ResultReg = createResultReg(&X86::GR8RegClass); |
1596 | 8 | BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::NEG8r), |
1597 | 8 | ResultReg).addReg(ZExtReg); |
1598 | 8 | |
1599 | 8 | SrcVT = MVT::i8; |
1600 | 8 | } |
1601 | 11 | |
1602 | 11 | if (DstVT == MVT::i16) { |
1603 | 4 | // i8->i16 doesn't exist in the autogenerated isel table. Need to sign |
1604 | 4 | // extend to 32-bits and then extract down to 16-bits. |
1605 | 4 | unsigned Result32 = createResultReg(&X86::GR32RegClass); |
1606 | 4 | BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::MOVSX32rr8), |
1607 | 4 | Result32).addReg(ResultReg); |
1608 | 4 | |
1609 | 4 | ResultReg = fastEmitInst_extractsubreg(MVT::i16, Result32, /*Kill=*/true, |
1610 | 4 | X86::sub_16bit); |
1611 | 7 | } else if (DstVT != MVT::i8) { |
1612 | 5 | ResultReg = fastEmit_r(MVT::i8, DstVT.getSimpleVT(), ISD::SIGN_EXTEND, |
1613 | 5 | ResultReg, /*Kill=*/true); |
1614 | 5 | if (ResultReg == 0) |
1615 | 1 | return false; |
1616 | 10 | } |
1617 | 10 | |
1618 | 10 | updateValueMap(I, ResultReg); |
1619 | 10 | return true; |
1620 | 10 | } |
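
The NEG trick above in scalar form (standalone sketch): zero-extend the i1 to 0 or 1, then negate, so 1 becomes 0xFF and 0 stays 0x00, which is exactly an 8-bit sign extension of the 1-bit value.

#include <cstdint>

int8_t sextI1toI8(uint8_t CondByte) {
  uint8_t Z = CondByte & 1;       // fastEmitZExtFromI1
  return static_cast<int8_t>(-Z); // NEG8r: 1 -> 0xFF (-1), 0 -> 0x00
}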
1621 | | |
1622 | 432 | bool X86FastISel::X86SelectBranch(const Instruction *I) { |
1623 | 432 | // Unconditional branches are selected by tablegen-generated code. |
1624 | 432 | // Handle a conditional branch. |
1625 | 432 | const BranchInst *BI = cast<BranchInst>(I); |
1626 | 432 | MachineBasicBlock *TrueMBB = FuncInfo.MBBMap[BI->getSuccessor(0)]; |
1627 | 432 | MachineBasicBlock *FalseMBB = FuncInfo.MBBMap[BI->getSuccessor(1)]; |
1628 | 432 | |
1629 | 432 | // Fold the common case of a conditional branch with a comparison |
1630 | 432 | // in the same block (values defined on other blocks may not have |
1631 | 432 | // initialized registers). |
1632 | 432 | X86::CondCode CC; |
1633 | 432 | if (const CmpInst *CI = dyn_cast<CmpInst>(BI->getCondition())) { |
1634 | 229 | if (CI->hasOneUse() && CI->getParent() == I->getParent()226 ) { |
1635 | 224 | EVT VT = TLI.getValueType(DL, CI->getOperand(0)->getType()); |
1636 | 224 | |
1637 | 224 | // Try to optimize or fold the cmp. |
1638 | 224 | CmpInst::Predicate Predicate = optimizeCmpPredicate(CI); |
1639 | 224 | switch (Predicate) { |
1640 | 224 | default: break207 ; |
1641 | 224 | case CmpInst::FCMP_FALSE: fastEmitBranch(FalseMBB, DbgLoc); return true9 ; |
1642 | 224 | case CmpInst::FCMP_TRUE: fastEmitBranch(TrueMBB, DbgLoc); return true8 ; |
1643 | 207 | } |
1644 | 207 | |
1645 | 207 | const Value *CmpLHS = CI->getOperand(0); |
1646 | 207 | const Value *CmpRHS = CI->getOperand(1); |
1647 | 207 | |
1648 | 207 | // The optimizer might have replaced fcmp oeq %x, %x with fcmp ord %x, |
1649 | 207 | // 0.0. |
1650 | 207 | // We don't have to materialize a zero constant for this case and can just |
1651 | 207 | // use %x again on the RHS. |
1652 | 207 | if (Predicate == CmpInst::FCMP_ORD || Predicate == CmpInst::FCMP_UNO201 ) { |
1653 | 12 | const auto *CmpRHSC = dyn_cast<ConstantFP>(CmpRHS); |
1654 | 12 | if (CmpRHSC && CmpRHSC->isNullValue()2 ) |
1655 | 2 | CmpRHS = CmpLHS; |
1656 | 12 | } |
1657 | 207 | |
1658 | 207 | // Try to take advantage of fallthrough opportunities. |
1659 | 207 | if (FuncInfo.MBB->isLayoutSuccessor(TrueMBB)) { |
1660 | 133 | std::swap(TrueMBB, FalseMBB); |
1661 | 133 | Predicate = CmpInst::getInversePredicate(Predicate); |
1662 | 133 | } |
1663 | 207 | |
1664 | 207 | // FCMP_OEQ and FCMP_UNE cannot be expressed with a single flag/condition |
1665 | 207 | // code check. Instead two branch instructions are required to check all |
1666 | 207 | // the flags. First we change the predicate to a supported condition code, |
1667 | 207 | // which will be the first branch. Later one we will emit the second |
1668 | 207 | // branch. |
1669 | 207 | bool NeedExtraBranch = false; |
1670 | 207 | switch (Predicate) { |
1671 | 207 | default: break199 ; |
1672 | 207 | case CmpInst::FCMP_OEQ: |
1673 | 5 | std::swap(TrueMBB, FalseMBB); |
1674 | 5 | LLVM_FALLTHROUGH; |
1675 | 8 | case CmpInst::FCMP_UNE: |
1676 | 8 | NeedExtraBranch = true; |
1677 | 8 | Predicate = CmpInst::FCMP_ONE; |
1678 | 8 | break; |
1679 | 207 | } |
1680 | 207 | |
1681 | 207 | bool SwapArgs; |
1682 | 207 | std::tie(CC, SwapArgs) = X86::getX86ConditionCode(Predicate); |
1683 | 207 | assert(CC <= X86::LAST_VALID_COND && "Unexpected condition code."); |
1684 | 207 | |
1685 | 207 | if (SwapArgs) |
1686 | 14 | std::swap(CmpLHS, CmpRHS); |
1687 | 207 | |
1688 | 207 | // Emit a compare of the LHS and RHS, setting the flags. |
1689 | 207 | if (!X86FastEmitCompare(CmpLHS, CmpRHS, VT, CI->getDebugLoc())) |
1690 | 4 | return false; |
1691 | 203 | |
1692 | 203 | BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::JCC_1)) |
1693 | 203 | .addMBB(TrueMBB).addImm(CC); |
1694 | 203 | |
1695 | 203 | // X86 requires a second branch to handle UNE (and OEQ, which is mapped |
1696 | 203 | // to UNE above). |
1697 | 203 | if (NeedExtraBranch) { |
1698 | 5 | BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::JCC_1)) |
1699 | 5 | .addMBB(TrueMBB).addImm(X86::COND_P); |
1700 | 5 | } |
1701 | 203 | |
1702 | 203 | finishCondBranch(BI->getParent(), TrueMBB, FalseMBB); |
1703 | 203 | return true; |
1704 | 203 | } |
1705 | 203 | } else if (TruncInst *TI = dyn_cast<TruncInst>(BI->getCondition())) { |
1706 | 10 | // Handle things like "%cond = trunc i32 %X to i1 / br i1 %cond", which |
1707 | 10 | // typically happen for _Bool and C++ bools. |
1708 | 10 | MVT SourceVT; |
1709 | 10 | if (TI->hasOneUse() && TI->getParent() == I->getParent() && |
1710 | 10 | isTypeLegal(TI->getOperand(0)->getType(), SourceVT)) { |
1711 | 10 | unsigned TestOpc = 0; |
1712 | 10 | switch (SourceVT.SimpleTy) { |
1713 | 10 | default: break0 ; |
1714 | 10 | case MVT::i8: TestOpc = X86::TEST8ri; break; |
1715 | 10 | case MVT::i16: TestOpc = X86::TEST16ri; break0 ; |
1716 | 10 | case MVT::i32: TestOpc = X86::TEST32ri; break0 ; |
1717 | 10 | case MVT::i64: TestOpc = X86::TEST64ri32; break0 ; |
1718 | 10 | } |
1719 | 10 | if (TestOpc) { |
1720 | 10 | unsigned OpReg = getRegForValue(TI->getOperand(0)); |
1721 | 10 | if (OpReg == 0) return false0 ; |
1722 | 10 | |
1723 | 10 | BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(TestOpc)) |
1724 | 10 | .addReg(OpReg).addImm(1); |
1725 | 10 | |
1726 | 10 | unsigned JmpCond = X86::COND_NE; |
1727 | 10 | if (FuncInfo.MBB->isLayoutSuccessor(TrueMBB)) { |
1728 | 10 | std::swap(TrueMBB, FalseMBB); |
1729 | 10 | JmpCond = X86::COND_E; |
1730 | 10 | } |
1731 | 10 | |
1732 | 10 | BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::JCC_1)) |
1733 | 10 | .addMBB(TrueMBB).addImm(JmpCond); |
1734 | 10 | |
1735 | 10 | finishCondBranch(BI->getParent(), TrueMBB, FalseMBB); |
1736 | 10 | return true; |
1737 | 10 | } |
1738 | 10 | } |
1739 | 193 | } else if (foldX86XALUIntrinsic(CC, BI, BI->getCondition())) { |
1740 | 16 | // Fake request the condition, otherwise the intrinsic might be completely |
1741 | 16 | // optimized away. |
1742 | 16 | unsigned TmpReg = getRegForValue(BI->getCondition()); |
1743 | 16 | if (TmpReg == 0) |
1744 | 0 | return false; |
1745 | 16 | |
1746 | 16 | BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::JCC_1)) |
1747 | 16 | .addMBB(TrueMBB).addImm(CC); |
1748 | 16 | finishCondBranch(BI->getParent(), TrueMBB, FalseMBB); |
1749 | 16 | return true; |
1750 | 16 | } |
1751 | 182 | |
1752 | 182 | // Otherwise do a clumsy setcc and re-test it. |
1753 | 182 | // Note that i1 essentially gets ANY_EXTEND'ed to i8 where it isn't used |
1754 | 182 | // in an explicit cast, so make sure to handle that correctly. |
1755 | 182 | unsigned OpReg = getRegForValue(BI->getCondition()); |
1756 | 182 | if (OpReg == 0) return false0 ; |
1757 | 182 | |
1758 | 182 | // In case OpReg is a K register, COPY to a GPR |
1759 | 182 | if (MRI.getRegClass(OpReg) == &X86::VK1RegClass) { |
1760 | 0 | unsigned KOpReg = OpReg; |
1761 | 0 | OpReg = createResultReg(&X86::GR32RegClass); |
1762 | 0 | BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, |
1763 | 0 | TII.get(TargetOpcode::COPY), OpReg) |
1764 | 0 | .addReg(KOpReg); |
1765 | 0 | OpReg = fastEmitInst_extractsubreg(MVT::i8, OpReg, /*Kill=*/true, |
1766 | 0 | X86::sub_8bit); |
1767 | 0 | } |
1768 | 182 | BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::TEST8ri)) |
1769 | 182 | .addReg(OpReg) |
1770 | 182 | .addImm(1); |
1771 | 182 | BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::JCC_1)) |
1772 | 182 | .addMBB(TrueMBB).addImm(X86::COND_NE); |
1773 | 182 | finishCondBranch(BI->getParent(), TrueMBB, FalseMBB); |
1774 | 182 | return true; |
1775 | 182 | } |
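
A scalar model of the two-branch FCMP lowering above for the UNE case (standalone sketch with ZF/PF modeled per UCOMISS semantics, not an LLVM API): the first JCC fires on not-equal, and the extra JP catches the unordered case.

#include <cmath>

bool fcmpUNE(float A, float B) {
  bool PF = std::isunordered(A, B); // set when either operand is NaN
  bool ZF = PF || (A == B);         // UCOMISS: equal or unordered
  return !ZF || PF;                 // first branch: JNE; second branch: JP
}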
1776 | | |
1777 | 24 | bool X86FastISel::X86SelectShift(const Instruction *I) { |
1778 | 24 | unsigned CReg = 0, OpReg = 0; |
1779 | 24 | const TargetRegisterClass *RC = nullptr; |
1780 | 24 | if (I->getType()->isIntegerTy(8)) { |
1781 | 1 | CReg = X86::CL; |
1782 | 1 | RC = &X86::GR8RegClass; |
1783 | 1 | switch (I->getOpcode()) { |
1784 | 1 | case Instruction::LShr: OpReg = X86::SHR8rCL; break0 ; |
1785 | 1 | case Instruction::AShr: OpReg = X86::SAR8rCL; break0 ; |
1786 | 1 | case Instruction::Shl: OpReg = X86::SHL8rCL; break; |
1787 | 1 | default: return false0 ; |
1788 | 23 | } |
1789 | 23 | } else if (I->getType()->isIntegerTy(16)) { |
1790 | 3 | CReg = X86::CX; |
1791 | 3 | RC = &X86::GR16RegClass; |
1792 | 3 | switch (I->getOpcode()) { |
1793 | 3 | default: 0 llvm_unreachable0 ("Unexpected shift opcode"); |
1794 | 3 | case Instruction::LShr: OpReg = X86::SHR16rCL; break1 ; |
1795 | 3 | case Instruction::AShr: OpReg = X86::SAR16rCL; break1 ; |
1796 | 3 | case Instruction::Shl: OpReg = X86::SHL16rCL; break1 ; |
1797 | 20 | } |
1798 | 20 | } else if (I->getType()->isIntegerTy(32)) { |
1799 | 8 | CReg = X86::ECX; |
1800 | 8 | RC = &X86::GR32RegClass; |
1801 | 8 | switch (I->getOpcode()) { |
1802 | 8 | default: 0 llvm_unreachable0 ("Unexpected shift opcode"); |
1803 | 8 | case Instruction::LShr: OpReg = X86::SHR32rCL; break1 ; |
1804 | 8 | case Instruction::AShr: OpReg = X86::SAR32rCL; break4 ; |
1805 | 8 | case Instruction::Shl: OpReg = X86::SHL32rCL; break3 ; |
1806 | 12 | } |
1807 | 12 | } else if (I->getType()->isIntegerTy(64)) { |
1808 | 10 | CReg = X86::RCX; |
1809 | 10 | RC = &X86::GR64RegClass; |
1810 | 10 | switch (I->getOpcode()) { |
1811 | 10 | default: 0 llvm_unreachable0 ("Unexpected shift opcode"); |
1812 | 10 | case Instruction::LShr: OpReg = X86::SHR64rCL; break3 ; |
1813 | 10 | case Instruction::AShr: OpReg = X86::SAR64rCL; break4 ; |
1814 | 10 | case Instruction::Shl: OpReg = X86::SHL64rCL; break3 ; |
1815 | 2 | } |
1816 | 2 | } else { |
1817 | 2 | return false; |
1818 | 2 | } |
1819 | 22 | |
1820 | 22 | MVT VT; |
1821 | 22 | if (!isTypeLegal(I->getType(), VT)) |
1822 | 0 | return false; |
1823 | 22 | |
1824 | 22 | unsigned Op0Reg = getRegForValue(I->getOperand(0)); |
1825 | 22 | if (Op0Reg == 0) return false0 ; |
1826 | 22 | |
1827 | 22 | unsigned Op1Reg = getRegForValue(I->getOperand(1)); |
1828 | 22 | if (Op1Reg == 0) return false0 ; |
1829 | 22 | BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(TargetOpcode::COPY), |
1830 | 22 | CReg).addReg(Op1Reg); |
1831 | 22 | |
1832 | 22 | // The shift instruction uses X86::CL. If we defined a super-register |
1833 | 22 | // of X86::CL, emit a subreg KILL to precisely describe what we're doing here. |
1834 | 22 | if (CReg != X86::CL) |
1835 | 21 | BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, |
1836 | 21 | TII.get(TargetOpcode::KILL), X86::CL) |
1837 | 21 | .addReg(CReg, RegState::Kill); |
1838 | 22 | |
1839 | 22 | unsigned ResultReg = createResultReg(RC); |
1840 | 22 | BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(OpReg), ResultReg) |
1841 | 22 | .addReg(Op0Reg); |
1842 | 22 | updateValueMap(I, ResultReg); |
1843 | 22 | return true; |
1844 | 22 | } |
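
x86 variable-count shifts read their count only from CL, which is why the count is first copied into the chosen CReg (with a KILL tying the super-register to CL). A scalar model of the 32-bit case (standalone sketch):

#include <cstdint>

uint32_t shl32(uint32_t Op0, uint32_t Count) {
  uint8_t CL = static_cast<uint8_t>(Count); // COPY of Op1Reg into ECX; SHL reads CL
  return Op0 << (CL & 31);                  // SHL32rCL: hardware masks the count to 5 bits
}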
1845 | | |
1846 | 44 | bool X86FastISel::X86SelectDivRem(const Instruction *I) { |
1847 | 44 | const static unsigned NumTypes = 4; // i8, i16, i32, i64 |
1848 | 44 | const static unsigned NumOps = 4; // SDiv, SRem, UDiv, URem |
1849 | 44 | const static bool S = true; // IsSigned |
1850 | 44 | const static bool U = false; // !IsSigned |
1851 | 44 | const static unsigned Copy = TargetOpcode::COPY; |
1852 | 44 | // For the X86 DIV/IDIV instruction, in most cases the dividend |
1853 | 44 | // (numerator) must be in a specific register pair highreg:lowreg, |
1854 | 44 | // producing the quotient in lowreg and the remainder in highreg. |
1855 | 44 | // For most data types, to set up the instruction, the dividend is |
1856 | 44 | // copied into lowreg, and lowreg is sign-extended or zero-extended |
1857 | 44 | // into highreg. The exception is i8, where the dividend is defined |
1858 | 44 | // as a single register rather than a register pair, and we |
1859 | 44 | // therefore directly sign-extend or zero-extend the dividend into |
1860 | 44 | // lowreg, instead of copying, and ignore the highreg. |
1861 | 44 | const static struct DivRemEntry { |
1862 | 44 | // The following portion depends only on the data type. |
1863 | 44 | const TargetRegisterClass *RC; |
1864 | 44 | unsigned LowInReg; // low part of the register pair |
1865 | 44 | unsigned HighInReg; // high part of the register pair |
1866 | 44 | // The following portion depends on both the data type and the operation. |
1867 | 44 | struct DivRemResult { |
1868 | 44 | unsigned OpDivRem; // The specific DIV/IDIV opcode to use. |
1869 | 44 | unsigned OpSignExtend; // Opcode for sign-extending lowreg into |
1870 | 44 | // highreg, or copying a zero into highreg. |
1871 | 44 | unsigned OpCopy; // Opcode for copying dividend into lowreg, or |
1872 | 44 | // zero/sign-extending into lowreg for i8. |
1873 | 44 | unsigned DivRemResultReg; // Register containing the desired result. |
1874 | 44 | bool IsOpSigned; // Whether to use signed or unsigned form. |
1875 | 44 | } ResultTable[NumOps]; |
1876 | 44 | } OpTable[NumTypes] = { |
1877 | 44 | { &X86::GR8RegClass, X86::AX, 0, { |
1878 | 44 | { X86::IDIV8r, 0, X86::MOVSX16rr8, X86::AL, S }, // SDiv |
1879 | 44 | { X86::IDIV8r, 0, X86::MOVSX16rr8, X86::AH, S }, // SRem |
1880 | 44 | { X86::DIV8r, 0, X86::MOVZX16rr8, X86::AL, U }, // UDiv |
1881 | 44 | { X86::DIV8r, 0, X86::MOVZX16rr8, X86::AH, U }, // URem |
1882 | 44 | } |
1883 | 44 | }, // i8 |
1884 | 44 | { &X86::GR16RegClass, X86::AX, X86::DX, { |
1885 | 44 | { X86::IDIV16r, X86::CWD, Copy, X86::AX, S }, // SDiv |
1886 | 44 | { X86::IDIV16r, X86::CWD, Copy, X86::DX, S }, // SRem |
1887 | 44 | { X86::DIV16r, X86::MOV32r0, Copy, X86::AX, U }, // UDiv |
1888 | 44 | { X86::DIV16r, X86::MOV32r0, Copy, X86::DX, U }, // URem |
1889 | 44 | } |
1890 | 44 | }, // i16 |
1891 | 44 | { &X86::GR32RegClass, X86::EAX, X86::EDX, { |
1892 | 44 | { X86::IDIV32r, X86::CDQ, Copy, X86::EAX, S }, // SDiv |
1893 | 44 | { X86::IDIV32r, X86::CDQ, Copy, X86::EDX, S }, // SRem |
1894 | 44 | { X86::DIV32r, X86::MOV32r0, Copy, X86::EAX, U }, // UDiv |
1895 | 44 | { X86::DIV32r, X86::MOV32r0, Copy, X86::EDX, U }, // URem |
1896 | 44 | } |
1897 | 44 | }, // i32 |
1898 | 44 | { &X86::GR64RegClass, X86::RAX, X86::RDX, { |
1899 | 44 | { X86::IDIV64r, X86::CQO, Copy, X86::RAX, S }, // SDiv |
1900 | 44 | { X86::IDIV64r, X86::CQO, Copy, X86::RDX, S }, // SRem |
1901 | 44 | { X86::DIV64r, X86::MOV32r0, Copy, X86::RAX, U }, // UDiv |
1902 | 44 | { X86::DIV64r, X86::MOV32r0, Copy, X86::RDX, U }, // URem |
1903 | 44 | } |
1904 | 44 | }, // i64 |
1905 | 44 | }; |
1906 | 44 | |
1907 | 44 | MVT VT; |
1908 | 44 | if (!isTypeLegal(I->getType(), VT)) |
1909 | 0 | return false; |
1910 | 44 | |
1911 | 44 | unsigned TypeIndex, OpIndex; |
1912 | 44 | switch (VT.SimpleTy) { |
1913 | 44 | default: return false0 ; |
1914 | 44 | case MVT::i8: TypeIndex = 0; break10 ; |
1915 | 44 | case MVT::i16: TypeIndex = 1; break8 ; |
1916 | 44 | case MVT::i32: TypeIndex = 2; break9 ; |
1917 | 44 | case MVT::i64: TypeIndex = 3; |
1918 | 17 | if (!Subtarget->is64Bit()) |
1919 | 0 | return false; |
1920 | 17 | break; |
1921 | 44 | } |
1922 | 44 | |
1923 | 44 | switch (I->getOpcode()) { |
1924 | 44 | default: 0 llvm_unreachable0 ("Unexpected div/rem opcode"); |
1925 | 44 | case Instruction::SDiv: OpIndex = 0; break11 ; |
1926 | 44 | case Instruction::SRem: OpIndex = 1; break11 ; |
1927 | 44 | case Instruction::UDiv: OpIndex = 2; break11 ; |
1928 | 44 | case Instruction::URem: OpIndex = 3; break11 ; |
1929 | 44 | } |
1930 | 44 | |
1931 | 44 | const DivRemEntry &TypeEntry = OpTable[TypeIndex]; |
1932 | 44 | const DivRemEntry::DivRemResult &OpEntry = TypeEntry.ResultTable[OpIndex]; |
1933 | 44 | unsigned Op0Reg = getRegForValue(I->getOperand(0)); |
1934 | 44 | if (Op0Reg == 0) |
1935 | 0 | return false; |
1936 | 44 | unsigned Op1Reg = getRegForValue(I->getOperand(1)); |
1937 | 44 | if (Op1Reg == 0) |
1938 | 0 | return false; |
1939 | 44 | |
1940 | 44 | // Move op0 into low-order input register. |
1941 | 44 | BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, |
1942 | 44 | TII.get(OpEntry.OpCopy), TypeEntry.LowInReg).addReg(Op0Reg); |
1943 | 44 | // Zero-extend or sign-extend into high-order input register. |
1944 | 44 | if (OpEntry.OpSignExtend) { |
1945 | 34 | if (OpEntry.IsOpSigned) |
1946 | 18 | BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, |
1947 | 18 | TII.get(OpEntry.OpSignExtend)); |
1948 | 16 | else { |
1949 | 16 | unsigned Zero32 = createResultReg(&X86::GR32RegClass); |
1950 | 16 | BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, |
1951 | 16 | TII.get(X86::MOV32r0), Zero32); |
1952 | 16 | |
1953 | 16 | // Copy the zero into the appropriate sub/super/identical physical |
1954 | 16 | // register. Unfortunately the operations needed are not uniform enough |
1955 | 16 | // to fit neatly into the table above. |
1956 | 16 | if (VT == MVT::i16) { |
1957 | 4 | BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, |
1958 | 4 | TII.get(Copy), TypeEntry.HighInReg) |
1959 | 4 | .addReg(Zero32, 0, X86::sub_16bit); |
1960 | 12 | } else if (VT == MVT::i32) { |
1961 | 4 | BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, |
1962 | 4 | TII.get(Copy), TypeEntry.HighInReg) |
1963 | 4 | .addReg(Zero32); |
1964 | 8 | } else if (VT == MVT::i64) { |
1965 | 8 | BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, |
1966 | 8 | TII.get(TargetOpcode::SUBREG_TO_REG), TypeEntry.HighInReg) |
1967 | 8 | .addImm(0).addReg(Zero32).addImm(X86::sub_32bit); |
1968 | 8 | } |
1969 | 16 | } |
1970 | 34 | } |
1971 | 44 | // Generate the DIV/IDIV instruction. |
1972 | 44 | BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, |
1973 | 44 | TII.get(OpEntry.OpDivRem)).addReg(Op1Reg); |
1974 | 44 | // For i8 remainder, we can't reference ah directly, as we'll end |
1975 | 44 | // up with bogus copies like %r9b = COPY %ah. Reference ax |
1976 | 44 | // instead to prevent ah references in a rex instruction. |
1977 | 44 | // |
1978 | 44 | // The current assumption of the fast register allocator is that isel |
1979 | 44 | // won't generate explicit references to the GR8_NOREX registers. If |
1980 | 44 | // the allocator and/or the backend get enhanced to be more robust in |
1981 | 44 | // that regard, this can be, and should be, removed. |
1982 | 44 | unsigned ResultReg = 0; |
1983 | 44 | if ((I->getOpcode() == Instruction::SRem || |
1984 | 44 | I->getOpcode() == Instruction::URem33 ) && |
1985 | 44 | OpEntry.DivRemResultReg == X86::AH22 && Subtarget->is64Bit()5 ) { |
1986 | 3 | unsigned SourceSuperReg = createResultReg(&X86::GR16RegClass); |
1987 | 3 | unsigned ResultSuperReg = createResultReg(&X86::GR16RegClass); |
1988 | 3 | BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, |
1989 | 3 | TII.get(Copy), SourceSuperReg).addReg(X86::AX); |
1990 | 3 | |
1991 | 3 | // Shift AX right by 8 bits instead of using AH. |
1992 | 3 | BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::SHR16ri), |
1993 | 3 | ResultSuperReg).addReg(SourceSuperReg).addImm(8); |
1994 | 3 | |
1995 | 3 | // Now reference the 8-bit subreg of the result. |
1996 | 3 | ResultReg = fastEmitInst_extractsubreg(MVT::i8, ResultSuperReg, |
1997 | 3 | /*Kill=*/true, X86::sub_8bit); |
1998 | 3 | } |
1999 | 44 | // Copy the result out of the physreg if we haven't already. |
2000 | 44 | if (!ResultReg) { |
2001 | 41 | ResultReg = createResultReg(TypeEntry.RC); |
2002 | 41 | BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Copy), ResultReg) |
2003 | 41 | .addReg(OpEntry.DivRemResultReg); |
2004 | 41 | } |
2005 | 44 | updateValueMap(I, ResultReg); |
2006 | 44 | |
2007 | 44 | return true; |
2008 | 44 | } |
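
A scalar model of the i8-remainder workaround above (standalone sketch): AH cannot be referenced in a REX-prefixed instruction on x86-64, so the code copies AX and shifts right by 8 instead.

#include <cstdint>

uint8_t i8RemFromAX(uint16_t AX) {
  // DIV8r leaves the quotient in AL and the remainder in AH == AX >> 8.
  return static_cast<uint8_t>(AX >> 8); // SHR16ri 8, then extract sub_8bit
}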
2009 | | |
2010 | | /// Emit a conditional move instruction (if supported) to lower
2011 | | /// the select. |
2012 | 1.49k | bool X86FastISel::X86FastEmitCMoveSelect(MVT RetVT, const Instruction *I) { |
2013 | 1.49k | // Check if the subtarget supports these instructions. |
2014 | 1.49k | if (!Subtarget->hasCMov()) |
2015 | 0 | return false; |
2016 | 1.49k | |
2017 | 1.49k | // FIXME: Add support for i8. |
2018 | 1.49k | if (RetVT < MVT::i16 || RetVT > MVT::i641.49k ) |
2019 | 1.37k | return false; |
2020 | 116 | |
2021 | 116 | const Value *Cond = I->getOperand(0); |
2022 | 116 | const TargetRegisterClass *RC = TLI.getRegClassFor(RetVT); |
2023 | 116 | bool NeedTest = true; |
2024 | 116 | X86::CondCode CC = X86::COND_NE; |
2025 | 116 | |
2026 | 116 | // Optimize conditions coming from a compare if both instructions are in the |
2027 | 116 | // same basic block (values defined in other basic blocks may not have |
2028 | 116 | // initialized registers). |
2029 | 116 | const auto *CI = dyn_cast<CmpInst>(Cond); |
2030 | 116 | if (CI && (CI->getParent() == I->getParent())92 ) { |
2031 | 91 | CmpInst::Predicate Predicate = optimizeCmpPredicate(CI); |
2032 | 91 | |
2033 | 91 | // FCMP_OEQ and FCMP_UNE cannot be checked with a single instruction. |
2034 | 91 | static const uint16_t SETFOpcTable[2][3] = { |
2035 | 91 | { X86::COND_NP, X86::COND_E, X86::TEST8rr }, |
2036 | 91 | { X86::COND_P, X86::COND_NE, X86::OR8rr } |
2037 | 91 | }; |
2038 | 91 | const uint16_t *SETFOpc = nullptr; |
2039 | 91 | switch (Predicate) { |
2040 | 91 | default: break85 ; |
2041 | 91 | case CmpInst::FCMP_OEQ: |
2042 | 3 | SETFOpc = &SETFOpcTable[0][0]; |
2043 | 3 | Predicate = CmpInst::ICMP_NE; |
2044 | 3 | break; |
2045 | 91 | case CmpInst::FCMP_UNE: |
2046 | 3 | SETFOpc = &SETFOpcTable[1][0]; |
2047 | 3 | Predicate = CmpInst::ICMP_NE; |
2048 | 3 | break; |
2049 | 91 | } |
2050 | 91 | |
2051 | 91 | bool NeedSwap; |
2052 | 91 | std::tie(CC, NeedSwap) = X86::getX86ConditionCode(Predicate); |
2053 | 91 | assert(CC <= X86::LAST_VALID_COND && "Unexpected condition code."); |
2054 | 91 | |
2055 | 91 | const Value *CmpLHS = CI->getOperand(0); |
2056 | 91 | const Value *CmpRHS = CI->getOperand(1); |
2057 | 91 | if (NeedSwap) |
2058 | 12 | std::swap(CmpLHS, CmpRHS); |
2059 | 91 | |
2060 | 91 | EVT CmpVT = TLI.getValueType(DL, CmpLHS->getType()); |
2061 | 91 | // Emit a compare of the LHS and RHS, setting the flags. |
2062 | 91 | if (!X86FastEmitCompare(CmpLHS, CmpRHS, CmpVT, CI->getDebugLoc())) |
2063 | 0 | return false; |
2064 | 91 | |
2065 | 91 | if (SETFOpc) { |
2066 | 6 | unsigned FlagReg1 = createResultReg(&X86::GR8RegClass); |
2067 | 6 | unsigned FlagReg2 = createResultReg(&X86::GR8RegClass); |
2068 | 6 | BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::SETCCr), |
2069 | 6 | FlagReg1).addImm(SETFOpc[0]); |
2070 | 6 | BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::SETCCr), |
2071 | 6 | FlagReg2).addImm(SETFOpc[1]); |
2072 | 6 | auto const &II = TII.get(SETFOpc[2]); |
2073 | 6 | if (II.getNumDefs()) { |
2074 | 3 | unsigned TmpReg = createResultReg(&X86::GR8RegClass); |
2075 | 3 | BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, TmpReg) |
2076 | 3 | .addReg(FlagReg2).addReg(FlagReg1); |
2077 | 3 | } else { |
2078 | 3 | BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II) |
2079 | 3 | .addReg(FlagReg2).addReg(FlagReg1); |
2080 | 3 | } |
2081 | 6 | } |
2082 | 91 | NeedTest = false; |
2083 | 91 | } else if (25 foldX86XALUIntrinsic(CC, I, Cond)25 ) { |
2084 | 14 | // Fake request the condition, otherwise the intrinsic might be completely |
2085 | 14 | // optimized away. |
2086 | 14 | unsigned TmpReg = getRegForValue(Cond); |
2087 | 14 | if (TmpReg == 0) |
2088 | 0 | return false; |
2089 | 14 | |
2090 | 14 | NeedTest = false; |
2091 | 14 | } |
2092 | 116 | |
2093 | 116 | if (NeedTest) { |
2094 | 11 | // Selects operate on i1; however, CondReg is 8 bits wide and may contain
2095 | 11 | // garbage. Only the least significant bit is supposed to be
2096 | 11 | // accurate. If we read more than the lsb, we may see non-zero values
2097 | 11 | // where the lsb is zero. Therefore, we have to truncate CondReg to i1 for
2098 | 11 | // the select. This is achieved by performing a TEST against 1.
2099 | 11 | unsigned CondReg = getRegForValue(Cond); |
2100 | 11 | if (CondReg == 0) |
2101 | 0 | return false; |
2102 | 11 | bool CondIsKill = hasTrivialKill(Cond); |
2103 | 11 | |
2104 | 11 | // In case OpReg is a K register, COPY to a GPR |
2105 | 11 | if (MRI.getRegClass(CondReg) == &X86::VK1RegClass) { |
2106 | 0 | unsigned KCondReg = CondReg; |
2107 | 0 | CondReg = createResultReg(&X86::GR32RegClass); |
2108 | 0 | BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, |
2109 | 0 | TII.get(TargetOpcode::COPY), CondReg) |
2110 | 0 | .addReg(KCondReg, getKillRegState(CondIsKill)); |
2111 | 0 | CondReg = fastEmitInst_extractsubreg(MVT::i8, CondReg, /*Kill=*/true, |
2112 | 0 | X86::sub_8bit); |
2113 | 0 | } |
2114 | 11 | BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::TEST8ri)) |
2115 | 11 | .addReg(CondReg, getKillRegState(CondIsKill)) |
2116 | 11 | .addImm(1); |
2117 | 11 | } |
2118 | 116 | |
2119 | 116 | const Value *LHS = I->getOperand(1); |
2120 | 116 | const Value *RHS = I->getOperand(2); |
2121 | 116 | |
2122 | 116 | unsigned RHSReg = getRegForValue(RHS); |
2123 | 116 | bool RHSIsKill = hasTrivialKill(RHS); |
2124 | 116 | |
2125 | 116 | unsigned LHSReg = getRegForValue(LHS); |
2126 | 116 | bool LHSIsKill = hasTrivialKill(LHS); |
2127 | 116 | |
2128 | 116 | if (!LHSReg || !RHSReg) |
2129 | 0 | return false; |
2130 | 116 | |
2131 | 116 | const TargetRegisterInfo &TRI = *Subtarget->getRegisterInfo(); |
2132 | 116 | unsigned Opc = X86::getCMovOpcode(TRI.getRegSizeInBits(*RC)/8); |
2133 | 116 | unsigned ResultReg = fastEmitInst_rri(Opc, RC, RHSReg, RHSIsKill, |
2134 | 116 | LHSReg, LHSIsKill, CC); |
2135 | 116 | updateValueMap(I, ResultReg); |
2136 | 116 | return true; |
2137 | 116 | } |
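
A scalar model of the TEST-then-CMOV sequence above (standalone sketch): only the low bit of the condition byte is meaningful, so the TEST against 1 sets ZF from the lsb and the CMOV picks an operand from the flags.

#include <cstdint>

uint32_t cmovSelect(uint8_t CondByte, uint32_t TrueVal, uint32_t FalseVal) {
  bool ZF = (CondByte & 1) == 0;  // TEST8ri CondReg, 1
  return ZF ? FalseVal : TrueVal; // CMOVNE: keep TrueVal when ZF == 0
}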
2138 | | |
2139 | | /// Emit SSE or AVX instructions to lower the select. |
2140 | | /// |
2141 | | /// Try to use SSE1/SSE2 instructions to simulate a select without branches. |
2142 | | /// This lowers fp selects into a CMP/AND/ANDN/OR sequence when the necessary |
2143 | | /// SSE instructions are available. If AVX is available, try to use a VBLENDV. |
2144 | 1.37k | bool X86FastISel::X86FastEmitSSESelect(MVT RetVT, const Instruction *I) { |
2145 | 1.37k | // Optimize conditions coming from a compare if both instructions are in the |
2146 | 1.37k | // same basic block (values defined in other basic blocks may not have |
2147 | 1.37k | // initialized registers). |
2148 | 1.37k | const auto *CI = dyn_cast<FCmpInst>(I->getOperand(0)); |
2149 | 1.37k | if (!CI || (CI->getParent() != I->getParent())80 ) |
2150 | 1.30k | return false; |
2151 | 78 | |
2152 | 78 | if (I->getType() != CI->getOperand(0)->getType() || |
2153 | 78 | !((Subtarget->hasSSE1() && RetVT == MVT::f32) || |
2154 | 78 | (39 Subtarget->hasSSE2()39 && RetVT == MVT::f6439 ))) |
2155 | 0 | return false; |
2156 | 78 | |
2157 | 78 | const Value *CmpLHS = CI->getOperand(0); |
2158 | 78 | const Value *CmpRHS = CI->getOperand(1); |
2159 | 78 | CmpInst::Predicate Predicate = optimizeCmpPredicate(CI); |
2160 | 78 | |
2161 | 78 | // The optimizer might have replaced fcmp oeq %x, %x with fcmp ord %x, 0.0. |
2162 | 78 | // We don't have to materialize a zero constant for this case and can just use |
2163 | 78 | // %x again on the RHS. |
2164 | 78 | if (Predicate == CmpInst::FCMP_ORD || Predicate == CmpInst::FCMP_UNO72 ) { |
2165 | 12 | const auto *CmpRHSC = dyn_cast<ConstantFP>(CmpRHS); |
2166 | 12 | if (CmpRHSC && CmpRHSC->isNullValue()0 ) |
2167 | 0 | CmpRHS = CmpLHS; |
2168 | 12 | } |
2169 | 78 | |
2170 | 78 | unsigned CC; |
2171 | 78 | bool NeedSwap; |
2172 | 78 | std::tie(CC, NeedSwap) = getX86SSEConditionCode(Predicate); |
2173 | 78 | if (CC > 7 && !Subtarget->hasAVX()6 ) |
2174 | 2 | return false; |
2175 | 76 | |
2176 | 76 | if (NeedSwap) |
2177 | 24 | std::swap(CmpLHS, CmpRHS); |
2178 | 76 | |
2179 | 76 | const Value *LHS = I->getOperand(1); |
2180 | 76 | const Value *RHS = I->getOperand(2); |
2181 | 76 | |
2182 | 76 | unsigned LHSReg = getRegForValue(LHS); |
2183 | 76 | bool LHSIsKill = hasTrivialKill(LHS); |
2184 | 76 | |
2185 | 76 | unsigned RHSReg = getRegForValue(RHS); |
2186 | 76 | bool RHSIsKill = hasTrivialKill(RHS); |
2187 | 76 | |
2188 | 76 | unsigned CmpLHSReg = getRegForValue(CmpLHS); |
2189 | 76 | bool CmpLHSIsKill = hasTrivialKill(CmpLHS); |
2190 | 76 | |
2191 | 76 | unsigned CmpRHSReg = getRegForValue(CmpRHS); |
2192 | 76 | bool CmpRHSIsKill = hasTrivialKill(CmpRHS); |
2193 | 76 | |
2194 | 76 | if (!LHSReg || !RHSReg || !CmpLHS || !CmpRHS) |
2195 | 0 | return false; |
2196 | 76 | |
2197 | 76 | const TargetRegisterClass *RC = TLI.getRegClassFor(RetVT); |
2198 | 76 | unsigned ResultReg; |
2199 | 76 | |
2200 | 76 | if (Subtarget->hasAVX512()) { |
2201 | 26 | // If we have AVX512 we can use a mask compare and masked movss/sd. |
2202 | 26 | const TargetRegisterClass *VR128X = &X86::VR128XRegClass; |
2203 | 26 | const TargetRegisterClass *VK1 = &X86::VK1RegClass; |
2204 | 26 | |
2205 | 26 | unsigned CmpOpcode = |
2206 | 26 | (RetVT == MVT::f32) ? X86::VCMPSSZrr13 : X86::VCMPSDZrr13 ; |
2207 | 26 | unsigned CmpReg = fastEmitInst_rri(CmpOpcode, VK1, CmpLHSReg, CmpLHSIsKill, |
2208 | 26 | CmpRHSReg, CmpRHSIsKill, CC); |
2209 | 26 | |
2210 | 26 | // Need an IMPLICIT_DEF for the input that is used to generate the upper |
2211 | 26 | // bits of the result register since it's not based on any of the inputs.
2212 | 26 | unsigned ImplicitDefReg = createResultReg(VR128X); |
2213 | 26 | BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, |
2214 | 26 | TII.get(TargetOpcode::IMPLICIT_DEF), ImplicitDefReg); |
2215 | 26 | |
2216 | 26 | // Place RHSReg in the passthru of the masked movss/sd operation and put
2217 | 26 | // LHS in the input. The mask input comes from the compare. |
2218 | 26 | unsigned MovOpcode = |
2219 | 26 | (RetVT == MVT::f32) ? X86::VMOVSSZrrk13 : X86::VMOVSDZrrk13 ; |
2220 | 26 | unsigned MovReg = fastEmitInst_rrrr(MovOpcode, VR128X, RHSReg, RHSIsKill, |
2221 | 26 | CmpReg, true, ImplicitDefReg, true, |
2222 | 26 | LHSReg, LHSIsKill); |
2223 | 26 | |
2224 | 26 | ResultReg = createResultReg(RC); |
2225 | 26 | BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, |
2226 | 26 | TII.get(TargetOpcode::COPY), ResultReg).addReg(MovReg); |
2227 | 26 | |
2228 | 50 | } else if (Subtarget->hasAVX()) { |
2229 | 26 | const TargetRegisterClass *VR128 = &X86::VR128RegClass; |
2230 | 26 | |
2231 | 26 | // If we have AVX, create 1 blendv instead of 3 logic instructions. |
2232 | 26 | // Blendv was introduced with SSE 4.1, but the 2 register form implicitly |
2233 | 26 | // uses XMM0 as the selection register. That may need just as many |
2234 | 26 | // instructions as the AND/ANDN/OR sequence due to register moves, so |
2235 | 26 | // don't bother. |
2236 | 26 | unsigned CmpOpcode = |
2237 | 26 | (RetVT == MVT::f32) ? X86::VCMPSSrr13 : X86::VCMPSDrr13 ; |
2238 | 26 | unsigned BlendOpcode = |
2239 | 26 | (RetVT == MVT::f32) ? X86::VBLENDVPSrr13 : X86::VBLENDVPDrr13 ; |
2240 | 26 | |
2241 | 26 | unsigned CmpReg = fastEmitInst_rri(CmpOpcode, RC, CmpLHSReg, CmpLHSIsKill, |
2242 | 26 | CmpRHSReg, CmpRHSIsKill, CC); |
2243 | 26 | unsigned VBlendReg = fastEmitInst_rrr(BlendOpcode, VR128, RHSReg, RHSIsKill, |
2244 | 26 | LHSReg, LHSIsKill, CmpReg, true); |
2245 | 26 | ResultReg = createResultReg(RC); |
2246 | 26 | BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, |
2247 | 26 | TII.get(TargetOpcode::COPY), ResultReg).addReg(VBlendReg); |
2248 | 26 | } else { |
2249 | 24 | // Choose the SSE instruction sequence based on data type (float or double). |
2250 | 24 | static const uint16_t OpcTable[2][4] = { |
2251 | 24 | { X86::CMPSSrr, X86::ANDPSrr, X86::ANDNPSrr, X86::ORPSrr }, |
2252 | 24 | { X86::CMPSDrr, X86::ANDPDrr, X86::ANDNPDrr, X86::ORPDrr } |
2253 | 24 | }; |
2254 | 24 | |
2255 | 24 | const uint16_t *Opc = nullptr; |
2256 | 24 | switch (RetVT.SimpleTy) { |
2257 | 24 | default: return false0 ; |
2258 | 24 | case MVT::f32: Opc = &OpcTable[0][0]; break12 ; |
2259 | 24 | case MVT::f64: Opc = &OpcTable[1][0]; break12 ; |
2260 | 24 | } |
2261 | 24 | |
2262 | 24 | const TargetRegisterClass *VR128 = &X86::VR128RegClass; |
2263 | 24 | unsigned CmpReg = fastEmitInst_rri(Opc[0], RC, CmpLHSReg, CmpLHSIsKill, |
2264 | 24 | CmpRHSReg, CmpRHSIsKill, CC); |
2265 | 24 | unsigned AndReg = fastEmitInst_rr(Opc[1], VR128, CmpReg, /*IsKill=*/false, |
2266 | 24 | LHSReg, LHSIsKill); |
2267 | 24 | unsigned AndNReg = fastEmitInst_rr(Opc[2], VR128, CmpReg, /*IsKill=*/true, |
2268 | 24 | RHSReg, RHSIsKill); |
2269 | 24 | unsigned OrReg = fastEmitInst_rr(Opc[3], VR128, AndNReg, /*IsKill=*/true, |
2270 | 24 | AndReg, /*IsKill=*/true); |
2271 | 24 | ResultReg = createResultReg(RC); |
2272 | 24 | BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, |
2273 | 24 | TII.get(TargetOpcode::COPY), ResultReg).addReg(OrReg); |
2274 | 24 | } |
2275 | 76 | updateValueMap(I, ResultReg); |
2276 | 76 | return true; |
2277 | 76 | } |
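
A scalar model of the CMP/AND/ANDN/OR fallback above (standalone sketch): CMPSS yields an all-ones or all-zeros mask, and the three logic ops blend the two operands bitwise without a branch.

#include <cstdint>
#include <cstring>

float maskSelect(bool CmpTrue, float LHS, float RHS) {
  uint32_t Mask = CmpTrue ? 0xFFFFFFFFu : 0u; // CMPSSrr writes the mask
  uint32_t L, R;
  std::memcpy(&L, &LHS, sizeof L);
  std::memcpy(&R, &RHS, sizeof R);
  uint32_t Bits = (Mask & L) | (~Mask & R);   // ANDPSrr / ANDNPSrr / ORPSrr
  float Out;
  std::memcpy(&Out, &Bits, sizeof Out);
  return Out;
}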
2278 | | |
2279 | 1.30k | bool X86FastISel::X86FastEmitPseudoSelect(MVT RetVT, const Instruction *I) { |
2280 | 1.30k | // These are pseudo CMOV instructions and will be later expanded into control- |
2281 | 1.30k | // flow. |
2282 | 1.30k | unsigned Opc; |
2283 | 1.30k | switch (RetVT.SimpleTy) { |
2284 | 1.30k | default: return false1.26k ; |
2285 | 1.30k | case MVT::i8: Opc = X86::CMOV_GR8; break3 ; |
2286 | 1.30k | case MVT::i16: Opc = X86::CMOV_GR16; break0 ; |
2287 | 1.30k | case MVT::i32: Opc = X86::CMOV_GR32; break0 ; |
2288 | 1.30k | case MVT::f32: Opc = Subtarget->hasAVX512() 33 ? X86::CMOV_FR32X10 |
2289 | 33 | : X86::CMOV_FR3223 ; break; |
2290 | 1.30k | case MVT::f64: Opc = Subtarget->hasAVX512() 1 ? X86::CMOV_FR64X0 |
2291 | 1 | : X86::CMOV_FR64; break; |
2292 | 37 | } |
2293 | 37 | |
2294 | 37 | const Value *Cond = I->getOperand(0); |
2295 | 37 | X86::CondCode CC = X86::COND_NE; |
2296 | 37 | |
2297 | 37 | // Optimize conditions coming from a compare if both instructions are in the |
2298 | 37 | // same basic block (values defined in other basic blocks may not have |
2299 | 37 | // initialized registers). |
2300 | 37 | const auto *CI = dyn_cast<CmpInst>(Cond); |
2301 | 37 | if (CI && (CI->getParent() == I->getParent())) { |
2302 | 35 | bool NeedSwap; |
2303 | 35 | std::tie(CC, NeedSwap) = X86::getX86ConditionCode(CI->getPredicate()); |
2304 | 35 | if (CC > X86::LAST_VALID_COND) |
2305 | 0 | return false; |
2306 | 35 | |
2307 | 35 | const Value *CmpLHS = CI->getOperand(0); |
2308 | 35 | const Value *CmpRHS = CI->getOperand(1); |
2309 | 35 | |
2310 | 35 | if (NeedSwap) |
2311 | 0 | std::swap(CmpLHS, CmpRHS); |
2312 | 35 | |
2313 | 35 | EVT CmpVT = TLI.getValueType(DL, CmpLHS->getType()); |
2314 | 35 | if (!X86FastEmitCompare(CmpLHS, CmpRHS, CmpVT, CI->getDebugLoc())) |
2315 | 0 | return false; |
2316 | 2 | } else { |
2317 | 2 | unsigned CondReg = getRegForValue(Cond); |
2318 | 2 | if (CondReg == 0) |
2319 | 0 | return false; |
2320 | 2 | bool CondIsKill = hasTrivialKill(Cond); |
2321 | 2 | |
2322 | 2 | // In case OpReg is a K register, COPY to a GPR |
2323 | 2 | if (MRI.getRegClass(CondReg) == &X86::VK1RegClass) { |
2324 | 0 | unsigned KCondReg = CondReg; |
2325 | 0 | CondReg = createResultReg(&X86::GR32RegClass); |
2326 | 0 | BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, |
2327 | 0 | TII.get(TargetOpcode::COPY), CondReg) |
2328 | 0 | .addReg(KCondReg, getKillRegState(CondIsKill)); |
2329 | 0 | CondReg = fastEmitInst_extractsubreg(MVT::i8, CondReg, /*Kill=*/true, |
2330 | 0 | X86::sub_8bit); |
2331 | 0 | } |
2332 | 2 | BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::TEST8ri)) |
2333 | 2 | .addReg(CondReg, getKillRegState(CondIsKill)) |
2334 | 2 | .addImm(1); |
2335 | 2 | } |
2336 | 37 | |
2337 | 37 | const Value *LHS = I->getOperand(1); |
2338 | 37 | const Value *RHS = I->getOperand(2); |
2339 | 37 | |
2340 | 37 | unsigned LHSReg = getRegForValue(LHS); |
2341 | 37 | bool LHSIsKill = hasTrivialKill(LHS); |
2342 | 37 | |
2343 | 37 | unsigned RHSReg = getRegForValue(RHS); |
2344 | 37 | bool RHSIsKill = hasTrivialKill(RHS); |
2345 | 37 | |
2346 | 37 | if (!LHSReg || !RHSReg) |
2347 | 0 | return false; |
2348 | 37 | |
2349 | 37 | const TargetRegisterClass *RC = TLI.getRegClassFor(RetVT); |
2350 | 37 | |
2351 | 37 | unsigned ResultReg = |
2352 | 37 | fastEmitInst_rri(Opc, RC, RHSReg, RHSIsKill, LHSReg, LHSIsKill, CC); |
2353 | 37 | updateValueMap(I, ResultReg); |
2354 | 37 | return true; |
2355 | 37 | } |
2356 | | |
2357 | 1.50k | bool X86FastISel::X86SelectSelect(const Instruction *I) { |
2358 | 1.50k | MVT RetVT; |
2359 | 1.50k | if (!isTypeLegal(I->getType(), RetVT)) |
2360 | 1 | return false; |
2361 | 1.50k | |
2362 | 1.50k | // Check if we can fold the select. |
2363 | 1.50k | if (const auto *CI = dyn_cast<CmpInst>(I->getOperand(0))) { |
2364 | 331 | CmpInst::Predicate Predicate = optimizeCmpPredicate(CI); |
2365 | 331 | const Value *Opnd = nullptr; |
2366 | 331 | switch (Predicate) { |
2367 | 331 | default: break325 ; |
2368 | 331 | case CmpInst::FCMP_FALSE: Opnd = I->getOperand(2); break3 ; |
2369 | 331 | case CmpInst::FCMP_TRUE: Opnd = I->getOperand(1); break3 ; |
2370 | 331 | } |
2371 | 331 | // No need for a select anymore - this is an unconditional move. |
2372 | 331 | if (Opnd) { |
2373 | 6 | unsigned OpReg = getRegForValue(Opnd); |
2374 | 6 | if (OpReg == 0) |
2375 | 0 | return false; |
2376 | 6 | bool OpIsKill = hasTrivialKill(Opnd); |
2377 | 6 | const TargetRegisterClass *RC = TLI.getRegClassFor(RetVT); |
2378 | 6 | unsigned ResultReg = createResultReg(RC); |
2379 | 6 | BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, |
2380 | 6 | TII.get(TargetOpcode::COPY), ResultReg) |
2381 | 6 | .addReg(OpReg, getKillRegState(OpIsKill)); |
2382 | 6 | updateValueMap(I, ResultReg); |
2383 | 6 | return true; |
2384 | 6 | } |
2385 | 331 | } |
2386 | 1.49k | |
2387 | 1.49k | // First try to use real conditional move instructions. |
2388 | 1.49k | if (X86FastEmitCMoveSelect(RetVT, I)) |
2389 | 116 | return true; |
2390 | 1.37k | |
2391 | 1.37k | // Try to use a sequence of SSE instructions to simulate a conditional move. |
2392 | 1.37k | if (X86FastEmitSSESelect(RetVT, I)) |
2393 | 76 | return true; |
2394 | 1.30k | |
2395 | 1.30k | // Fall-back to pseudo conditional move instructions, which will be later |
2396 | 1.30k | // converted to control-flow. |
2397 | 1.30k | if (X86FastEmitPseudoSelect(RetVT, I)) |
2398 | 37 | return true; |
2399 | 1.26k | |
2400 | 1.26k | return false; |
2401 | 1.26k | } |
2402 | | |
2403 | | // Common code for X86SelectSIToFP and X86SelectUIToFP. |
2404 | 52 | bool X86FastISel::X86SelectIntToFP(const Instruction *I, bool IsSigned) { |
2405 | 52 | // The target-independent selection algorithm in FastISel already knows how |
2406 | 52 | // to select a SINT_TO_FP if the target is SSE but not AVX. |
2407 | 52 | // Early exit if the subtarget doesn't have AVX. |
2408 | 52 | // Unsigned conversion requires avx512. |
2409 | 52 | bool HasAVX512 = Subtarget->hasAVX512(); |
2410 | 52 | if (!Subtarget->hasAVX() || (48 !IsSigned48 && !HasAVX51220 )) |
2411 | 4 | return false; |
2412 | 48 | |
2413 | 48 | // TODO: We could sign extend narrower types. |
2414 | 48 | MVT SrcVT = TLI.getSimpleValueType(DL, I->getOperand(0)->getType()); |
2415 | 48 | if (SrcVT != MVT::i32 && SrcVT != MVT::i6430 ) |
2416 | 12 | return false; |
2417 | 36 | |
2418 | 36 | // Select integer to float/double conversion. |
2419 | 36 | unsigned OpReg = getRegForValue(I->getOperand(0)); |
2420 | 36 | if (OpReg == 0) |
2421 | 0 | return false; |
2422 | 36 | |
2423 | 36 | unsigned Opcode; |
2424 | 36 | |
2425 | 36 | static const uint16_t SCvtOpc[2][2][2] = { |
2426 | 36 | { { X86::VCVTSI2SSrr, X86::VCVTSI642SSrr }, |
2427 | 36 | { X86::VCVTSI2SDrr, X86::VCVTSI642SDrr } }, |
2428 | 36 | { { X86::VCVTSI2SSZrr, X86::VCVTSI642SSZrr }, |
2429 | 36 | { X86::VCVTSI2SDZrr, X86::VCVTSI642SDZrr } }, |
2430 | 36 | }; |
2431 | 36 | static const uint16_t UCvtOpc[2][2] = { |
2432 | 36 | { X86::VCVTUSI2SSZrr, X86::VCVTUSI642SSZrr }, |
2433 | 36 | { X86::VCVTUSI2SDZrr, X86::VCVTUSI642SDZrr }, |
2434 | 36 | }; |
2435 | 36 | bool Is64Bit = SrcVT == MVT::i64; |
2436 | 36 | |
2437 | 36 | if (I->getType()->isDoubleTy()) { |
2438 | 18 | // s/uitofp int -> double |
2439 | 18 | Opcode = IsSigned ? SCvtOpc[HasAVX512][1][Is64Bit]12 : UCvtOpc[1][Is64Bit]6 ; |
2440 | 18 | } else if (I->getType()->isFloatTy()) { |
2441 | 18 | // s/uitofp int -> float |
2442 | 18 | Opcode = IsSigned ? SCvtOpc[HasAVX512][0][Is64Bit]12 : UCvtOpc[0][Is64Bit]6 ; |
2443 | 18 | } else |
2444 | 0 | return false; |
2445 | 36 | |
2446 | 36 | MVT DstVT = TLI.getValueType(DL, I->getType()).getSimpleVT(); |
2447 | 36 | const TargetRegisterClass *RC = TLI.getRegClassFor(DstVT); |
2448 | 36 | unsigned ImplicitDefReg = createResultReg(RC); |
2449 | 36 | BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, |
2450 | 36 | TII.get(TargetOpcode::IMPLICIT_DEF), ImplicitDefReg); |
2451 | 36 | unsigned ResultReg = |
2452 | 36 | fastEmitInst_rr(Opcode, RC, ImplicitDefReg, true, OpReg, false); |
2453 | 36 | updateValueMap(I, ResultReg); |
2454 | 36 | return true; |
2455 | 36 | } |
2456 | | |
2457 | 31 | bool X86FastISel::X86SelectSIToFP(const Instruction *I) { |
2458 | 31 | return X86SelectIntToFP(I, /*IsSigned*/true); |
2459 | 31 | } |
2460 | | |
2461 | 21 | bool X86FastISel::X86SelectUIToFP(const Instruction *I) { |
2462 | 21 | return X86SelectIntToFP(I, /*IsSigned*/false); |
2463 | 21 | } |
2464 | | |
2465 | | // Helper method used by X86SelectFPExt and X86SelectFPTrunc. |
2466 | | bool X86FastISel::X86SelectFPExtOrFPTrunc(const Instruction *I, |
2467 | | unsigned TargetOpc, |
2468 | 26 | const TargetRegisterClass *RC) { |
2469 | 26 | assert((I->getOpcode() == Instruction::FPExt || |
2470 | 26 | I->getOpcode() == Instruction::FPTrunc) && |
2471 | 26 | "Instruction must be an FPExt or FPTrunc!"); |
2472 | 26 | bool HasAVX = Subtarget->hasAVX(); |
2473 | 26 | |
2474 | 26 | unsigned OpReg = getRegForValue(I->getOperand(0)); |
2475 | 26 | if (OpReg == 0) |
2476 | 0 | return false; |
2477 | 26 | |
2478 | 26 | unsigned ImplicitDefReg; |
2479 | 26 | if (HasAVX) { |
2480 | 12 | ImplicitDefReg = createResultReg(RC); |
2481 | 12 | BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, |
2482 | 12 | TII.get(TargetOpcode::IMPLICIT_DEF), ImplicitDefReg); |
2483 | 12 | |
2484 | 12 | } |
2485 | 26 | |
2486 | 26 | unsigned ResultReg = createResultReg(RC); |
2487 | 26 | MachineInstrBuilder MIB; |
2488 | 26 | MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(TargetOpc), |
2489 | 26 | ResultReg); |
2490 | 26 | |
2491 | 26 | if (HasAVX) |
2492 | 12 | MIB.addReg(ImplicitDefReg); |
2493 | 26 | |
2494 | 26 | MIB.addReg(OpReg); |
2495 | 26 | updateValueMap(I, ResultReg); |
2496 | 26 | return true; |
2497 | 26 | } |
2498 | | |
2499 | 29 | bool X86FastISel::X86SelectFPExt(const Instruction *I) { |
2500 | 29 | if (X86ScalarSSEf64 && I->getType()->isDoubleTy() && |
2501 | 29 | I->getOperand(0)->getType()->isFloatTy()17 ) { |
2502 | 17 | bool HasAVX512 = Subtarget->hasAVX512(); |
2503 | 17 | // fpext from float to double. |
2504 | 17 | unsigned Opc = |
2505 | 17 | HasAVX512 ? X86::VCVTSS2SDZrr
2506 | 17 | : Subtarget->hasAVX() ? X86::VCVTSS2SDrr : X86::CVTSS2SDrr;
2507 | 17 | return X86SelectFPExtOrFPTrunc(I, Opc, TLI.getRegClassFor(MVT::f64)); |
2508 | 17 | } |
2509 | 12 | |
2510 | 12 | return false; |
2511 | 12 | } |
2512 | | |
2513 | 9 | bool X86FastISel::X86SelectFPTrunc(const Instruction *I) { |
2514 | 9 | if (X86ScalarSSEf64 && I->getType()->isFloatTy() && |
2515 | 9 | I->getOperand(0)->getType()->isDoubleTy()) { |
2516 | 9 | bool HasAVX512 = Subtarget->hasAVX512(); |
2517 | 9 | // fptrunc from double to float. |
2518 | 9 | unsigned Opc = |
2519 | 9 | HasAVX512 ? X86::VCVTSD2SSZrr
2520 | 9 | : Subtarget->hasAVX() ? X86::VCVTSD2SSrr : X86::CVTSD2SSrr;
2521 | 9 | return X86SelectFPExtOrFPTrunc(I, Opc, TLI.getRegClassFor(MVT::f32)); |
2522 | 9 | } |
2523 | 0 | |
2524 | 0 | return false; |
2525 | 0 | } |
2526 | | |
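// [Editor's annotation, not part of the original source] The two selectors
// above each cover exactly one scalar pattern, e.g.
//   %d = fpext float %f to double    -->  cvtss2sd   (plain SSE2)
//   %f = fptrunc double %d to float  -->  vcvtsd2ss  (AVX, with IMPLICIT_DEF)
// Every other fp-to-fp conversion returns false and falls back to
// SelectionDAG instruction selection.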
2527 | 33 | bool X86FastISel::X86SelectTrunc(const Instruction *I) { |
2528 | 33 | EVT SrcVT = TLI.getValueType(DL, I->getOperand(0)->getType()); |
2529 | 33 | EVT DstVT = TLI.getValueType(DL, I->getType()); |
2530 | 33 | |
2531 | 33 | // This code only handles truncation to byte. |
2532 | 33 | if (DstVT != MVT::i8 && DstVT != MVT::i1)
2533 | 2 | return false; |
2534 | 31 | if (!TLI.isTypeLegal(SrcVT)) |
2535 | 2 | return false; |
2536 | 29 | |
2537 | 29 | unsigned InputReg = getRegForValue(I->getOperand(0)); |
2538 | 29 | if (!InputReg) |
2539 | 0 | // Unhandled operand. Halt "fast" selection and bail. |
2540 | 0 | return false; |
2541 | 29 | |
2542 | 29 | if (SrcVT == MVT::i8) { |
2543 | 6 | // Truncate from i8 to i1; no code needed. |
2544 | 6 | updateValueMap(I, InputReg); |
2545 | 6 | return true; |
2546 | 6 | } |
2547 | 23 | |
2548 | 23 | // Issue an extract_subreg. |
2549 | 23 | unsigned ResultReg = fastEmitInst_extractsubreg(MVT::i8, |
2550 | 23 | InputReg, false, |
2551 | 23 | X86::sub_8bit); |
2552 | 23 | if (!ResultReg) |
2553 | 0 | return false; |
2554 | 23 | |
2555 | 23 | updateValueMap(I, ResultReg); |
2556 | 23 | return true; |
2557 | 23 | } |
2558 | | |
2559 | 47 | bool X86FastISel::IsMemcpySmall(uint64_t Len) { |
2560 | 47 | return Len <= (Subtarget->is64Bit() ? 32 : 16);
2561 | 47 | } |
2562 | | |
2563 | | bool X86FastISel::TryEmitSmallMemcpy(X86AddressMode DestAM, |
2564 | 26 | X86AddressMode SrcAM, uint64_t Len) { |
2565 | 26 | |
2566 | 26 | // Make sure we don't bloat code by inlining very large memcpy's. |
2567 | 26 | if (!IsMemcpySmall(Len)) |
2568 | 1 | return false; |
2569 | 25 | |
2570 | 25 | bool i64Legal = Subtarget->is64Bit(); |
2571 | 25 | |
2572 | 25 | // We don't care about alignment here since we just emit integer accesses. |
2573 | 74 | while (Len) { |
2574 | 49 | MVT VT; |
2575 | 49 | if (Len >= 8 && i64Legal)
2576 | 29 | VT = MVT::i64; |
2577 | 20 | else if (Len >= 4) |
2578 | 20 | VT = MVT::i32; |
2579 | 0 | else if (Len >= 2) |
2580 | 0 | VT = MVT::i16; |
2581 | 0 | else |
2582 | 0 | VT = MVT::i8; |
2583 | 49 | |
2584 | 49 | unsigned Reg; |
2585 | 49 | bool RV = X86FastEmitLoad(VT, SrcAM, nullptr, Reg); |
2586 | 49 | RV &= X86FastEmitStore(VT, Reg, /*Kill=*/true, DestAM); |
2587 | 49 | assert(RV && "Failed to emit load or store??"); |
2588 | 49 | |
2589 | 49 | unsigned Size = VT.getSizeInBits()/8; |
2590 | 49 | Len -= Size; |
2591 | 49 | DestAM.Disp += Size; |
2592 | 49 | SrcAM.Disp += Size; |
2593 | 49 | } |
2594 | 25 | |
2595 | 25 | return true; |
2596 | 25 | } |
2597 | | |
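// [Editor's annotation, not part of the original source] Worked example for
// TryEmitSmallMemcpy with Len == 12 on x86-64: the loop emits an i64 pair,
// then an i32 pair, advancing Disp on both address modes:
//   movq (%rsi), %rax ; movq %rax, (%rdi)      // 8 bytes
//   movl 8(%rsi), %eax ; movl %eax, 8(%rdi)    // 4 bytes
// The mnemonics are a sketch; the real opcodes come from X86FastEmitLoad and
// X86FastEmitStore.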
2598 | 2.43k | bool X86FastISel::fastLowerIntrinsicCall(const IntrinsicInst *II) { |
2599 | 2.43k | // FIXME: Handle more intrinsics. |
2600 | 2.43k | switch (II->getIntrinsicID()) { |
2601 | 2.43k | default: return false;
2602 | 2.43k | case Intrinsic::convert_from_fp16: |
2603 | 2 | case Intrinsic::convert_to_fp16: { |
2604 | 2 | if (Subtarget->useSoftFloat() || !Subtarget->hasF16C()) |
2605 | 0 | return false; |
2606 | 2 | |
2607 | 2 | const Value *Op = II->getArgOperand(0); |
2608 | 2 | unsigned InputReg = getRegForValue(Op); |
2609 | 2 | if (InputReg == 0) |
2610 | 0 | return false; |
2611 | 2 | |
2612 | 2 | // F16C only allows converting from float to half and from half to float. |
2613 | 2 | bool IsFloatToHalf = II->getIntrinsicID() == Intrinsic::convert_to_fp16; |
2614 | 2 | if (IsFloatToHalf) { |
2615 | 1 | if (!Op->getType()->isFloatTy()) |
2616 | 0 | return false; |
2617 | 1 | } else { |
2618 | 1 | if (!II->getType()->isFloatTy()) |
2619 | 0 | return false; |
2620 | 2 | } |
2621 | 2 | |
2622 | 2 | unsigned ResultReg = 0; |
2623 | 2 | const TargetRegisterClass *RC = TLI.getRegClassFor(MVT::v8i16); |
2624 | 2 | if (IsFloatToHalf) { |
2625 | 1 | // 'InputReg' is implicitly promoted from register class FR32 to |
2626 | 1 | // register class VR128 by method 'constrainOperandRegClass' which is |
2627 | 1 | // directly called by 'fastEmitInst_ri'. |
2628 | 1 | // Instruction VCVTPS2PHrr takes an extra immediate operand which is |
2629 | 1 | // used to provide rounding control: use MXCSR.RC, encoded as 0b100. |
2630 | 1 | // It's consistent with the other FP instructions, which are usually |
2631 | 1 | // controlled by MXCSR. |
2632 | 1 | InputReg = fastEmitInst_ri(X86::VCVTPS2PHrr, RC, InputReg, false, 4); |
2633 | 1 | |
2634 | 1 | // Move the lower 32-bits of ResultReg to another register of class GR32. |
2635 | 1 | ResultReg = createResultReg(&X86::GR32RegClass); |
2636 | 1 | BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, |
2637 | 1 | TII.get(X86::VMOVPDI2DIrr), ResultReg) |
2638 | 1 | .addReg(InputReg, RegState::Kill); |
2639 | 1 | |
2640 | 1 | // The result value is in the lower 16-bits of ResultReg. |
2641 | 1 | unsigned RegIdx = X86::sub_16bit; |
2642 | 1 | ResultReg = fastEmitInst_extractsubreg(MVT::i16, ResultReg, true, RegIdx); |
2643 | 1 | } else { |
2644 | 1 | assert(Op->getType()->isIntegerTy(16) && "Expected a 16-bit integer!"); |
2645 | 1 | // Explicitly sign-extend the input to 32-bit. |
2646 | 1 | InputReg = fastEmit_r(MVT::i16, MVT::i32, ISD::SIGN_EXTEND, InputReg, |
2647 | 1 | /*Kill=*/false); |
2648 | 1 | |
2649 | 1 | // The following SCALAR_TO_VECTOR will be expanded into a VMOVDI2PDIrr. |
2650 | 1 | InputReg = fastEmit_r(MVT::i32, MVT::v4i32, ISD::SCALAR_TO_VECTOR, |
2651 | 1 | InputReg, /*Kill=*/true); |
2652 | 1 | |
2653 | 1 | InputReg = fastEmitInst_r(X86::VCVTPH2PSrr, RC, InputReg, /*Kill=*/true); |
2654 | 1 | |
2655 | 1 | // The result value is in the lower 32-bits of ResultReg. |
2656 | 1 | // Emit an explicit copy from register class VR128 to register class FR32. |
2657 | 1 | ResultReg = createResultReg(&X86::FR32RegClass); |
2658 | 1 | BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, |
2659 | 1 | TII.get(TargetOpcode::COPY), ResultReg) |
2660 | 1 | .addReg(InputReg, RegState::Kill); |
2661 | 1 | } |
2662 | 2 | |
2663 | 2 | updateValueMap(II, ResultReg); |
2664 | 2 | return true; |
2665 | 2 | } |
2666 | 10 | case Intrinsic::frameaddress: { |
2667 | 10 | MachineFunction *MF = FuncInfo.MF; |
2668 | 10 | if (MF->getTarget().getMCAsmInfo()->usesWindowsCFI()) |
2669 | 2 | return false; |
2670 | 8 | |
2671 | 8 | Type *RetTy = II->getCalledFunction()->getReturnType(); |
2672 | 8 | |
2673 | 8 | MVT VT; |
2674 | 8 | if (!isTypeLegal(RetTy, VT)) |
2675 | 0 | return false; |
2676 | 8 | |
2677 | 8 | unsigned Opc; |
2678 | 8 | const TargetRegisterClass *RC = nullptr; |
2679 | 8 | |
2680 | 8 | switch (VT.SimpleTy) { |
2681 | 8 | default: llvm_unreachable("Invalid result type for frameaddress.");
2682 | 8 | case MVT::i32: Opc = X86::MOV32rm; RC = &X86::GR32RegClass; break;
2683 | 8 | case MVT::i64: Opc = X86::MOV64rm; RC = &X86::GR64RegClass; break;
2684 | 8 | } |
2685 | 8 | |
2686 | 8 | // This needs to be set before we call getPtrSizedFrameRegister, otherwise |
2687 | 8 | // we get the wrong frame register. |
2688 | 8 | MachineFrameInfo &MFI = MF->getFrameInfo(); |
2689 | 8 | MFI.setFrameAddressIsTaken(true); |
2690 | 8 | |
2691 | 8 | const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo(); |
2692 | 8 | unsigned FrameReg = RegInfo->getPtrSizedFrameRegister(*MF); |
2693 | 8 | assert(((FrameReg == X86::RBP && VT == MVT::i64) || |
2694 | 8 | (FrameReg == X86::EBP && VT == MVT::i32)) && |
2695 | 8 | "Invalid Frame Register!"); |
2696 | 8 | |
2697 | 8 | // Always make a copy of the frame register to a vreg first, so that we |
2698 | 8 | // never directly reference the frame register (the TwoAddressInstruction- |
2699 | 8 | // Pass doesn't like that). |
2700 | 8 | unsigned SrcReg = createResultReg(RC); |
2701 | 8 | BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, |
2702 | 8 | TII.get(TargetOpcode::COPY), SrcReg).addReg(FrameReg); |
2703 | 8 | |
2704 | 8 | // Now recursively load from the frame address. |
2705 | 8 | // movq (%rbp), %rax |
2706 | 8 | // movq (%rax), %rax |
2707 | 8 | // movq (%rax), %rax |
2708 | 8 | // ... |
2709 | 8 | unsigned DestReg; |
2710 | 8 | unsigned Depth = cast<ConstantInt>(II->getOperand(0))->getZExtValue(); |
2711 | 16 | while (Depth--) { |
2712 | 8 | DestReg = createResultReg(RC); |
2713 | 8 | addDirectMem(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, |
2714 | 8 | TII.get(Opc), DestReg), SrcReg); |
2715 | 8 | SrcReg = DestReg; |
2716 | 8 | } |
2717 | 8 | |
2718 | 8 | updateValueMap(II, SrcReg); |
2719 | 8 | return true; |
2720 | 8 | } |
2721 | 21 | case Intrinsic::memcpy: { |
2722 | 21 | const MemCpyInst *MCI = cast<MemCpyInst>(II); |
2723 | 21 | // Don't handle volatile or variable length memcpys. |
2724 | 21 | if (MCI->isVolatile()) |
2725 | 0 | return false; |
2726 | 21 | |
2727 | 21 | if (isa<ConstantInt>(MCI->getLength())) { |
2728 | 21 | // Small memcpy's are common enough that we want to do them |
2729 | 21 | // without a call if possible. |
2730 | 21 | uint64_t Len = cast<ConstantInt>(MCI->getLength())->getZExtValue(); |
2731 | 21 | if (IsMemcpySmall(Len)) { |
2732 | 18 | X86AddressMode DestAM, SrcAM; |
2733 | 18 | if (!X86SelectAddress(MCI->getRawDest(), DestAM) || |
2734 | 18 | !X86SelectAddress(MCI->getRawSource(), SrcAM)) |
2735 | 0 | return false; |
2736 | 18 | TryEmitSmallMemcpy(DestAM, SrcAM, Len); |
2737 | 18 | return true; |
2738 | 18 | } |
2739 | 21 | } |
2740 | 3 | |
2741 | 3 | unsigned SizeWidth = Subtarget->is64Bit() ? 64 : 32;
2742 | 3 | if (!MCI->getLength()->getType()->isIntegerTy(SizeWidth)) |
2743 | 0 | return false; |
2744 | 3 | |
2745 | 3 | if (MCI->getSourceAddressSpace() > 255 || MCI->getDestAddressSpace() > 255) |
2746 | 0 | return false; |
2747 | 3 | |
2748 | 3 | return lowerCallTo(II, "memcpy", II->getNumArgOperands() - 1); |
2749 | 3 | } |
2750 | 17 | case Intrinsic::memset: { |
2751 | 17 | const MemSetInst *MSI = cast<MemSetInst>(II); |
2752 | 17 | |
2753 | 17 | if (MSI->isVolatile()) |
2754 | 0 | return false; |
2755 | 17 | |
2756 | 17 | unsigned SizeWidth = Subtarget->is64Bit() ? 64 : 32;
2757 | 17 | if (!MSI->getLength()->getType()->isIntegerTy(SizeWidth)) |
2758 | 0 | return false; |
2759 | 17 | |
2760 | 17 | if (MSI->getDestAddressSpace() > 255) |
2761 | 0 | return false; |
2762 | 17 | |
2763 | 17 | return lowerCallTo(II, "memset", II->getNumArgOperands() - 1); |
2764 | 17 | } |
2765 | 25 | case Intrinsic::stackprotector: { |
2766 | 25 | // Emit code to store the stack guard onto the stack. |
2767 | 25 | EVT PtrTy = TLI.getPointerTy(DL); |
2768 | 25 | |
2769 | 25 | const Value *Op1 = II->getArgOperand(0); // The guard's value. |
2770 | 25 | const AllocaInst *Slot = cast<AllocaInst>(II->getArgOperand(1)); |
2771 | 25 | |
2772 | 25 | MFI.setStackProtectorIndex(FuncInfo.StaticAllocaMap[Slot]); |
2773 | 25 | |
2774 | 25 | // Grab the frame index. |
2775 | 25 | X86AddressMode AM; |
2776 | 25 | if (!X86SelectAddress(Slot, AM)) return false;
2777 | 25 | if (!X86FastEmitStore(PtrTy, Op1, AM)) return false;
2778 | 25 | return true; |
2779 | 25 | } |
2780 | 25 | case Intrinsic::dbg_declare: { |
2781 | 0 | const DbgDeclareInst *DI = cast<DbgDeclareInst>(II); |
2782 | 0 | X86AddressMode AM; |
2783 | 0 | assert(DI->getAddress() && "Null address should be checked earlier!"); |
2784 | 0 | if (!X86SelectAddress(DI->getAddress(), AM)) |
2785 | 0 | return false; |
2786 | 0 | const MCInstrDesc &II = TII.get(TargetOpcode::DBG_VALUE); |
2787 | 0 | // FIXME may need to add RegState::Debug to any registers produced, |
2788 | 0 | // although ESP/EBP should be the only ones at the moment. |
2789 | 0 | assert(DI->getVariable()->isValidLocationForIntrinsic(DbgLoc) && |
2790 | 0 | "Expected inlined-at fields to agree"); |
2791 | 0 | addFullAddress(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II), AM) |
2792 | 0 | .addImm(0) |
2793 | 0 | .addMetadata(DI->getVariable()) |
2794 | 0 | .addMetadata(DI->getExpression()); |
2795 | 0 | return true; |
2796 | 0 | } |
2797 | 2 | case Intrinsic::trap: { |
2798 | 2 | BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::TRAP)); |
2799 | 2 | return true; |
2800 | 0 | } |
2801 | 32 | case Intrinsic::sqrt: { |
2802 | 32 | if (!Subtarget->hasSSE1()) |
2803 | 0 | return false; |
2804 | 32 | |
2805 | 32 | Type *RetTy = II->getCalledFunction()->getReturnType(); |
2806 | 32 | |
2807 | 32 | MVT VT; |
2808 | 32 | if (!isTypeLegal(RetTy, VT)) |
2809 | 0 | return false; |
2810 | 32 | |
2811 | 32 | // Unfortunately we can't use fastEmit_r, because the AVX version of FSQRT |
2812 | 32 | // is not generated by FastISel yet. |
2813 | 32 | // FIXME: Update this code once tablegen can handle it. |
2814 | 32 | static const uint16_t SqrtOpc[3][2] = { |
2815 | 32 | { X86::SQRTSSr, X86::SQRTSDr }, |
2816 | 32 | { X86::VSQRTSSr, X86::VSQRTSDr }, |
2817 | 32 | { X86::VSQRTSSZr, X86::VSQRTSDZr }, |
2818 | 32 | }; |
2819 | 32 | unsigned AVXLevel = Subtarget->hasAVX512() ? 2 :
2820 | 32 | Subtarget->hasAVX() ? 1 :
2821 | 22 | 0;
2822 | 32 | unsigned Opc; |
2823 | 32 | switch (VT.SimpleTy) { |
2824 | 32 | default: return false;
2825 | 32 | case MVT::f32: Opc = SqrtOpc[AVXLevel][0]; break;
2826 | 32 | case MVT::f64: Opc = SqrtOpc[AVXLevel][1]; break;
2827 | 12 | } |
2828 | 12 | |
2829 | 12 | const Value *SrcVal = II->getArgOperand(0); |
2830 | 12 | unsigned SrcReg = getRegForValue(SrcVal); |
2831 | 12 | |
2832 | 12 | if (SrcReg == 0) |
2833 | 0 | return false; |
2834 | 12 | |
2835 | 12 | const TargetRegisterClass *RC = TLI.getRegClassFor(VT); |
2836 | 12 | unsigned ImplicitDefReg = 0; |
2837 | 12 | if (AVXLevel > 0) { |
2838 | 6 | ImplicitDefReg = createResultReg(RC); |
2839 | 6 | BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, |
2840 | 6 | TII.get(TargetOpcode::IMPLICIT_DEF), ImplicitDefReg); |
2841 | 6 | } |
2842 | 12 | |
2843 | 12 | unsigned ResultReg = createResultReg(RC); |
2844 | 12 | MachineInstrBuilder MIB; |
2845 | 12 | MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), |
2846 | 12 | ResultReg); |
2847 | 12 | |
2848 | 12 | if (ImplicitDefReg) |
2849 | 6 | MIB.addReg(ImplicitDefReg); |
2850 | 12 | |
2851 | 12 | MIB.addReg(SrcReg); |
2852 | 12 | |
2853 | 12 | updateValueMap(II, ResultReg); |
2854 | 12 | return true; |
2855 | 12 | } |
2856 | 83 | case Intrinsic::sadd_with_overflow: |
2857 | 83 | case Intrinsic::uadd_with_overflow: |
2858 | 83 | case Intrinsic::ssub_with_overflow: |
2859 | 83 | case Intrinsic::usub_with_overflow: |
2860 | 83 | case Intrinsic::smul_with_overflow: |
2861 | 83 | case Intrinsic::umul_with_overflow: { |
2862 | 83 | // This implements the basic lowering of the xalu with overflow intrinsics |
2863 | 83 | // into add/sub/mul followed by either seto or setb. |
2864 | 83 | const Function *Callee = II->getCalledFunction(); |
2865 | 83 | auto *Ty = cast<StructType>(Callee->getReturnType()); |
2866 | 83 | Type *RetTy = Ty->getTypeAtIndex(0U); |
2867 | 83 | assert(Ty->getTypeAtIndex(1)->isIntegerTy() && |
2868 | 83 | Ty->getTypeAtIndex(1)->getScalarSizeInBits() == 1 && |
2869 | 83 | "Overflow value expected to be an i1"); |
2870 | 83 | |
2871 | 83 | MVT VT; |
2872 | 83 | if (!isTypeLegal(RetTy, VT)) |
2873 | 0 | return false; |
2874 | 83 | |
2875 | 83 | if (VT < MVT::i8 || VT > MVT::i64) |
2876 | 0 | return false; |
2877 | 83 | |
2878 | 83 | const Value *LHS = II->getArgOperand(0); |
2879 | 83 | const Value *RHS = II->getArgOperand(1); |
2880 | 83 | |
2881 | 83 | // Canonicalize immediate to the RHS. |
2882 | 83 | if (isa<ConstantInt>(LHS) && !isa<ConstantInt>(RHS) &&
2883 | 83 | isCommutativeIntrinsic(II))
2884 | 1 | std::swap(LHS, RHS); |
2885 | 83 | |
2886 | 83 | unsigned BaseOpc, CondCode; |
2887 | 83 | switch (II->getIntrinsicID()) { |
2888 | 83 | default: llvm_unreachable("Unexpected intrinsic!");
2889 | 83 | case Intrinsic::sadd_with_overflow: |
2890 | 23 | BaseOpc = ISD::ADD; CondCode = X86::COND_O; break; |
2891 | 83 | case Intrinsic::uadd_with_overflow: |
2892 | 11 | BaseOpc = ISD::ADD; CondCode = X86::COND_B; break; |
2893 | 83 | case Intrinsic::ssub_with_overflow: |
2894 | 7 | BaseOpc = ISD::SUB; CondCode = X86::COND_O; break; |
2895 | 83 | case Intrinsic::usub_with_overflow: |
2896 | 6 | BaseOpc = ISD::SUB; CondCode = X86::COND_B; break; |
2897 | 83 | case Intrinsic::smul_with_overflow: |
2898 | 18 | BaseOpc = X86ISD::SMUL; CondCode = X86::COND_O; break; |
2899 | 83 | case Intrinsic::umul_with_overflow: |
2900 | 18 | BaseOpc = X86ISD::UMUL; CondCode = X86::COND_O; break; |
2901 | 83 | } |
2902 | 83 | |
2903 | 83 | unsigned LHSReg = getRegForValue(LHS); |
2904 | 83 | if (LHSReg == 0) |
2905 | 0 | return false; |
2906 | 83 | bool LHSIsKill = hasTrivialKill(LHS); |
2907 | 83 | |
2908 | 83 | unsigned ResultReg = 0; |
2909 | 83 | // Check if we have an immediate version. |
2910 | 83 | if (const auto *CI = dyn_cast<ConstantInt>(RHS)) { |
2911 | 17 | static const uint16_t Opc[2][4] = { |
2912 | 17 | { X86::INC8r, X86::INC16r, X86::INC32r, X86::INC64r }, |
2913 | 17 | { X86::DEC8r, X86::DEC16r, X86::DEC32r, X86::DEC64r } |
2914 | 17 | }; |
2915 | 17 | |
2916 | 17 | if (CI->isOne() && (BaseOpc == ISD::ADD || BaseOpc == ISD::SUB) &&
2917 | 17 | CondCode == X86::COND_O) {
2918 | 6 | // We can use INC/DEC. |
2919 | 6 | ResultReg = createResultReg(TLI.getRegClassFor(VT)); |
2920 | 6 | bool IsDec = BaseOpc == ISD::SUB; |
2921 | 6 | BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, |
2922 | 6 | TII.get(Opc[IsDec][VT.SimpleTy-MVT::i8]), ResultReg) |
2923 | 6 | .addReg(LHSReg, getKillRegState(LHSIsKill)); |
2924 | 6 | } else |
2925 | 11 | ResultReg = fastEmit_ri(VT, VT, BaseOpc, LHSReg, LHSIsKill, |
2926 | 11 | CI->getZExtValue()); |
2927 | 17 | } |
2928 | 83 | |
2929 | 83 | unsigned RHSReg; |
2930 | 83 | bool RHSIsKill; |
2931 | 83 | if (!ResultReg) { |
2932 | 68 | RHSReg = getRegForValue(RHS); |
2933 | 68 | if (RHSReg == 0) |
2934 | 0 | return false; |
2935 | 68 | RHSIsKill = hasTrivialKill(RHS); |
2936 | 68 | ResultReg = fastEmit_rr(VT, VT, BaseOpc, LHSReg, LHSIsKill, RHSReg, |
2937 | 68 | RHSIsKill); |
2938 | 68 | } |
2939 | 83 | |
2940 | 83 | // FastISel doesn't have a pattern for all X86::MUL*r and X86::IMUL*r. Emit |
2941 | 83 | // it manually. |
2942 | 83 | if (BaseOpc == X86ISD::UMUL && !ResultReg) {
2943 | 18 | static const uint16_t MULOpc[] = |
2944 | 18 | { X86::MUL8r, X86::MUL16r, X86::MUL32r, X86::MUL64r }; |
2945 | 18 | static const MCPhysReg Reg[] = { X86::AL, X86::AX, X86::EAX, X86::RAX }; |
2946 | 18 | // First copy the first operand into RAX, which is an implicit input to |
2947 | 18 | // the X86::MUL*r instruction. |
2948 | 18 | BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, |
2949 | 18 | TII.get(TargetOpcode::COPY), Reg[VT.SimpleTy-MVT::i8]) |
2950 | 18 | .addReg(LHSReg, getKillRegState(LHSIsKill)); |
2951 | 18 | ResultReg = fastEmitInst_r(MULOpc[VT.SimpleTy-MVT::i8], |
2952 | 18 | TLI.getRegClassFor(VT), RHSReg, RHSIsKill); |
2953 | 65 | } else if (BaseOpc == X86ISD::SMUL && !ResultReg) {
2954 | 18 | static const uint16_t MULOpc[] = |
2955 | 18 | { X86::IMUL8r, X86::IMUL16rr, X86::IMUL32rr, X86::IMUL64rr }; |
2956 | 18 | if (VT == MVT::i8) { |
2957 | 4 | // Copy the first operand into AL, which is an implicit input to the |
2958 | 4 | // X86::IMUL8r instruction. |
2959 | 4 | BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, |
2960 | 4 | TII.get(TargetOpcode::COPY), X86::AL) |
2961 | 4 | .addReg(LHSReg, getKillRegState(LHSIsKill)); |
2962 | 4 | ResultReg = fastEmitInst_r(MULOpc[0], TLI.getRegClassFor(VT), RHSReg, |
2963 | 4 | RHSIsKill); |
2964 | 4 | } else |
2965 | 14 | ResultReg = fastEmitInst_rr(MULOpc[VT.SimpleTy-MVT::i8], |
2966 | 14 | TLI.getRegClassFor(VT), LHSReg, LHSIsKill, |
2967 | 14 | RHSReg, RHSIsKill); |
2968 | 18 | } |
2969 | 83 | |
2970 | 83 | if (!ResultReg) |
2971 | 0 | return false; |
2972 | 83 | |
2973 | 83 | // Assign to a GPR since the overflow return value is lowered to a SETcc. |
2974 | 83 | unsigned ResultReg2 = createResultReg(&X86::GR8RegClass); |
2975 | 83 | assert((ResultReg+1) == ResultReg2 && "Nonconsecutive result registers."); |
2976 | 83 | BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::SETCCr), |
2977 | 83 | ResultReg2).addImm(CondCode); |
2978 | 83 | |
2979 | 83 | updateValueMap(II, ResultReg, 2); |
2980 | 83 | return true; |
2981 | 83 | } |
2982 | 83 | case Intrinsic::x86_sse_cvttss2si: |
2983 | 32 | case Intrinsic::x86_sse_cvttss2si64: |
2984 | 32 | case Intrinsic::x86_sse2_cvttsd2si: |
2985 | 32 | case Intrinsic::x86_sse2_cvttsd2si64: { |
2986 | 32 | bool IsInputDouble; |
2987 | 32 | switch (II->getIntrinsicID()) { |
2988 | 32 | default: llvm_unreachable("Unexpected intrinsic.");
2989 | 32 | case Intrinsic::x86_sse_cvttss2si: |
2990 | 19 | case Intrinsic::x86_sse_cvttss2si64: |
2991 | 19 | if (!Subtarget->hasSSE1()) |
2992 | 0 | return false; |
2993 | 19 | IsInputDouble = false; |
2994 | 19 | break; |
2995 | 19 | case Intrinsic::x86_sse2_cvttsd2si: |
2996 | 13 | case Intrinsic::x86_sse2_cvttsd2si64: |
2997 | 13 | if (!Subtarget->hasSSE2()) |
2998 | 0 | return false; |
2999 | 13 | IsInputDouble = true; |
3000 | 13 | break; |
3001 | 32 | } |
3002 | 32 | |
3003 | 32 | Type *RetTy = II->getCalledFunction()->getReturnType(); |
3004 | 32 | MVT VT; |
3005 | 32 | if (!isTypeLegal(RetTy, VT)) |
3006 | 0 | return false; |
3007 | 32 | |
3008 | 32 | static const uint16_t CvtOpc[3][2][2] = { |
3009 | 32 | { { X86::CVTTSS2SIrr, X86::CVTTSS2SI64rr }, |
3010 | 32 | { X86::CVTTSD2SIrr, X86::CVTTSD2SI64rr } }, |
3011 | 32 | { { X86::VCVTTSS2SIrr, X86::VCVTTSS2SI64rr }, |
3012 | 32 | { X86::VCVTTSD2SIrr, X86::VCVTTSD2SI64rr } }, |
3013 | 32 | { { X86::VCVTTSS2SIZrr, X86::VCVTTSS2SI64Zrr }, |
3014 | 32 | { X86::VCVTTSD2SIZrr, X86::VCVTTSD2SI64Zrr } }, |
3015 | 32 | }; |
3016 | 32 | unsigned AVXLevel = Subtarget->hasAVX512() ? 2 :
3017 | 32 | Subtarget->hasAVX() ? 1 :
3018 | 24 | 0;
3019 | 32 | unsigned Opc; |
3020 | 32 | switch (VT.SimpleTy) { |
3021 | 32 | default: llvm_unreachable("Unexpected result type.");
3022 | 32 | case MVT::i32: Opc = CvtOpc[AVXLevel][IsInputDouble][0]; break;
3023 | 32 | case MVT::i64: Opc = CvtOpc[AVXLevel][IsInputDouble][1]; break;
3024 | 32 | } |
3025 | 32 | |
3026 | 32 | // Check if we can fold insertelement instructions into the convert. |
3027 | 32 | const Value *Op = II->getArgOperand(0); |
3028 | 48 | while (auto *IE = dyn_cast<InsertElementInst>(Op)) { |
3029 | 24 | const Value *Index = IE->getOperand(2); |
3030 | 24 | if (!isa<ConstantInt>(Index)) |
3031 | 0 | break; |
3032 | 24 | unsigned Idx = cast<ConstantInt>(Index)->getZExtValue(); |
3033 | 24 | |
3034 | 24 | if (Idx == 0) { |
3035 | 8 | Op = IE->getOperand(1); |
3036 | 8 | break; |
3037 | 8 | } |
3038 | 16 | Op = IE->getOperand(0); |
3039 | 16 | } |
3040 | 32 | |
3041 | 32 | unsigned Reg = getRegForValue(Op); |
3042 | 32 | if (Reg == 0) |
3043 | 0 | return false; |
3044 | 32 | |
3045 | 32 | unsigned ResultReg = createResultReg(TLI.getRegClassFor(VT)); |
3046 | 32 | BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), ResultReg) |
3047 | 32 | .addReg(Reg); |
3048 | 32 | |
3049 | 32 | updateValueMap(II, ResultReg); |
3050 | 32 | return true; |
3051 | 32 | } |
3052 | 2.43k | } |
3053 | 2.43k | } |
3054 | | |
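// [Editor's annotation, not part of the original source] Sketch of the
// *.with.overflow lowering above: for
//   %r = call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 %a, i32 %b)
// it emits the arithmetic op followed by a SETCC on the right flag, roughly
//   addl %esi, %edi    ; value result
//   setb %al           ; overflow result (X86::COND_B, i.e. carry set)
// while the signed variants test overflow with seto (X86::COND_O).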
3055 | 9.56k | bool X86FastISel::fastLowerArguments() { |
3056 | 9.56k | if (!FuncInfo.CanLowerReturn) |
3057 | 0 | return false; |
3058 | 9.56k | |
3059 | 9.56k | const Function *F = FuncInfo.Fn; |
3060 | 9.56k | if (F->isVarArg()) |
3061 | 9 | return false; |
3062 | 9.55k | |
3063 | 9.55k | CallingConv::ID CC = F->getCallingConv(); |
3064 | 9.55k | if (CC != CallingConv::C) |
3065 | 130 | return false; |
3066 | 9.42k | |
3067 | 9.42k | if (Subtarget->isCallingConvWin64(CC)) |
3068 | 155 | return false; |
3069 | 9.26k | |
3070 | 9.26k | if (!Subtarget->is64Bit()) |
3071 | 3.47k | return false; |
3072 | 5.79k | |
3073 | 5.79k | if (Subtarget->useSoftFloat()) |
3074 | 1 | return false; |
3075 | 5.79k | |
3076 | 5.79k | // Only handle simple cases, i.e. up to 6 i32/i64 scalar arguments.
3077 | 5.79k | unsigned GPRCnt = 0; |
3078 | 5.79k | unsigned FPRCnt = 0; |
3079 | 6.89k | for (auto const &Arg : F->args()) { |
3080 | 6.89k | if (Arg.hasAttribute(Attribute::ByVal) || |
3081 | 6.89k | Arg.hasAttribute(Attribute::InReg) ||
3082 | 6.89k | Arg.hasAttribute(Attribute::StructRet) ||
3083 | 6.89k | Arg.hasAttribute(Attribute::SwiftSelf) ||
3084 | 6.89k | Arg.hasAttribute(Attribute::SwiftError) ||
3085 | 6.89k | Arg.hasAttribute(Attribute::Nest))
3086 | 26 | return false; |
3087 | 6.86k | |
3088 | 6.86k | Type *ArgTy = Arg.getType(); |
3089 | 6.86k | if (ArgTy->isStructTy() || ArgTy->isArrayTy() || ArgTy->isVectorTy())
3090 | 2.70k | return false; |
3091 | 4.16k | |
3092 | 4.16k | EVT ArgVT = TLI.getValueType(DL, ArgTy); |
3093 | 4.16k | if (!ArgVT.isSimple()) return false;
3094 | 4.15k | switch (ArgVT.getSimpleVT().SimpleTy) { |
3095 | 4.15k | default: return false;
3096 | 4.15k | case MVT::i32: |
3097 | 2.76k | case MVT::i64: |
3098 | 2.76k | ++GPRCnt; |
3099 | 2.76k | break; |
3100 | 2.76k | case MVT::f32: |
3101 | 877 | case MVT::f64: |
3102 | 877 | if (!Subtarget->hasSSE1()) |
3103 | 0 | return false; |
3104 | 877 | ++FPRCnt; |
3105 | 877 | break; |
3106 | 3.64k | } |
3107 | 3.64k | |
3108 | 3.64k | if (GPRCnt > 6) |
3109 | 4 | return false; |
3110 | 3.63k | |
3111 | 3.63k | if (FPRCnt > 8) |
3112 | 1 | return false; |
3113 | 3.63k | } |
3114 | 5.79k | |
3115 | 5.79k | static const MCPhysReg GPR32ArgRegs[] = { |
3116 | 2.53k | X86::EDI, X86::ESI, X86::EDX, X86::ECX, X86::R8D, X86::R9D |
3117 | 2.53k | }; |
3118 | 2.53k | static const MCPhysReg GPR64ArgRegs[] = { |
3119 | 2.53k | X86::RDI, X86::RSI, X86::RDX, X86::RCX, X86::R8 , X86::R9 |
3120 | 2.53k | }; |
3121 | 2.53k | static const MCPhysReg XMMArgRegs[] = { |
3122 | 2.53k | X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3, |
3123 | 2.53k | X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7 |
3124 | 2.53k | }; |
3125 | 2.53k | |
3126 | 2.53k | unsigned GPRIdx = 0; |
3127 | 2.53k | unsigned FPRIdx = 0; |
3128 | 3.12k | for (auto const &Arg : F->args()) { |
3129 | 3.12k | MVT VT = TLI.getSimpleValueType(DL, Arg.getType()); |
3130 | 3.12k | const TargetRegisterClass *RC = TLI.getRegClassFor(VT); |
3131 | 3.12k | unsigned SrcReg; |
3132 | 3.12k | switch (VT.SimpleTy) { |
3133 | 3.12k | default: llvm_unreachable("Unexpected value type.");
3134 | 3.12k | case MVT::i32: SrcReg = GPR32ArgRegs[GPRIdx++]; break;
3135 | 3.12k | case MVT::i64: SrcReg = GPR64ArgRegs[GPRIdx++]; break;
3136 | 3.12k | case MVT::f32: LLVM_FALLTHROUGH;
3137 | 868 | case MVT::f64: SrcReg = XMMArgRegs[FPRIdx++]; break; |
3138 | 3.12k | } |
3139 | 3.12k | unsigned DstReg = FuncInfo.MF->addLiveIn(SrcReg, RC); |
3140 | 3.12k | // FIXME: Unfortunately it's necessary to emit a copy from the livein copy. |
3141 | 3.12k | // Without this, EmitLiveInCopies may eliminate the livein if its only |
3142 | 3.12k | // use is a bitcast (which isn't turned into an instruction). |
3143 | 3.12k | unsigned ResultReg = createResultReg(RC); |
3144 | 3.12k | BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, |
3145 | 3.12k | TII.get(TargetOpcode::COPY), ResultReg) |
3146 | 3.12k | .addReg(DstReg, getKillRegState(true)); |
3147 | 3.12k | updateValueMap(&Arg, ResultReg); |
3148 | 3.12k | } |
3149 | 2.53k | return true; |
3150 | 2.53k | } |
3151 | | |
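// [Editor's annotation, not part of the original source] An example that
// survives all of the bail-outs in fastLowerArguments on x86-64 SysV:
//   define i64 @f(i64 %a, i32 %b, double %c)
// maps %a -> RDI, %b -> ESI, %c -> XMM0, each copied into a vreg. Varargs,
// non-C conventions, byval/sret/swift/nest attributes, aggregate or vector
// types, more than 6 GPR or 8 XMM arguments, or 32-bit targets all fall
// back to the generic argument lowering.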
3152 | | static unsigned computeBytesPoppedByCalleeForSRet(const X86Subtarget *Subtarget, |
3153 | | CallingConv::ID CC, |
3154 | 910 | ImmutableCallSite *CS) { |
3155 | 910 | if (Subtarget->is64Bit()) |
3156 | 722 | return 0; |
3157 | 188 | if (Subtarget->getTargetTriple().isOSMSVCRT()) |
3158 | 85 | return 0; |
3159 | 103 | if (CC == CallingConv::Fast || CC == CallingConv::GHC ||
3160 | 103 | CC == CallingConv::HiPE)
3161 | 3 | return 0; |
3162 | 100 | |
3163 | 100 | if (CS) |
3164 | 100 | if (CS->arg_empty() || !CS->paramHasAttr(0, Attribute::StructRet) ||
3165 | 100 | CS->paramHasAttr(0, Attribute::InReg) || Subtarget->isTargetMCU())
3166 | 98 | return 0; |
3167 | 2 | |
3168 | 2 | return 4; |
3169 | 2 | } |
3170 | | |
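// [Editor's annotation, not part of the original source] The 4 bytes
// returned above reflect the i386 SysV rule that a callee taking an sret
// pointer pops it itself (it returns with "ret $4"), so the caller's
// CALLSEQ_END must record 4 callee-popped bytes. The 64-bit, MSVCRT, MCU,
// and fast/GHC/HiPE cases are caller-cleanup here, hence the "return 0"
// paths.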
3171 | 1.15k | bool X86FastISel::fastLowerCall(CallLoweringInfo &CLI) { |
3172 | 1.15k | auto &OutVals = CLI.OutVals; |
3173 | 1.15k | auto &OutFlags = CLI.OutFlags; |
3174 | 1.15k | auto &OutRegs = CLI.OutRegs; |
3175 | 1.15k | auto &Ins = CLI.Ins; |
3176 | 1.15k | auto &InRegs = CLI.InRegs; |
3177 | 1.15k | CallingConv::ID CC = CLI.CallConv; |
3178 | 1.15k | bool &IsTailCall = CLI.IsTailCall; |
3179 | 1.15k | bool IsVarArg = CLI.IsVarArg; |
3180 | 1.15k | const Value *Callee = CLI.Callee; |
3181 | 1.15k | MCSymbol *Symbol = CLI.Symbol; |
3182 | 1.15k | |
3183 | 1.15k | bool Is64Bit = Subtarget->is64Bit(); |
3184 | 1.15k | bool IsWin64 = Subtarget->isCallingConvWin64(CC); |
3185 | 1.15k | |
3186 | 1.15k | const CallInst *CI = |
3187 | 1.15k | CLI.CS ? dyn_cast<CallInst>(CLI.CS->getInstruction()) : nullptr;
3188 | 1.15k | const Function *CalledFn = CI ? CI->getCalledFunction() : nullptr;
3189 | 1.15k | |
3190 | 1.15k | // Call / invoke instructions with NoCfCheck attribute require special |
3191 | 1.15k | // handling. |
3192 | 1.15k | const auto *II = |
3193 | 1.15k | CLI.CS ? dyn_cast<InvokeInst>(CLI.CS->getInstruction()) : nullptr;
3194 | 1.15k | if ((CI && CI->doesNoCfCheck()) || (II && II->doesNoCfCheck()))
3195 | 0 | return false; |
3196 | 1.15k | |
3197 | 1.15k | // Functions with no_caller_saved_registers need special handling.
3198 | 1.15k | if ((CI && CI->hasFnAttr("no_caller_saved_registers")) ||
3199 | 1.15k | (CalledFn && CalledFn->hasFnAttribute("no_caller_saved_registers")))
3200 | 0 | return false; |
3201 | 1.15k | |
3202 | 1.15k | // Functions using retpoline for indirect calls need to use SDISel. |
3203 | 1.15k | if (Subtarget->useRetpolineIndirectCalls()) |
3204 | 24 | return false; |
3205 | 1.12k | |
3206 | 1.12k | // Handle only C, fastcc, and webkit_js calling conventions for now. |
3207 | 1.12k | switch (CC) { |
3208 | 1.12k | default: return false;
3209 | 1.12k | case CallingConv::C: |
3210 | 1.12k | case CallingConv::Fast: |
3211 | 1.12k | case CallingConv::WebKit_JS: |
3212 | 1.12k | case CallingConv::Swift: |
3213 | 1.12k | case CallingConv::X86_FastCall: |
3214 | 1.12k | case CallingConv::X86_StdCall: |
3215 | 1.12k | case CallingConv::X86_ThisCall: |
3216 | 1.12k | case CallingConv::Win64: |
3217 | 1.12k | case CallingConv::X86_64_SysV: |
3218 | 1.12k | break; |
3219 | 1.12k | } |
3220 | 1.12k | |
3221 | 1.12k | // Allow SelectionDAG isel to handle tail calls. |
3222 | 1.12k | if (IsTailCall) |
3223 | 64 | return false; |
3224 | 1.06k | |
3225 | 1.06k | // fastcc with -tailcallopt is intended to provide a guaranteed |
3226 | 1.06k | // tail call optimization. Fastisel doesn't know how to do that. |
3227 | 1.06k | if (CC == CallingConv::Fast && TM.Options.GuaranteedTailCallOpt)
3228 | 1 | return false; |
3229 | 1.06k | |
3230 | 1.06k | // Don't know how to handle Win64 varargs yet. Nothing special needed for |
3231 | 1.06k | // x86-32. Special handling for x86-64 is implemented. |
3232 | 1.06k | if (IsVarArg && IsWin64)
3233 | 7 | return false; |
3234 | 1.05k | |
3235 | 1.05k | // Don't know about inalloca yet. |
3236 | 1.05k | if (CLI.CS && CLI.CS->hasInAllocaArgument())
3237 | 0 | return false; |
3238 | 1.05k | |
3239 | 1.05k | for (auto Flag : CLI.OutFlags) |
3240 | 1.03k | if (Flag.isSwiftError()) |
3241 | 3 | return false; |
3242 | 1.05k | |
3243 | 1.05k | SmallVector<MVT, 16> OutVTs; |
3244 | 1.05k | SmallVector<unsigned, 16> ArgRegs; |
3245 | 1.05k | |
3246 | 1.05k | // If this is a constant i1/i8/i16 argument, promote to i32 to avoid an extra |
3247 | 1.05k | // instruction. This is safe because it is common to all FastISel supported |
3248 | 1.05k | // calling conventions on x86. |
3249 | 2.00k | for (int i = 0, e = OutVals.size(); i != e; ++i) {
3250 | 1.00k | Value *&Val = OutVals[i]; |
3251 | 1.00k | ISD::ArgFlagsTy Flags = OutFlags[i]; |
3252 | 1.00k | if (auto *CI = dyn_cast<ConstantInt>(Val)) { |
3253 | 296 | if (CI->getBitWidth() < 32) { |
3254 | 24 | if (Flags.isSExt()) |
3255 | 1 | Val = ConstantExpr::getSExt(CI, Type::getInt32Ty(CI->getContext())); |
3256 | 23 | else |
3257 | 23 | Val = ConstantExpr::getZExt(CI, Type::getInt32Ty(CI->getContext())); |
3258 | 24 | } |
3259 | 296 | } |
3260 | 1.00k | |
3261 | 1.00k | // Passing bools around ends up doing a trunc to i1 and passing it. |
3262 | 1.00k | // Codegen this as an argument + "and 1". |
3263 | 1.00k | MVT VT; |
3264 | 1.00k | auto *TI = dyn_cast<TruncInst>(Val); |
3265 | 1.00k | unsigned ResultReg; |
3266 | 1.00k | if (TI && TI->getType()->isIntegerTy(1) && CLI.CS &&
3267 | 1.00k | (TI->getParent() == CLI.CS->getInstruction()->getParent()) &&
3268 | 1.00k | TI->hasOneUse()) {
3269 | 4 | Value *PrevVal = TI->getOperand(0); |
3270 | 4 | ResultReg = getRegForValue(PrevVal); |
3271 | 4 | |
3272 | 4 | if (!ResultReg) |
3273 | 0 | return false; |
3274 | 4 | |
3275 | 4 | if (!isTypeLegal(PrevVal->getType(), VT)) |
3276 | 0 | return false; |
3277 | 4 | |
3278 | 4 | ResultReg = |
3279 | 4 | fastEmit_ri(VT, VT, ISD::AND, ResultReg, hasTrivialKill(PrevVal), 1); |
3280 | 996 | } else { |
3281 | 996 | if (!isTypeLegal(Val->getType(), VT)) |
3282 | 41 | return false; |
3283 | 955 | ResultReg = getRegForValue(Val); |
3284 | 955 | } |
3285 | 1.00k | |
3286 | 1.00k | if (!ResultReg)
3287 | 5 | return false; |
3288 | 954 | |
3289 | 954 | ArgRegs.push_back(ResultReg); |
3290 | 954 | OutVTs.push_back(VT); |
3291 | 954 | } |
3292 | 1.05k | |
3293 | 1.05k | // Analyze operands of the call, assigning locations to each operand. |
3294 | 1.05k | SmallVector<CCValAssign, 16> ArgLocs; |
3295 | 1.00k | CCState CCInfo(CC, IsVarArg, *FuncInfo.MF, ArgLocs, CLI.RetTy->getContext()); |
3296 | 1.00k | |
3297 | 1.00k | // Allocate shadow area for Win64 |
3298 | 1.00k | if (IsWin64) |
3299 | 60 | CCInfo.AllocateStack(32, 8); |
3300 | 1.00k | |
3301 | 1.00k | CCInfo.AnalyzeCallOperands(OutVTs, OutFlags, CC_X86); |
3302 | 1.00k | |
3303 | 1.00k | // Get a count of how many bytes are to be pushed on the stack. |
3304 | 1.00k | unsigned NumBytes = CCInfo.getAlignedCallFrameSize(); |
3305 | 1.00k | |
3306 | 1.00k | // Issue CALLSEQ_START |
3307 | 1.00k | unsigned AdjStackDown = TII.getCallFrameSetupOpcode(); |
3308 | 1.00k | BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AdjStackDown)) |
3309 | 1.00k | .addImm(NumBytes).addImm(0).addImm(0); |
3310 | 1.00k | |
3311 | 1.00k | // Walk the register/memloc assignments, inserting copies/loads. |
3312 | 1.00k | const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo(); |
3313 | 1.92k | for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
3314 | 922 | CCValAssign const &VA = ArgLocs[i]; |
3315 | 922 | const Value *ArgVal = OutVals[VA.getValNo()]; |
3316 | 922 | MVT ArgVT = OutVTs[VA.getValNo()]; |
3317 | 922 | |
3318 | 922 | if (ArgVT == MVT::x86mmx) |
3319 | 2 | return false; |
3320 | 920 | |
3321 | 920 | unsigned ArgReg = ArgRegs[VA.getValNo()]; |
3322 | 920 | |
3323 | 920 | // Promote the value if needed. |
3324 | 920 | switch (VA.getLocInfo()) { |
3325 | 920 | case CCValAssign::Full: break;
3326 | 920 | case CCValAssign::SExt: { |
3327 | 0 | assert(VA.getLocVT().isInteger() && !VA.getLocVT().isVector() && |
3328 | 0 | "Unexpected extend"); |
3329 | 0 |
3330 | 0 | if (ArgVT == MVT::i1) |
3331 | 0 | return false; |
3332 | 0 | |
3333 | 0 | bool Emitted = X86FastEmitExtend(ISD::SIGN_EXTEND, VA.getLocVT(), ArgReg, |
3334 | 0 | ArgVT, ArgReg); |
3335 | 0 | assert(Emitted && "Failed to emit a sext!"); (void)Emitted; |
3336 | 0 | ArgVT = VA.getLocVT(); |
3337 | 0 | break; |
3338 | 0 | } |
3339 | 5 | case CCValAssign::ZExt: { |
3340 | 5 | assert(VA.getLocVT().isInteger() && !VA.getLocVT().isVector() && |
3341 | 5 | "Unexpected extend"); |
3342 | 5 | |
3343 | 5 | // Handle zero-extension from i1 to i8, which is common. |
3344 | 5 | if (ArgVT == MVT::i1) { |
3345 | 0 | // Set the high bits to zero. |
3346 | 0 | ArgReg = fastEmitZExtFromI1(MVT::i8, ArgReg, /*TODO: Kill=*/false); |
3347 | 0 | ArgVT = MVT::i8; |
3348 | 0 |
3349 | 0 | if (ArgReg == 0) |
3350 | 0 | return false; |
3351 | 5 | } |
3352 | 5 | |
3353 | 5 | bool Emitted = X86FastEmitExtend(ISD::ZERO_EXTEND, VA.getLocVT(), ArgReg, |
3354 | 5 | ArgVT, ArgReg); |
3355 | 5 | assert(Emitted && "Failed to emit a zext!"); (void)Emitted; |
3356 | 5 | ArgVT = VA.getLocVT(); |
3357 | 5 | break; |
3358 | 5 | } |
3359 | 6 | case CCValAssign::AExt: { |
3360 | 6 | assert(VA.getLocVT().isInteger() && !VA.getLocVT().isVector() && |
3361 | 6 | "Unexpected extend"); |
3362 | 6 | bool Emitted = X86FastEmitExtend(ISD::ANY_EXTEND, VA.getLocVT(), ArgReg, |
3363 | 6 | ArgVT, ArgReg); |
3364 | 6 | if (!Emitted) |
3365 | 3 | Emitted = X86FastEmitExtend(ISD::ZERO_EXTEND, VA.getLocVT(), ArgReg, |
3366 | 3 | ArgVT, ArgReg); |
3367 | 6 | if (!Emitted) |
3368 | 0 | Emitted = X86FastEmitExtend(ISD::SIGN_EXTEND, VA.getLocVT(), ArgReg, |
3369 | 0 | ArgVT, ArgReg); |
3370 | 6 | |
3371 | 6 | assert(Emitted && "Failed to emit an aext!"); (void)Emitted;
3372 | 6 | ArgVT = VA.getLocVT(); |
3373 | 6 | break; |
3374 | 5 | } |
3375 | 5 | case CCValAssign::BCvt: { |
3376 | 0 | ArgReg = fastEmit_r(ArgVT, VA.getLocVT(), ISD::BITCAST, ArgReg, |
3377 | 0 | /*TODO: Kill=*/false); |
3378 | 0 | assert(ArgReg && "Failed to emit a bitcast!"); |
3379 | 0 | ArgVT = VA.getLocVT(); |
3380 | 0 | break; |
3381 | 5 | } |
3382 | 5 | case CCValAssign::VExt: |
3383 | 0 | // VExt has not been implemented, so this should be impossible to reach |
3384 | 0 | // for now. However, fallback to Selection DAG isel once implemented. |
3385 | 0 | return false; |
3386 | 5 | case CCValAssign::AExtUpper: |
3387 | 0 | case CCValAssign::SExtUpper: |
3388 | 0 | case CCValAssign::ZExtUpper: |
3389 | 0 | case CCValAssign::FPExt: |
3390 | 0 | llvm_unreachable("Unexpected loc info!"); |
3391 | 0 | case CCValAssign::Indirect: |
3392 | 0 | // FIXME: Indirect doesn't need extending, but fast-isel doesn't fully |
3393 | 0 | // support this. |
3394 | 0 | return false; |
3395 | 920 | } |
3396 | 920 | |
3397 | 920 | if (VA.isRegLoc()) { |
3398 | 704 | BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, |
3399 | 704 | TII.get(TargetOpcode::COPY), VA.getLocReg()).addReg(ArgReg); |
3400 | 704 | OutRegs.push_back(VA.getLocReg()); |
3401 | 704 | } else { |
3402 | 216 | assert(VA.isMemLoc()); |
3403 | 216 | |
3404 | 216 | // Don't emit stores for undef values. |
3405 | 216 | if (isa<UndefValue>(ArgVal)) |
3406 | 6 | continue; |
3407 | 210 | |
3408 | 210 | unsigned LocMemOffset = VA.getLocMemOffset(); |
3409 | 210 | X86AddressMode AM; |
3410 | 210 | AM.Base.Reg = RegInfo->getStackRegister(); |
3411 | 210 | AM.Disp = LocMemOffset; |
3412 | 210 | ISD::ArgFlagsTy Flags = OutFlags[VA.getValNo()]; |
3413 | 210 | unsigned Alignment = DL.getABITypeAlignment(ArgVal->getType()); |
3414 | 210 | MachineMemOperand *MMO = FuncInfo.MF->getMachineMemOperand( |
3415 | 210 | MachinePointerInfo::getStack(*FuncInfo.MF, LocMemOffset), |
3416 | 210 | MachineMemOperand::MOStore, ArgVT.getStoreSize(), Alignment); |
3417 | 210 | if (Flags.isByVal()) { |
3418 | 8 | X86AddressMode SrcAM; |
3419 | 8 | SrcAM.Base.Reg = ArgReg; |
3420 | 8 | if (!TryEmitSmallMemcpy(AM, SrcAM, Flags.getByValSize())) |
3421 | 1 | return false; |
3422 | 202 | } else if (isa<ConstantInt>(ArgVal) || isa<ConstantPointerNull>(ArgVal)) {
3423 | 69 | // If this is a really simple value, emit this with the Value* version |
3424 | 69 | // of X86FastEmitStore. If it isn't simple, we don't want to do this, |
3425 | 69 | // as it can cause us to reevaluate the argument. |
3426 | 69 | if (!X86FastEmitStore(ArgVT, ArgVal, AM, MMO)) |
3427 | 0 | return false; |
3428 | 133 | } else { |
3429 | 133 | bool ValIsKill = hasTrivialKill(ArgVal); |
3430 | 133 | if (!X86FastEmitStore(ArgVT, ArgReg, ValIsKill, AM, MMO)) |
3431 | 0 | return false; |
3432 | 133 | } |
3433 | 210 | } |
3434 | 920 | } |
3435 | 1.00k | |
3436 | 1.00k | // ELF / PIC requires GOT in the EBX register before function calls via PLT |
3437 | 1.00k | // GOT pointer. |
3438 | 1.00k | if (Subtarget->isPICStyleGOT()) {
3439 | 12 | unsigned Base = getInstrInfo()->getGlobalBaseReg(FuncInfo.MF); |
3440 | 12 | BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, |
3441 | 12 | TII.get(TargetOpcode::COPY), X86::EBX).addReg(Base); |
3442 | 12 | } |
3443 | 1.00k | |
3444 | 1.00k | if (Is64Bit && IsVarArg && !IsWin64) {
3445 | 64 | // From AMD64 ABI document: |
3446 | 64 | // For calls that may call functions that use varargs or stdargs |
3447 | 64 | // (prototype-less calls or calls to functions containing ellipsis (...) in |
3448 | 64 | // the declaration) %al is used as hidden argument to specify the number |
3449 | 64 | // of SSE registers used. The contents of %al do not need to match exactly |
3450 | 64 | // the number of registers, but must be an ubound on the number of SSE |
3451 | 64 | // registers used and is in the range 0 - 8 inclusive. |
3452 | 64 | |
3453 | 64 | // Count the number of XMM registers allocated. |
3454 | 64 | static const MCPhysReg XMMArgRegs[] = { |
3455 | 64 | X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3, |
3456 | 64 | X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7 |
3457 | 64 | }; |
3458 | 64 | unsigned NumXMMRegs = CCInfo.getFirstUnallocated(XMMArgRegs); |
3459 | 64 | assert((Subtarget->hasSSE1() || !NumXMMRegs) |
3460 | 64 | && "SSE registers cannot be used when SSE is disabled"); |
3461 | 64 | BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::MOV8ri), |
3462 | 64 | X86::AL).addImm(NumXMMRegs); |
3463 | 64 | } |
3464 | 1.00k | |
3465 | 1.00k | // Materialize callee address in a register. FIXME: GV address can be |
3466 | 1.00k | // handled with a CALLpcrel32 instead. |
3467 | 1.00k | X86AddressMode CalleeAM; |
3468 | 1.00k | if (!X86SelectCallAddress(Callee, CalleeAM)) |
3469 | 56 | return false; |
3470 | 948 | |
3471 | 948 | unsigned CalleeOp = 0; |
3472 | 948 | const GlobalValue *GV = nullptr; |
3473 | 948 | if (CalleeAM.GV != nullptr) { |
3474 | 923 | GV = CalleeAM.GV; |
3475 | 923 | } else if (CalleeAM.Base.Reg != 0) {
3476 | 25 | CalleeOp = CalleeAM.Base.Reg; |
3477 | 25 | } else |
3478 | 0 | return false; |
3479 | 948 | |
3480 | 948 | // Issue the call. |
3481 | 948 | MachineInstrBuilder MIB; |
3482 | 948 | if (CalleeOp) { |
3483 | 25 | // Register-indirect call. |
3484 | 25 | unsigned CallOpc = Is64Bit ? X86::CALL64r : X86::CALL32r;
3485 | 25 | MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(CallOpc)) |
3486 | 25 | .addReg(CalleeOp); |
3487 | 923 | } else { |
3488 | 923 | // Direct call. |
3489 | 923 | assert(GV && "Not a direct call"); |
3490 | 923 | // See if we need any target-specific flags on the GV operand. |
3491 | 923 | unsigned char OpFlags = Subtarget->classifyGlobalFunctionReference(GV); |
3492 | 923 | |
3493 | 923 | // This will be a direct call, or an indirect call through memory for |
3494 | 923 | // NonLazyBind calls or dllimport calls. |
3495 | 923 | bool NeedLoad = OpFlags == X86II::MO_DLLIMPORT || |
3496 | 923 | OpFlags == X86II::MO_GOTPCREL ||
3497 | 923 | OpFlags == X86II::MO_COFFSTUB;
3498 | 923 | unsigned CallOpc = NeedLoad
3499 | 923 | ? (Is64Bit ? X86::CALL64m : X86::CALL32m)
3500 | 923 | : (Is64Bit ? X86::CALL64pcrel32 : X86::CALLpcrel32);
3501 | 923 | |
3502 | 923 | MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(CallOpc)); |
3503 | 923 | if (NeedLoad) |
3504 | 28 | MIB.addReg(Is64Bit ? X86::RIP : 0).addImm(1).addReg(0);
3505 | 923 | if (Symbol) |
3506 | 20 | MIB.addSym(Symbol, OpFlags); |
3507 | 903 | else |
3508 | 903 | MIB.addGlobalAddress(GV, 0, OpFlags); |
3509 | 923 | if (NeedLoad) |
3510 | 28 | MIB.addReg(0); |
3511 | 923 | } |
3512 | 948 | |
3513 | 948 | // Add a register mask operand representing the call-preserved registers. |
3514 | 948 | // Proper defs for return values will be added by setPhysRegsDeadExcept(). |
3515 | 948 | MIB.addRegMask(TRI.getCallPreservedMask(*FuncInfo.MF, CC)); |
3516 | 948 | |
3517 | 948 | // Add an implicit use GOT pointer in EBX. |
3518 | 948 | if (Subtarget->isPICStyleGOT()) |
3519 | 12 | MIB.addReg(X86::EBX, RegState::Implicit); |
3520 | 948 | |
3521 | 948 | if (Is64Bit && IsVarArg && !IsWin64)
3522 | 64 | MIB.addReg(X86::AL, RegState::Implicit); |
3523 | 948 | |
3524 | 948 | // Add implicit physical register uses to the call. |
3525 | 948 | for (auto Reg : OutRegs) |
3526 | 673 | MIB.addReg(Reg, RegState::Implicit); |
3527 | 948 | |
3528 | 948 | // Issue CALLSEQ_END |
3529 | 948 | unsigned NumBytesForCalleeToPop = |
3530 | 948 | X86::isCalleePop(CC, Subtarget->is64Bit(), IsVarArg, |
3531 | 948 | TM.Options.GuaranteedTailCallOpt) |
3532 | 948 | ? NumBytes // Callee pops everything.
3533 | 948 | : computeBytesPoppedByCalleeForSRet(Subtarget, CC, CLI.CS);
3534 | 948 | unsigned AdjStackUp = TII.getCallFrameDestroyOpcode(); |
3535 | 948 | BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AdjStackUp)) |
3536 | 948 | .addImm(NumBytes).addImm(NumBytesForCalleeToPop); |
3537 | 948 | |
3538 | 948 | // Now handle call return values. |
3539 | 948 | SmallVector<CCValAssign, 16> RVLocs; |
3540 | 948 | CCState CCRetInfo(CC, IsVarArg, *FuncInfo.MF, RVLocs, |
3541 | 948 | CLI.RetTy->getContext()); |
3542 | 948 | CCRetInfo.AnalyzeCallResult(Ins, RetCC_X86); |
3543 | 948 | |
3544 | 948 | // Copy all of the result registers out of their specified physreg. |
3545 | 948 | unsigned ResultReg = FuncInfo.CreateRegs(CLI.RetTy); |
3546 | 1.25k | for (unsigned i = 0; i != RVLocs.size(); ++i) {
3547 | 306 | CCValAssign &VA = RVLocs[i]; |
3548 | 306 | EVT CopyVT = VA.getValVT(); |
3549 | 306 | unsigned CopyReg = ResultReg + i; |
3550 | 306 | unsigned SrcReg = VA.getLocReg(); |
3551 | 306 | |
3552 | 306 | // If this is x86-64, and we disabled SSE, we can't return FP values |
3553 | 306 | if ((CopyVT == MVT::f32 || CopyVT == MVT::f64) &&
3554 | 306 | ((Is64Bit || Ins[i].Flags.isInReg()) && !Subtarget->hasSSE1())) {
3555 | 0 | report_fatal_error("SSE register return with SSE disabled"); |
3556 | 0 | } |
3557 | 306 | |
3558 | 306 | // If we prefer to use the value in xmm registers, copy it out as f80 and |
3559 | 306 | // use a truncate to move it from fp stack reg to xmm reg. |
3560 | 306 | if ((SrcReg == X86::FP0 || SrcReg == X86::FP1) &&
3561 | 306 | isScalarFPTypeInSSEReg(VA.getValVT())) {
3562 | 0 | CopyVT = MVT::f80; |
3563 | 0 | CopyReg = createResultReg(&X86::RFP80RegClass); |
3564 | 0 | } |
3565 | 306 | |
3566 | 306 | // Copy out the result. |
3567 | 306 | BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, |
3568 | 306 | TII.get(TargetOpcode::COPY), CopyReg).addReg(SrcReg); |
3569 | 306 | InRegs.push_back(VA.getLocReg()); |
3570 | 306 | |
3571 | 306 | // Round the f80 to the right size, which also moves it to the appropriate |
3572 | 306 | // xmm register. This is accomplished by storing the f80 value in memory |
3573 | 306 | // and then loading it back. |
3574 | 306 | if (CopyVT != VA.getValVT()) { |
3575 | 0 | EVT ResVT = VA.getValVT(); |
3576 | 0 | unsigned Opc = ResVT == MVT::f32 ? X86::ST_Fp80m32 : X86::ST_Fp80m64; |
3577 | 0 | unsigned MemSize = ResVT.getSizeInBits()/8; |
3578 | 0 | int FI = MFI.CreateStackObject(MemSize, MemSize, false); |
3579 | 0 | addFrameReference(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, |
3580 | 0 | TII.get(Opc)), FI) |
3581 | 0 | .addReg(CopyReg); |
3582 | 0 | Opc = ResVT == MVT::f32 ? X86::MOVSSrm_alt : X86::MOVSDrm_alt; |
3583 | 0 | addFrameReference(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, |
3584 | 0 | TII.get(Opc), ResultReg + i), FI); |
3585 | 0 | } |
3586 | 306 | } |
3587 | 948 | |
3588 | 948 | CLI.ResultReg = ResultReg; |
3589 | 948 | CLI.NumResultRegs = RVLocs.size(); |
3590 | 948 | CLI.Call = MIB; |
3591 | 948 | |
3592 | 948 | return true; |
3593 | 948 | } |
3594 | | |
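// [Editor's annotation, not part of the original source] One concrete
// consequence of the varargs handling above: a 64-bit SysV call like
//   call i32 (i8*, ...) @printf(i8* %fmt, double %x)
// gets a "movb $1, %al" emitted just before the call, %al carrying an upper
// bound on the number of XMM registers used, per the AMD64 ABI note quoted
// in the source.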
3595 | | bool |
3596 | 24.5k | X86FastISel::fastSelectInstruction(const Instruction *I) { |
3597 | 24.5k | switch (I->getOpcode()) { |
3598 | 24.5k | default: break;
3599 | 24.5k | case Instruction::Load: |
3600 | 1.47k | return X86SelectLoad(I); |
3601 | 24.5k | case Instruction::Store: |
3602 | 2.17k | return X86SelectStore(I); |
3603 | 24.5k | case Instruction::Ret: |
3604 | 9.65k | return X86SelectRet(I); |
3605 | 24.5k | case Instruction::ICmp: |
3606 | 309 | case Instruction::FCmp: |
3607 | 309 | return X86SelectCmp(I); |
3608 | 309 | case Instruction::ZExt: |
3609 | 130 | return X86SelectZExt(I); |
3610 | 309 | case Instruction::SExt: |
3611 | 211 | return X86SelectSExt(I); |
3612 | 432 | case Instruction::Br: |
3613 | 432 | return X86SelectBranch(I); |
3614 | 309 | case Instruction::LShr: |
3615 | 24 | case Instruction::AShr: |
3616 | 24 | case Instruction::Shl: |
3617 | 24 | return X86SelectShift(I); |
3618 | 44 | case Instruction::SDiv: |
3619 | 44 | case Instruction::UDiv: |
3620 | 44 | case Instruction::SRem: |
3621 | 44 | case Instruction::URem: |
3622 | 44 | return X86SelectDivRem(I); |
3623 | 1.50k | case Instruction::Select: |
3624 | 1.50k | return X86SelectSelect(I); |
3625 | 44 | case Instruction::Trunc: |
3626 | 31 | return X86SelectTrunc(I); |
3627 | 44 | case Instruction::FPExt: |
3628 | 29 | return X86SelectFPExt(I); |
3629 | 44 | case Instruction::FPTrunc: |
3630 | 9 | return X86SelectFPTrunc(I); |
3631 | 44 | case Instruction::SIToFP: |
3632 | 31 | return X86SelectSIToFP(I); |
3633 | 44 | case Instruction::UIToFP: |
3634 | 21 | return X86SelectUIToFP(I); |
3635 | 44 | case Instruction::IntToPtr: // Deliberate fall-through. |
3636 | 9 | case Instruction::PtrToInt: { |
3637 | 9 | EVT SrcVT = TLI.getValueType(DL, I->getOperand(0)->getType()); |
3638 | 9 | EVT DstVT = TLI.getValueType(DL, I->getType()); |
3639 | 9 | if (DstVT.bitsGT(SrcVT)) |
3640 | 3 | return X86SelectZExt(I); |
3641 | 6 | if (DstVT.bitsLT(SrcVT)) |
3642 | 2 | return X86SelectTrunc(I); |
3643 | 4 | unsigned Reg = getRegForValue(I->getOperand(0)); |
3644 | 4 | if (Reg == 0) return false; |
3645 | 0 | updateValueMap(I, Reg); |
3646 | 0 | return true; |
3647 | 0 | } |
3648 | 4.02k | case Instruction::BitCast: { |
3649 | 4.02k | // Select SSE2/AVX bitcasts between 128/256/512 bit vector types. |
3650 | 4.02k | if (!Subtarget->hasSSE2()) |
3651 | 33 | return false; |
3652 | 3.99k | |
3653 | 3.99k | MVT SrcVT, DstVT; |
3654 | 3.99k | if (!isTypeLegal(I->getOperand(0)->getType(), SrcVT) || |
3655 | 3.99k | !isTypeLegal(I->getType(), DstVT))
3656 | 12 | return false; |
3657 | 3.98k | |
3658 | 3.98k | // Only allow vectors that use xmm/ymm/zmm. |
3659 | 3.98k | if (!SrcVT.isVector() || !DstVT.isVector() ||
3660 | 3.98k | SrcVT.getVectorElementType() == MVT::i1 ||
3661 | 3.98k | DstVT.getVectorElementType() == MVT::i1)
3662 | 217 | return false; |
3663 | 3.76k | |
3664 | 3.76k | unsigned Reg = getRegForValue(I->getOperand(0)); |
3665 | 3.76k | if (Reg == 0) |
3666 | 8 | return false; |
3667 | 3.75k | |
3668 | 3.75k | // No instruction is needed for conversion. Reuse the register used by |
3669 | 3.75k | // the fist operand. |
3670 | 3.75k | updateValueMap(I, Reg); |
3671 | 3.75k | return true; |
3672 | 3.75k | } |
3673 | 4.45k | } |
3674 | 4.45k | |
3675 | 4.45k | return false; |
3676 | 4.45k | } |
3677 | | |
3678 | 810 | unsigned X86FastISel::X86MaterializeInt(const ConstantInt *CI, MVT VT) { |
3679 | 810 | if (VT > MVT::i64) |
3680 | 0 | return 0; |
3681 | 810 | |
3682 | 810 | uint64_t Imm = CI->getZExtValue(); |
3683 | 810 | if (Imm == 0) { |
3684 | 406 | unsigned SrcReg = fastEmitInst_(X86::MOV32r0, &X86::GR32RegClass); |
3685 | 406 | switch (VT.SimpleTy) { |
3686 | 406 | default: llvm_unreachable("Unexpected value type");
3687 | 406 | case MVT::i1: |
3688 | 39 | case MVT::i8: |
3689 | 39 | return fastEmitInst_extractsubreg(MVT::i8, SrcReg, /*Kill=*/true, |
3690 | 39 | X86::sub_8bit); |
3691 | 39 | case MVT::i16: |
3692 | 0 | return fastEmitInst_extractsubreg(MVT::i16, SrcReg, /*Kill=*/true, |
3693 | 0 | X86::sub_16bit); |
3694 | 347 | case MVT::i32: |
3695 | 347 | return SrcReg; |
3696 | 39 | case MVT::i64: { |
3697 | 20 | unsigned ResultReg = createResultReg(&X86::GR64RegClass); |
3698 | 20 | BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, |
3699 | 20 | TII.get(TargetOpcode::SUBREG_TO_REG), ResultReg) |
3700 | 20 | .addImm(0).addReg(SrcReg).addImm(X86::sub_32bit); |
3701 | 20 | return ResultReg; |
3702 | 404 | } |
3703 | 406 | } |
3704 | 406 | } |
3705 | 404 | |
3706 | 404 | unsigned Opc = 0; |
3707 | 404 | switch (VT.SimpleTy) { |
3708 | 404 | default: llvm_unreachable("Unexpected value type");
3709 | 404 | case MVT::i1: |
3710 | 29 | VT = MVT::i8; |
3711 | 29 | LLVM_FALLTHROUGH; |
3712 | 33 | case MVT::i8: Opc = X86::MOV8ri; break; |
3713 | 29 | case MVT::i16: Opc = X86::MOV16ri; break;
3714 | 318 | case MVT::i32: Opc = X86::MOV32ri; break; |
3715 | 53 | case MVT::i64: { |
3716 | 53 | if (isUInt<32>(Imm)) |
3717 | 34 | Opc = X86::MOV32ri64; |
3718 | 19 | else if (isInt<32>(Imm)) |
3719 | 8 | Opc = X86::MOV64ri32; |
3720 | 11 | else |
3721 | 11 | Opc = X86::MOV64ri; |
3722 | 53 | break; |
3723 | 404 | } |
3724 | 404 | } |
3725 | 404 | return fastEmitInst_i(Opc, TLI.getRegClassFor(VT), Imm); |
3726 | 404 | } |
3727 | | |
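// [Editor's annotation, not part of the original source] Examples of the
// paths through X86MaterializeInt: i64 0 uses MOV32r0 (a 32-bit xor idiom)
// widened with SUBREG_TO_REG; i64 4294967295 satisfies isUInt<32> and
// becomes the shorter MOV32ri64; only a constant that truly needs all 64
// bits pays for the 10-byte MOV64ri encoding.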
3728 | 98 | unsigned X86FastISel::X86MaterializeFP(const ConstantFP *CFP, MVT VT) { |
3729 | 98 | if (CFP->isNullValue()) |
3730 | 57 | return fastMaterializeFloatZero(CFP); |
3731 | 41 | |
3732 | 41 | // Can't handle alternate code models yet. |
3733 | 41 | CodeModel::Model CM = TM.getCodeModel(); |
3734 | 41 | if (CM != CodeModel::Small && CM != CodeModel::Large)
3735 | 0 | return 0; |
3736 | 41 | |
3737 | 41 | // Get opcode and regclass of the output for the given load instruction. |
3738 | 41 | unsigned Opc = 0; |
3739 | 41 | bool HasAVX = Subtarget->hasAVX(); |
3740 | 41 | bool HasAVX512 = Subtarget->hasAVX512(); |
3741 | 41 | switch (VT.SimpleTy) { |
3742 | 41 | default: return 0;
3743 | 41 | case MVT::f32: |
3744 | 19 | if (X86ScalarSSEf32) |
3745 | 19 | Opc = HasAVX512 ? X86::VMOVSSZrm_alt :
3746 | 19 | HasAVX ? X86::VMOVSSrm_alt :
3747 | 17 | X86::MOVSSrm_alt;
3748 | 0 | else |
3749 | 0 | Opc = X86::LD_Fp32m; |
3750 | 19 | break; |
3751 | 41 | case MVT::f64: |
3752 | 22 | if (X86ScalarSSEf64) |
3753 | 21 | Opc = HasAVX512 ? X86::VMOVSDZrm_alt :
3754 | 21 | HasAVX ? X86::VMOVSDrm_alt :
3755 | 19 | X86::MOVSDrm_alt;
3756 | 1 | else |
3757 | 1 | Opc = X86::LD_Fp64m; |
3758 | 22 | break; |
3759 | 41 | case MVT::f80: |
3760 | 0 | // No f80 support yet. |
3761 | 0 | return 0; |
3762 | 41 | } |
3763 | 41 | |
3764 | 41 | // MachineConstantPool wants an explicit alignment. |
3765 | 41 | unsigned Align = DL.getPrefTypeAlignment(CFP->getType()); |
3766 | 41 | if (Align == 0) { |
3767 | 0 | // Alignment of vector types. FIXME! |
3768 | 0 | Align = DL.getTypeAllocSize(CFP->getType()); |
3769 | 0 | } |
3770 | 41 | |
3771 | 41 | // x86-32 PIC requires a PIC base register for constant pools. |
3772 | 41 | unsigned PICBase = 0; |
3773 | 41 | unsigned char OpFlag = Subtarget->classifyLocalReference(nullptr); |
3774 | 41 | if (OpFlag == X86II::MO_PIC_BASE_OFFSET) |
3775 | 0 | PICBase = getInstrInfo()->getGlobalBaseReg(FuncInfo.MF); |
3776 | 41 | else if (OpFlag == X86II::MO_GOTOFF) |
3777 | 0 | PICBase = getInstrInfo()->getGlobalBaseReg(FuncInfo.MF); |
3778 | 41 | else if (Subtarget->is64Bit() && TM.getCodeModel() == CodeModel::Small)
3779 | 28 | PICBase = X86::RIP; |
3780 | 41 | |
3781 | 41 | // Create the load from the constant pool. |
3782 | 41 | unsigned CPI = MCP.getConstantPoolIndex(CFP, Align); |
3783 | 41 | unsigned ResultReg = createResultReg(TLI.getRegClassFor(VT.SimpleTy)); |
3784 | 41 | |
3785 | 41 | if (CM == CodeModel::Large) { |
3786 | 12 | unsigned AddrReg = createResultReg(&X86::GR64RegClass); |
3787 | 12 | BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::MOV64ri), |
3788 | 12 | AddrReg) |
3789 | 12 | .addConstantPoolIndex(CPI, 0, OpFlag); |
3790 | 12 | MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, |
3791 | 12 | TII.get(Opc), ResultReg); |
3792 | 12 | addDirectMem(MIB, AddrReg); |
3793 | 12 | MachineMemOperand *MMO = FuncInfo.MF->getMachineMemOperand( |
3794 | 12 | MachinePointerInfo::getConstantPool(*FuncInfo.MF), |
3795 | 12 | MachineMemOperand::MOLoad, DL.getPointerSize(), Align); |
3796 | 12 | MIB->addMemOperand(*FuncInfo.MF, MMO); |
3797 | 12 | return ResultReg; |
3798 | 12 | } |
3799 | 29 | |
3800 | 29 | addConstantPoolReference(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, |
3801 | 29 | TII.get(Opc), ResultReg), |
3802 | 29 | CPI, PICBase, OpFlag); |
3803 | 29 | return ResultReg; |
3804 | 29 | } |
3805 | | |
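
The switch above is a two-axis table: scalar type crossed with the best
available ISA level. A hedged standalone sketch of the same selection
(pickFpLoad and its parameters are illustrative, not LLVM API; SSE1 gates
f32 and SSE2 gates f64, mirroring X86ScalarSSEf32/X86ScalarSSEf64):

    #include <cstdio>

    enum class FpTy { f32, f64, f80 };

    // Prefer the AVX-512 scalar load, then the AVX form, then plain SSE;
    // fall back to an x87 stack load when scalar SSE is unavailable.
    // f80 has no FastISel support and yields null.
    static const char *pickFpLoad(FpTy T, bool SSE1, bool SSE2, bool AVX,
                                  bool AVX512) {
      switch (T) {
      case FpTy::f32:
        if (!SSE1) return "LD_Fp32m";
        return AVX512 ? "VMOVSSZrm_alt" : AVX ? "VMOVSSrm_alt" : "MOVSSrm_alt";
      case FpTy::f64:
        if (!SSE2) return "LD_Fp64m";
        return AVX512 ? "VMOVSDZrm_alt" : AVX ? "VMOVSDrm_alt" : "MOVSDrm_alt";
      case FpTy::f80:
        return nullptr; // no f80 support yet
      }
      return nullptr;
    }

    int main() {
      // An AVX (but not AVX-512) target loading an f64 constant:
      printf("%s\n", pickFpLoad(FpTy::f64, true, true, true, false));
      return 0; // prints VMOVSDrm_alt
    }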
3806 | 231 | unsigned X86FastISel::X86MaterializeGV(const GlobalValue *GV, MVT VT) { |
3807 | 231 | // Can't handle alternate code models yet. |
3808 | 231 | if (TM.getCodeModel() != CodeModel::Small) |
3809 | 39 | return 0; |
3810 | 192 | |
3811 | 192 | // Materialize addresses with LEA/MOV instructions. |
3812 | 192 | X86AddressMode AM; |
3813 | 192 | if (X86SelectAddress(GV, AM)) { |
3814 | 174 | // If the expression is just a base register, then we're done; otherwise
3815 | 174 | // we need to emit an LEA.
3816 | 174 | if (AM.BaseType == X86AddressMode::RegBase && |
3817 | 174 | AM.IndexReg == 0 && AM.Disp == 0 && AM.GV == nullptr) |
3818 | 29 | return AM.Base.Reg; |
3819 | 145 | |
3820 | 145 | unsigned ResultReg = createResultReg(TLI.getRegClassFor(VT)); |
3821 | 145 | if (TM.getRelocationModel() == Reloc::Static && |
3822 | 145 | TLI.getPointerTy(DL) == MVT::i64) {
3823 | 34 | // The displacement could be more than 32 bits away, so we need to use
3824 | 34 | // an instruction with a 64-bit immediate.
3825 | 34 | BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::MOV64ri), |
3826 | 34 | ResultReg) |
3827 | 34 | .addGlobalAddress(GV); |
3828 | 111 | } else { |
3829 | 111 | unsigned Opc = |
3830 | 111 | TLI.getPointerTy(DL) == MVT::i32 |
3831 | 111 | ? (Subtarget->isTarget64BitILP32() ? X86::LEA64_32r : X86::LEA32r)
3832 | 111 | : X86::LEA64r;
3833 | 111 | addFullAddress(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, |
3834 | 111 | TII.get(Opc), ResultReg), AM); |
3835 | 111 | } |
3836 | 145 | return ResultReg; |
3837 | 145 | } |
3838 | 18 | return 0; |
3839 | 18 | } |
3840 | | |
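
Two early outs drive the code above: an address that is already a bare base
register needs no instruction at all, and static relocation with 64-bit
pointers may put the symbol more than 32 bits away, forcing a 64-bit
absolute MOV rather than an LEA. A minimal sketch of that decision
(pickGVOpc is an illustrative helper, not LLVM API; an empty string stands
for "reuse the base register"):

    #include <cstdio>

    static const char *pickGVOpc(bool BareBaseReg, bool StaticReloc,
                                 bool Ptr64, bool ILP32) {
      if (BareBaseReg)
        return ""; // the address already lives in a register
      if (StaticReloc && Ptr64)
        return "MOV64ri"; // 64-bit absolute address as an immediate
      // Otherwise form the address with an LEA sized to the pointer type;
      // x32 (ILP32 on a 64-bit target) uses the 32-bit-result LEA64_32r.
      return Ptr64 ? "LEA64r" : (ILP32 ? "LEA64_32r" : "LEA32r");
    }

    int main() {
      printf("%s\n", pickGVOpc(false, true, true, false));  // MOV64ri
      printf("%s\n", pickGVOpc(false, false, false, true)); // LEA64_32r
      return 0;
    }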
3841 | 1.48k | unsigned X86FastISel::fastMaterializeConstant(const Constant *C) { |
3842 | 1.48k | EVT CEVT = TLI.getValueType(DL, C->getType(), true); |
3843 | 1.48k | |
3844 | 1.48k | // Only handle simple types. |
3845 | 1.48k | if (!CEVT.isSimple()) |
3846 | 0 | return 0; |
3847 | 1.48k | MVT VT = CEVT.getSimpleVT(); |
3848 | 1.48k | |
3849 | 1.48k | if (const auto *CI = dyn_cast<ConstantInt>(C)) |
3850 | 810 | return X86MaterializeInt(CI, VT); |
3851 | 678 | else if (const ConstantFP *CFP = dyn_cast<ConstantFP>(C)) |
3852 | 98 | return X86MaterializeFP(CFP, VT); |
3853 | 580 | else if (const GlobalValue *GV = dyn_cast<GlobalValue>(C)) |
3854 | 231 | return X86MaterializeGV(GV, VT); |
3855 | 349 | |
3856 | 349 | return 0; |
3857 | 349 | } |
3858 | | |
3859 | 232 | unsigned X86FastISel::fastMaterializeAlloca(const AllocaInst *C) { |
3860 | 232 | // Fail on dynamic allocas. At this point, getRegForValue has already |
3861 | 232 | // checked its CSE maps, so if we're here trying to handle a dynamic |
3862 | 232 | // alloca, we're not going to succeed. X86SelectAddress has a |
3863 | 232 | // check for dynamic allocas, because it's called directly from |
3864 | 232 | // various places, but targetMaterializeAlloca also needs a check |
3865 | 232 | // in order to avoid recursion between getRegForValue, |
3866 | 232 | // X86SelectAddress, and targetMaterializeAlloca.
3867 | 232 | if (!FuncInfo.StaticAllocaMap.count(C)) |
3868 | 0 | return 0; |
3869 | 232 | assert(C->isStaticAlloca() && "dynamic alloca in the static alloca map?"); |
3870 | 232 | |
3871 | 232 | X86AddressMode AM; |
3872 | 232 | if (!X86SelectAddress(C, AM)) |
3873 | 0 | return 0; |
3874 | 232 | unsigned Opc = |
3875 | 232 | TLI.getPointerTy(DL) == MVT::i32 |
3876 | 232 | ? (Subtarget->isTarget64BitILP32() ? X86::LEA64_32r : X86::LEA32r)
3877 | 232 | : X86::LEA64r;
3878 | 232 | const TargetRegisterClass *RC = TLI.getRegClassFor(TLI.getPointerTy(DL)); |
3879 | 232 | unsigned ResultReg = createResultReg(RC); |
3880 | 232 | addFullAddress(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, |
3881 | 232 | TII.get(Opc), ResultReg), AM); |
3882 | 232 | return ResultReg; |
3883 | 232 | } |
3884 | | |
3885 | 58 | unsigned X86FastISel::fastMaterializeFloatZero(const ConstantFP *CF) { |
3886 | 58 | MVT VT; |
3887 | 58 | if (!isTypeLegal(CF->getType(), VT)) |
3888 | 2 | return 0; |
3889 | 56 | |
3890 | 56 | // Get opcode and regclass for the given zero. |
3891 | 56 | bool HasAVX512 = Subtarget->hasAVX512(); |
3892 | 56 | unsigned Opc = 0; |
3893 | 56 | switch (VT.SimpleTy) { |
3894 | 56 | default: return 0;
3895 | 56 | case MVT::f32: |
3896 | 52 | if (X86ScalarSSEf32) |
3897 | 52 | Opc = HasAVX512 ? X86::AVX512_FsFLD0SS : X86::FsFLD0SS;
3898 | 0 | else |
3899 | 0 | Opc = X86::LD_Fp032; |
3900 | 52 | break; |
3901 | 56 | case MVT::f64: |
3902 | 4 | if (X86ScalarSSEf64) |
3903 | 4 | Opc = HasAVX512 ? X86::AVX512_FsFLD0SD : X86::FsFLD0SD;
3904 | 0 | else |
3905 | 0 | Opc = X86::LD_Fp064; |
3906 | 4 | break; |
3907 | 56 | case MVT::f80: |
3908 | 0 | // No f80 support yet. |
3909 | 0 | return 0; |
3910 | 56 | } |
3911 | 56 | |
3912 | 56 | unsigned ResultReg = createResultReg(TLI.getRegClassFor(VT)); |
3913 | 56 | BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), ResultReg); |
3914 | 56 | return ResultReg; |
3915 | 56 | } |
3916 | | |
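
Note why +0.0 gets its own path instead of X86MaterializeFP's constant-pool
load: the FsFLD0SS/FsFLD0SD pseudos can later be expanded to an XOR of the
destination register with itself, so no memory access is needed. A hedged
sketch of the opcode pick (pickFpZero is an illustrative helper, not LLVM
API):

    #include <cstdio>

    static const char *pickFpZero(bool IsF64, bool ScalarSSE, bool AVX512) {
      if (!ScalarSSE) // x87 fallback: push +0.0 onto the FP stack
        return IsF64 ? "LD_Fp064" : "LD_Fp032";
      if (AVX512)
        return IsF64 ? "AVX512_FsFLD0SD" : "AVX512_FsFLD0SS";
      return IsF64 ? "FsFLD0SD" : "FsFLD0SS";
    }

    int main() {
      printf("%s\n", pickFpZero(true, true, false)); // FsFLD0SD
      return 0;
    }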
3917 | | |
3918 | | bool X86FastISel::tryToFoldLoadIntoMI(MachineInstr *MI, unsigned OpNo, |
3919 | 1.22k | const LoadInst *LI) { |
3920 | 1.22k | const Value *Ptr = LI->getPointerOperand(); |
3921 | 1.22k | X86AddressMode AM; |
3922 | 1.22k | if (!X86SelectAddress(Ptr, AM)) |
3923 | 55 | return false; |
3924 | 1.17k | |
3925 | 1.17k | const X86InstrInfo &XII = (const X86InstrInfo &)TII; |
3926 | 1.17k | |
3927 | 1.17k | unsigned Size = DL.getTypeAllocSize(LI->getType()); |
3928 | 1.17k | unsigned Alignment = LI->getAlignment(); |
3929 | 1.17k | |
3930 | 1.17k | if (Alignment == 0) // Ensure that codegen never sees alignment 0 |
3931 | 207 | Alignment = DL.getABITypeAlignment(LI->getType()); |
3932 | 1.17k | |
3933 | 1.17k | SmallVector<MachineOperand, 8> AddrOps; |
3934 | 1.17k | AM.getFullAddress(AddrOps); |
3935 | 1.17k | |
3936 | 1.17k | MachineInstr *Result = XII.foldMemoryOperandImpl( |
3937 | 1.17k | *FuncInfo.MF, *MI, OpNo, AddrOps, FuncInfo.InsertPt, Size, Alignment, |
3938 | 1.17k | /*AllowCommute=*/true); |
3939 | 1.17k | if (!Result) |
3940 | 841 | return false; |
3941 | 331 | |
3942 | 331 | // The index register could be in the wrong register class. Unfortunately,
3943 | 331 | // foldMemoryOperandImpl could have commuted the instruction, so it's not
3944 | 331 | // enough to just look at OpNo plus the offset to the index register. We
3945 | 331 | // actually need to scan the instruction to find the index register and
3946 | 331 | // check whether it has the correct register class.
3947 | 331 | unsigned OperandNo = 0; |
3948 | 331 | for (MachineInstr::mop_iterator I = Result->operands_begin(), |
3949 | 2.71k | E = Result->operands_end(); I != E; ++I, ++OperandNo) {
3950 | 2.37k | MachineOperand &MO = *I; |
3951 | 2.37k | if (!MO.isReg() || MO.isDef() || MO.getReg() != AM.IndexReg)
3952 | 1.69k | continue; |
3953 | 682 | // Found the index reg, now try to rewrite it. |
3954 | 682 | unsigned IndexReg = constrainOperandRegClass(Result->getDesc(), |
3955 | 682 | MO.getReg(), OperandNo); |
3956 | 682 | if (IndexReg == MO.getReg()) |
3957 | 682 | continue; |
3958 | 0 | MO.setReg(IndexReg); |
3959 | 0 | } |
3960 | 331 | |
3961 | 331 | Result->addMemOperand(*FuncInfo.MF, createMachineMemOperandFor(LI)); |
3962 | 331 | Result->cloneInstrSymbols(*FuncInfo.MF, *MI); |
3963 | 331 | MachineBasicBlock::iterator I(MI); |
3964 | 331 | removeDeadCode(I, std::next(I)); |
3965 | 331 | return true; |
3966 | 331 | } |
3967 | | |
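
Because foldMemoryOperandImpl may have commuted the folded instruction, the
loop above cannot assume the index register's operand position; it scans
every operand for a use of AM.IndexReg. A simplified sketch of that scan
(the Operand struct and rewriteIndexUses are hypothetical stand-ins for
MachineOperand and the per-operand constrainOperandRegClass call):

    #include <vector>

    struct Operand { bool IsReg; bool IsDef; unsigned Reg; };

    // Rewrite every *use* of IndexReg to the class-constrained NewReg,
    // skipping immediates and register definitions, as the rescan above
    // does. The real code computes NewReg per operand index.
    static void rewriteIndexUses(std::vector<Operand> &Ops,
                                 unsigned IndexReg, unsigned NewReg) {
      for (Operand &MO : Ops) {
        if (!MO.IsReg || MO.IsDef || MO.Reg != IndexReg)
          continue;
        if (MO.Reg != NewReg)
          MO.Reg = NewReg;
      }
    }

    int main() {
      std::vector<Operand> Ops = {{true, true, 7}, {true, false, 3}};
      rewriteIndexUses(Ops, /*IndexReg=*/3, /*NewReg=*/9);
      return Ops[1].Reg == 9 ? 0 : 1; // index use rewritten
    }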
3968 | | unsigned X86FastISel::fastEmitInst_rrrr(unsigned MachineInstOpcode, |
3969 | | const TargetRegisterClass *RC, |
3970 | | unsigned Op0, bool Op0IsKill, |
3971 | | unsigned Op1, bool Op1IsKill, |
3972 | | unsigned Op2, bool Op2IsKill, |
3973 | 26 | unsigned Op3, bool Op3IsKill) { |
3974 | 26 | const MCInstrDesc &II = TII.get(MachineInstOpcode); |
3975 | 26 | |
3976 | 26 | unsigned ResultReg = createResultReg(RC); |
3977 | 26 | Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs()); |
3978 | 26 | Op1 = constrainOperandRegClass(II, Op1, II.getNumDefs() + 1); |
3979 | 26 | Op2 = constrainOperandRegClass(II, Op2, II.getNumDefs() + 2); |
3980 | 26 | Op3 = constrainOperandRegClass(II, Op3, II.getNumDefs() + 3); |
3981 | 26 | |
3982 | 26 | if (II.getNumDefs() >= 1) |
3983 | 26 | BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg) |
3984 | 26 | .addReg(Op0, getKillRegState(Op0IsKill)) |
3985 | 26 | .addReg(Op1, getKillRegState(Op1IsKill)) |
3986 | 26 | .addReg(Op2, getKillRegState(Op2IsKill)) |
3987 | 26 | .addReg(Op3, getKillRegState(Op3IsKill)); |
3988 | 0 | else { |
3989 | 0 | BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II) |
3990 | 0 | .addReg(Op0, getKillRegState(Op0IsKill)) |
3991 | 0 | .addReg(Op1, getKillRegState(Op1IsKill)) |
3992 | 0 | .addReg(Op2, getKillRegState(Op2IsKill)) |
3993 | 0 | .addReg(Op3, getKillRegState(Op3IsKill)); |
3994 | 0 | BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, |
3995 | 0 | TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]); |
3996 | 0 | } |
3997 | 26 | return ResultReg; |
3998 | 26 | } |
3999 | | |
4000 | | |
4001 | | namespace llvm { |
4002 | | FastISel *X86::createFastISel(FunctionLoweringInfo &funcInfo, |
4003 | 9.56k | const TargetLibraryInfo *libInfo) { |
4004 | 9.56k | return new X86FastISel(funcInfo, libInfo); |
4005 | 9.56k | } |
4006 | | } |