/Users/buildslave/jenkins/sharedspace/clang-stage2-coverage-R@2/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp
Line | Count | Source |
1 | | //===-- SystemZISelLowering.cpp - SystemZ DAG lowering implementation -----===// |
2 | | // |
3 | | // The LLVM Compiler Infrastructure |
4 | | // |
5 | | // This file is distributed under the University of Illinois Open Source |
6 | | // License. See LICENSE.TXT for details. |
7 | | // |
8 | | //===----------------------------------------------------------------------===// |
9 | | // |
10 | | // This file implements the SystemZTargetLowering class. |
11 | | // |
12 | | //===----------------------------------------------------------------------===// |
13 | | |
14 | | #include "SystemZISelLowering.h" |
15 | | #include "SystemZCallingConv.h" |
16 | | #include "SystemZConstantPoolValue.h" |
17 | | #include "SystemZMachineFunctionInfo.h" |
18 | | #include "SystemZTargetMachine.h" |
19 | | #include "llvm/CodeGen/CallingConvLower.h" |
20 | | #include "llvm/CodeGen/MachineInstrBuilder.h" |
21 | | #include "llvm/CodeGen/MachineRegisterInfo.h" |
22 | | #include "llvm/CodeGen/TargetLoweringObjectFileImpl.h" |
23 | | #include "llvm/IR/Intrinsics.h" |
24 | | #include "llvm/IR/IntrinsicInst.h" |
25 | | #include "llvm/Support/CommandLine.h" |
26 | | #include "llvm/Support/KnownBits.h" |
27 | | #include <cctype> |
28 | | |
29 | | using namespace llvm; |
30 | | |
31 | | #define DEBUG_TYPE "systemz-lower" |
32 | | |
33 | | namespace { |
34 | | // Represents a sequence for extracting a 0/1 value from an IPM result: |
35 | | // (((X ^ XORValue) + AddValue) >> Bit) |
36 | | struct IPMConversion { |
37 | | IPMConversion(unsigned xorValue, int64_t addValue, unsigned bit) |
38 | 200 | : XORValue(xorValue), AddValue(addValue), Bit(bit) {} |
39 | | |
40 | | int64_t XORValue; |
41 | | int64_t AddValue; |
42 | | unsigned Bit; |
43 | | }; |
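Editorial illustration (not part of the source file): the formula above is easiest to check with concrete numbers. Assuming the condition code lands at bit 28 of the IPM result and taking one plausible conversion, {XORValue = 0, AddValue = -(1 << 28), Bit = 31}, the shifted sum is 1 exactly when CC == 0:

// Editorial sketch of (((X ^ XORValue) + AddValue) >> Bit) for CC = 0..3.
#include <cstdint>
static unsigned ipmExtractIsCC0(unsigned CC) {
  uint32_t X = CC << 28;                           // simulated IPM result (CC at bits 28-29)
  uint32_t Sum = (X ^ 0u) + uint32_t(-(1 << 28));  // subtraction borrows into bit 31 only for CC == 0
  return Sum >> 31;                                // 1 when CC == 0, otherwise 0
}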
44 | | |
45 | | // Represents information about a comparison. |
46 | | struct Comparison { |
47 | | Comparison(SDValue Op0In, SDValue Op1In) |
48 | 1.49k | : Op0(Op0In), Op1(Op1In), Opcode(0), ICmpType(0), CCValid(0), CCMask(0) {} |
49 | | |
50 | | // The operands to the comparison. |
51 | | SDValue Op0, Op1; |
52 | | |
53 | | // The opcode that should be used to compare Op0 and Op1. |
54 | | unsigned Opcode; |
55 | | |
56 | | // A SystemZICMP value. Only used for integer comparisons. |
57 | | unsigned ICmpType; |
58 | | |
59 | | // The mask of CC values that Opcode can produce. |
60 | | unsigned CCValid; |
61 | | |
62 | | // The mask of CC values for which the original condition is true. |
63 | | unsigned CCMask; |
64 | | }; |
65 | | } // end anonymous namespace |
66 | | |
67 | | // Classify VT as either 32 or 64 bit. |
68 | 404 | static bool is32Bit(EVT VT) { |
69 | 404 | switch (VT.getSimpleVT().SimpleTy) { |
70 | 174 | case MVT::i32: |
71 | 174 | return true; |
72 | 230 | case MVT::i64: |
73 | 230 | return false; |
74 | 0 | default: |
75 | 0 | llvm_unreachable("Unsupported type"); |
76 | 0 | } |
77 | 0 | } |
78 | | |
79 | | // Return a version of MachineOperand that can be safely used before the |
80 | | // final use. |
81 | 1.07k | static MachineOperand earlyUseOperand(MachineOperand Op) { |
82 | 1.07k | if (Op.isReg()) |
83 | 786 | Op.setIsKill(false); |
84 | 1.07k | return Op; |
85 | 1.07k | } |
86 | | |
87 | | SystemZTargetLowering::SystemZTargetLowering(const TargetMachine &TM, |
88 | | const SystemZSubtarget &STI) |
89 | 857 | : TargetLowering(TM), Subtarget(STI) { |
90 | 857 | MVT PtrVT = MVT::getIntegerVT(8 * TM.getPointerSize()); |
91 | 857 | |
92 | 857 | // Set up the register classes. |
93 | 857 | if (Subtarget.hasHighWord()) |
94 | 330 | addRegisterClass(MVT::i32, &SystemZ::GRX32BitRegClass); |
95 | 857 | else |
96 | 527 | addRegisterClass(MVT::i32, &SystemZ::GR32BitRegClass); |
97 | 857 | addRegisterClass(MVT::i64, &SystemZ::GR64BitRegClass); |
98 | 857 | if (Subtarget.hasVector()) { |
99 | 269 | addRegisterClass(MVT::f32, &SystemZ::VR32BitRegClass); |
100 | 269 | addRegisterClass(MVT::f64, &SystemZ::VR64BitRegClass); |
101 | 857 | } else { |
102 | 588 | addRegisterClass(MVT::f32, &SystemZ::FP32BitRegClass); |
103 | 588 | addRegisterClass(MVT::f64, &SystemZ::FP64BitRegClass); |
104 | 588 | } |
105 | 857 | if (Subtarget.hasVectorEnhancements1()) |
106 | 62 | addRegisterClass(MVT::f128, &SystemZ::VR128BitRegClass); |
107 | 857 | else |
108 | 795 | addRegisterClass(MVT::f128, &SystemZ::FP128BitRegClass); |
109 | 857 | |
110 | 857 | if (Subtarget.hasVector()) { |
111 | 269 | addRegisterClass(MVT::v16i8, &SystemZ::VR128BitRegClass); |
112 | 269 | addRegisterClass(MVT::v8i16, &SystemZ::VR128BitRegClass); |
113 | 269 | addRegisterClass(MVT::v4i32, &SystemZ::VR128BitRegClass); |
114 | 269 | addRegisterClass(MVT::v2i64, &SystemZ::VR128BitRegClass); |
115 | 269 | addRegisterClass(MVT::v4f32, &SystemZ::VR128BitRegClass); |
116 | 269 | addRegisterClass(MVT::v2f64, &SystemZ::VR128BitRegClass); |
117 | 269 | } |
118 | 857 | |
119 | 857 | // Compute derived properties from the register classes |
120 | 857 | computeRegisterProperties(Subtarget.getRegisterInfo()); |
121 | 857 | |
122 | 857 | // Set up special registers. |
123 | 857 | setStackPointerRegisterToSaveRestore(SystemZ::R15D); |
124 | 857 | |
125 | 857 | // TODO: It may be better to default to latency-oriented scheduling, however |
126 | 857 | // LLVM's current latency-oriented scheduler can't handle physreg definitions |
127 | 857 | // such as SystemZ has with CC, so set this to the register-pressure |
128 | 857 | // scheduler, because it can. |
129 | 857 | setSchedulingPreference(Sched::RegPressure); |
130 | 857 | |
131 | 857 | setBooleanContents(ZeroOrOneBooleanContent); |
132 | 857 | setBooleanVectorContents(ZeroOrNegativeOneBooleanContent); |
133 | 857 | |
134 | 857 | // Instructions are strings of 2-byte aligned 2-byte values. |
135 | 857 | setMinFunctionAlignment(2); |
136 | 857 | |
137 | 857 | // Handle operations that are handled in a similar way for all types. |
138 | 857 | for (unsigned I = MVT::FIRST_INTEGER_VALUETYPE; |
139 | 11.1k | I <= MVT::LAST_FP_VALUETYPE; |
140 | 10.2k | ++I) { |
141 | 10.2k | MVT VT = MVT::SimpleValueType(I); |
142 | 10.2k | if (isTypeLegal(VT)) { |
143 | 4.28k | // Lower SET_CC into an IPM-based sequence. |
144 | 4.28k | setOperationAction(ISD::SETCC, VT, Custom); |
145 | 4.28k | |
146 | 4.28k | // Expand SELECT(C, A, B) into SELECT_CC(X, 0, A, B, NE). |
147 | 4.28k | setOperationAction(ISD::SELECT, VT, Expand); |
148 | 4.28k | |
149 | 4.28k | // Lower SELECT_CC and BR_CC into separate comparisons and branches. |
150 | 4.28k | setOperationAction(ISD::SELECT_CC, VT, Custom); |
151 | 4.28k | setOperationAction(ISD::BR_CC, VT, Custom); |
152 | 4.28k | } |
153 | 10.2k | } |
154 | 857 | |
155 | 857 | // Expand jump table branches as address arithmetic followed by an |
156 | 857 | // indirect jump. |
157 | 857 | setOperationAction(ISD::BR_JT, MVT::Other, Expand); |
158 | 857 | |
159 | 857 | // Expand BRCOND into a BR_CC (see above). |
160 | 857 | setOperationAction(ISD::BRCOND, MVT::Other, Expand); |
161 | 857 | |
162 | 857 | // Handle integer types. |
163 | 857 | for (unsigned I = MVT::FIRST_INTEGER_VALUETYPE; |
164 | 5.99k | I <= MVT::LAST_INTEGER_VALUETYPE; |
165 | 5.14k | ++I) { |
166 | 5.14k | MVT VT = MVT::SimpleValueType(I); |
167 | 5.14k | if (isTypeLegal(VT)) { |
168 | 1.71k | // Expand individual DIV and REMs into DIVREMs. |
169 | 1.71k | setOperationAction(ISD::SDIV, VT, Expand); |
170 | 1.71k | setOperationAction(ISD::UDIV, VT, Expand); |
171 | 1.71k | setOperationAction(ISD::SREM, VT, Expand); |
172 | 1.71k | setOperationAction(ISD::UREM, VT, Expand); |
173 | 1.71k | setOperationAction(ISD::SDIVREM, VT, Custom); |
174 | 1.71k | setOperationAction(ISD::UDIVREM, VT, Custom); |
175 | 1.71k | |
176 | 1.71k | // Lower ATOMIC_LOAD and ATOMIC_STORE into normal volatile loads and |
177 | 1.71k | // stores, putting a serialization instruction after the stores. |
178 | 1.71k | setOperationAction(ISD::ATOMIC_LOAD, VT, Custom); |
179 | 1.71k | setOperationAction(ISD::ATOMIC_STORE, VT, Custom); |
180 | 1.71k | |
181 | 1.71k | // Lower ATOMIC_LOAD_SUB into ATOMIC_LOAD_ADD if LAA and LAAG are |
182 | 1.71k | // available, or if the operand is constant. |
183 | 1.71k | setOperationAction(ISD::ATOMIC_LOAD_SUB, VT, Custom); |
184 | 1.71k | |
185 | 1.71k | // Use POPCNT on z196 and above. |
186 | 1.71k | if (Subtarget.hasPopulationCount()) |
187 | 660 | setOperationAction(ISD::CTPOP, VT, Custom); |
188 | 1.71k | else |
189 | 1.05k | setOperationAction(ISD::CTPOP, VT, Expand); |
190 | 1.71k | |
191 | 1.71k | // No special instructions for these. |
192 | 1.71k | setOperationAction(ISD::CTTZ, VT, Expand); |
193 | 1.71k | setOperationAction(ISD::ROTR, VT, Expand); |
194 | 1.71k | |
195 | 1.71k | // Use *MUL_LOHI where possible instead of MULH*. |
196 | 1.71k | setOperationAction(ISD::MULHS, VT, Expand); |
197 | 1.71k | setOperationAction(ISD::MULHU, VT, Expand); |
198 | 1.71k | setOperationAction(ISD::SMUL_LOHI, VT, Custom); |
199 | 1.71k | setOperationAction(ISD::UMUL_LOHI, VT, Custom); |
200 | 1.71k | |
201 | 1.71k | // Only z196 and above have native support for conversions to unsigned. |
202 | 1.71k | // On z10, promoting to i64 doesn't generate an inexact condition for |
203 | 1.71k | // values that are outside the i32 range but in the i64 range, so use |
204 | 1.71k | // the default expansion. |
205 | 1.71k | if (!Subtarget.hasFPExtension()) |
206 | 1.05k | setOperationAction(ISD::FP_TO_UINT, VT, Expand); |
207 | 1.71k | } |
208 | 5.14k | } |
209 | 857 | |
210 | 857 | // Type legalization will convert 8- and 16-bit atomic operations into |
211 | 857 | // forms that operate on i32s (but still keeping the original memory VT). |
212 | 857 | // Lower them into full i32 operations. |
213 | 857 | setOperationAction(ISD::ATOMIC_SWAP, MVT::i32, Custom); |
214 | 857 | setOperationAction(ISD::ATOMIC_LOAD_ADD, MVT::i32, Custom); |
215 | 857 | setOperationAction(ISD::ATOMIC_LOAD_SUB, MVT::i32, Custom); |
216 | 857 | setOperationAction(ISD::ATOMIC_LOAD_AND, MVT::i32, Custom); |
217 | 857 | setOperationAction(ISD::ATOMIC_LOAD_OR, MVT::i32, Custom); |
218 | 857 | setOperationAction(ISD::ATOMIC_LOAD_XOR, MVT::i32, Custom); |
219 | 857 | setOperationAction(ISD::ATOMIC_LOAD_NAND, MVT::i32, Custom); |
220 | 857 | setOperationAction(ISD::ATOMIC_LOAD_MIN, MVT::i32, Custom); |
221 | 857 | setOperationAction(ISD::ATOMIC_LOAD_MAX, MVT::i32, Custom); |
222 | 857 | setOperationAction(ISD::ATOMIC_LOAD_UMIN, MVT::i32, Custom); |
223 | 857 | setOperationAction(ISD::ATOMIC_LOAD_UMAX, MVT::i32, Custom); |
224 | 857 | |
225 | 857 | // Even though i128 is not a legal type, we still need to custom lower |
226 | 857 | // the atomic operations in order to exploit SystemZ instructions. |
227 | 857 | setOperationAction(ISD::ATOMIC_LOAD, MVT::i128, Custom); |
228 | 857 | setOperationAction(ISD::ATOMIC_STORE, MVT::i128, Custom); |
229 | 857 | |
230 | 857 | // We can use the CC result of compare-and-swap to implement |
231 | 857 | // the "success" result of ATOMIC_CMP_SWAP_WITH_SUCCESS. |
232 | 857 | setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, MVT::i32, Custom); |
233 | 857 | setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, MVT::i64, Custom); |
234 | 857 | setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, MVT::i128, Custom); |
235 | 857 | |
236 | 857 | setOperationAction(ISD::ATOMIC_FENCE, MVT::Other, Custom); |
237 | 857 | |
238 | 857 | // Traps are legal, as we will convert them to "j .+2". |
239 | 857 | setOperationAction(ISD::TRAP, MVT::Other, Legal); |
240 | 857 | |
241 | 857 | // z10 has instructions for signed but not unsigned FP conversion. |
242 | 857 | // Handle unsigned 32-bit types as signed 64-bit types. |
243 | 857 | if (!Subtarget.hasFPExtension()) { |
244 | 527 | setOperationAction(ISD::UINT_TO_FP, MVT::i32, Promote); |
245 | 527 | setOperationAction(ISD::UINT_TO_FP, MVT::i64, Expand); |
246 | 527 | } |
247 | 857 | |
248 | 857 | // We have native support for a 64-bit CTLZ, via FLOGR. |
249 | 857 | setOperationAction(ISD::CTLZ, MVT::i32, Promote); |
250 | 857 | setOperationAction(ISD::CTLZ, MVT::i64, Legal); |
251 | 857 | |
252 | 857 | // Give LowerOperation the chance to replace 64-bit ORs with subregs. |
253 | 857 | setOperationAction(ISD::OR, MVT::i64, Custom); |
254 | 857 | |
255 | 857 | // FIXME: Can we support these natively? |
256 | 857 | setOperationAction(ISD::SRL_PARTS, MVT::i64, Expand); |
257 | 857 | setOperationAction(ISD::SHL_PARTS, MVT::i64, Expand); |
258 | 857 | setOperationAction(ISD::SRA_PARTS, MVT::i64, Expand); |
259 | 857 | |
260 | 857 | // We have native instructions for i8, i16 and i32 extensions, but not i1. |
261 | 857 | setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand); |
262 | 5.14k | for (MVT VT : MVT::integer_valuetypes()) { |
263 | 5.14k | setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote); |
264 | 5.14k | setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i1, Promote); |
265 | 5.14k | setLoadExtAction(ISD::EXTLOAD, VT, MVT::i1, Promote); |
266 | 5.14k | } |
267 | 857 | |
268 | 857 | // Handle the various types of symbolic address. |
269 | 857 | setOperationAction(ISD::ConstantPool, PtrVT, Custom); |
270 | 857 | setOperationAction(ISD::GlobalAddress, PtrVT, Custom); |
271 | 857 | setOperationAction(ISD::GlobalTLSAddress, PtrVT, Custom); |
272 | 857 | setOperationAction(ISD::BlockAddress, PtrVT, Custom); |
273 | 857 | setOperationAction(ISD::JumpTable, PtrVT, Custom); |
274 | 857 | |
275 | 857 | // We need to handle dynamic allocations specially because of the |
276 | 857 | // 160-byte area at the bottom of the stack. |
277 | 857 | setOperationAction(ISD::DYNAMIC_STACKALLOC, PtrVT, Custom); |
278 | 857 | setOperationAction(ISD::GET_DYNAMIC_AREA_OFFSET, PtrVT, Custom); |
279 | 857 | |
280 | 857 | // Use custom expanders so that we can force the function to use |
281 | 857 | // a frame pointer. |
282 | 857 | setOperationAction(ISD::STACKSAVE, MVT::Other, Custom); |
283 | 857 | setOperationAction(ISD::STACKRESTORE, MVT::Other, Custom); |
284 | 857 | |
285 | 857 | // Handle prefetches with PFD or PFDRL. |
286 | 857 | setOperationAction(ISD::PREFETCH, MVT::Other, Custom); |
287 | 857 | |
288 | 80.5k | for (MVT VT : MVT::vector_valuetypes()) { |
289 | 80.5k | // Assume by default that all vector operations need to be expanded. |
290 | 20.8M | for (unsigned Opcode = 0; Opcode < ISD::BUILTIN_OP_END; ++Opcode) |
291 | 20.7M | if (getOperationAction(Opcode, VT) == Legal) |
292 | 18.2M | setOperationAction(Opcode, VT, Expand); |
293 | 80.5k | |
294 | 80.5k | // Likewise all truncating stores and extending loads. |
295 | 7.57M | for (MVT InnerVT : MVT::vector_valuetypes()) { |
296 | 7.57M | setTruncStoreAction(VT, InnerVT, Expand); |
297 | 7.57M | setLoadExtAction(ISD::SEXTLOAD, VT, InnerVT, Expand); |
298 | 7.57M | setLoadExtAction(ISD::ZEXTLOAD, VT, InnerVT, Expand); |
299 | 7.57M | setLoadExtAction(ISD::EXTLOAD, VT, InnerVT, Expand); |
300 | 7.57M | } |
301 | 80.5k | |
302 | 80.5k | if (isTypeLegal(VT)) { |
303 | 1.61k | // These operations are legal for anything that can be stored in a |
304 | 1.61k | // vector register, even if there is no native support for the format |
305 | 1.61k | // as such. In particular, we can do these for v4f32 even though there |
306 | 1.61k | // are no specific instructions for that format. |
307 | 1.61k | setOperationAction(ISD::LOAD, VT, Legal); |
308 | 1.61k | setOperationAction(ISD::STORE, VT, Legal); |
309 | 1.61k | setOperationAction(ISD::VSELECT, VT, Legal); |
310 | 1.61k | setOperationAction(ISD::BITCAST, VT, Legal); |
311 | 1.61k | setOperationAction(ISD::UNDEF, VT, Legal); |
312 | 1.61k | |
313 | 1.61k | // Likewise, except that we need to replace the nodes with something |
314 | 1.61k | // more specific. |
315 | 1.61k | setOperationAction(ISD::BUILD_VECTOR, VT, Custom); |
316 | 1.61k | setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom); |
317 | 1.61k | } |
318 | 80.5k | } |
319 | 857 | |
320 | 857 | // Handle integer vector types. |
321 | 59.9k | for (MVT VT : MVT::integer_vector_valuetypes()) { |
322 | 59.9k | if (isTypeLegal(VT)) { |
323 | 1.07k | // These operations have direct equivalents. |
324 | 1.07k | setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Legal); |
325 | 1.07k | setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Legal); |
326 | 1.07k | setOperationAction(ISD::ADD, VT, Legal); |
327 | 1.07k | setOperationAction(ISD::SUB, VT, Legal); |
328 | 1.07k | if (VT != MVT::v2i64) |
329 | 807 | setOperationAction(ISD::MUL, VT, Legal); |
330 | 1.07k | setOperationAction(ISD::AND, VT, Legal); |
331 | 1.07k | setOperationAction(ISD::OR, VT, Legal); |
332 | 1.07k | setOperationAction(ISD::XOR, VT, Legal); |
333 | 1.07k | if (Subtarget.hasVectorEnhancements1()) |
334 | 248 | setOperationAction(ISD::CTPOP, VT, Legal); |
335 | 1.07k | else |
336 | 828 | setOperationAction(ISD::CTPOP, VT, Custom); |
337 | 1.07k | setOperationAction(ISD::CTTZ, VT, Legal); |
338 | 1.07k | setOperationAction(ISD::CTLZ, VT, Legal); |
339 | 1.07k | |
340 | 1.07k | // Convert a GPR scalar to a vector by inserting it into element 0. |
341 | 1.07k | setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Custom); |
342 | 1.07k | |
343 | 1.07k | // Use a series of unpacks for extensions. |
344 | 1.07k | setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, VT, Custom); |
345 | 1.07k | setOperationAction(ISD::ZERO_EXTEND_VECTOR_INREG, VT, Custom); |
346 | 1.07k | |
347 | 1.07k | // Detect shifts by a scalar amount and convert them into |
348 | 1.07k | // V*_BY_SCALAR. |
349 | 1.07k | setOperationAction(ISD::SHL, VT, Custom); |
350 | 1.07k | setOperationAction(ISD::SRA, VT, Custom); |
351 | 1.07k | setOperationAction(ISD::SRL, VT, Custom); |
352 | 1.07k | |
353 | 1.07k | // At present ROTL isn't matched by DAGCombiner. ROTR should be |
354 | 1.07k | // converted into ROTL. |
355 | 1.07k | setOperationAction(ISD::ROTL, VT, Expand); |
356 | 1.07k | setOperationAction(ISD::ROTR, VT, Expand); |
357 | 1.07k | |
358 | 1.07k | // Map SETCCs onto one of VCE, VCH or VCHL, swapping the operands |
359 | 1.07k | // and inverting the result as necessary. |
360 | 1.07k | setOperationAction(ISD::SETCC, VT, Custom); |
361 | 1.07k | } |
362 | 59.9k | } |
363 | 857 | |
364 | 857 | if (Subtarget.hasVector()) { |
365 | 269 | // There should be no need to check for float types other than v2f64 |
366 | 269 | // since <2 x f32> isn't a legal type. |
367 | 269 | setOperationAction(ISD::FP_TO_SINT, MVT::v2i64, Legal); |
368 | 269 | setOperationAction(ISD::FP_TO_SINT, MVT::v2f64, Legal); |
369 | 269 | setOperationAction(ISD::FP_TO_UINT, MVT::v2i64, Legal); |
370 | 269 | setOperationAction(ISD::FP_TO_UINT, MVT::v2f64, Legal); |
371 | 269 | setOperationAction(ISD::SINT_TO_FP, MVT::v2i64, Legal); |
372 | 269 | setOperationAction(ISD::SINT_TO_FP, MVT::v2f64, Legal); |
373 | 269 | setOperationAction(ISD::UINT_TO_FP, MVT::v2i64, Legal); |
374 | 269 | setOperationAction(ISD::UINT_TO_FP, MVT::v2f64, Legal); |
375 | 269 | } |
376 | 857 | |
377 | 857 | // Handle floating-point types. |
378 | 857 | for (unsigned I = MVT::FIRST_FP_VALUETYPE; |
379 | 5.99k | I <= MVT::LAST_FP_VALUETYPE; |
380 | 5.14k | ++I) { |
381 | 5.14k | MVT VT = MVT::SimpleValueType(I); |
382 | 5.14k | if (isTypeLegal(VT)) { |
383 | 2.57k | // We can use FI for FRINT. |
384 | 2.57k | setOperationAction(ISD::FRINT, VT, Legal); |
385 | 2.57k | |
386 | 2.57k | // We can use the extended form of FI for other rounding operations. |
387 | 2.57k | if (Subtarget.hasFPExtension()) { |
388 | 990 | setOperationAction(ISD::FNEARBYINT, VT, Legal); |
389 | 990 | setOperationAction(ISD::FFLOOR, VT, Legal); |
390 | 990 | setOperationAction(ISD::FCEIL, VT, Legal); |
391 | 990 | setOperationAction(ISD::FTRUNC, VT, Legal); |
392 | 990 | setOperationAction(ISD::FROUND, VT, Legal); |
393 | 990 | } |
394 | 2.57k | |
395 | 2.57k | // No special instructions for these. |
396 | 2.57k | setOperationAction(ISD::FSIN, VT, Expand); |
397 | 2.57k | setOperationAction(ISD::FCOS, VT, Expand); |
398 | 2.57k | setOperationAction(ISD::FSINCOS, VT, Expand); |
399 | 2.57k | setOperationAction(ISD::FREM, VT, Expand); |
400 | 2.57k | setOperationAction(ISD::FPOW, VT, Expand); |
401 | 2.57k | } |
402 | 5.14k | } |
403 | 857 | |
404 | 857 | // Handle floating-point vector types. |
405 | 857 | if (Subtarget.hasVector()) { |
406 | 269 | // Scalar-to-vector conversion is just a subreg. |
407 | 269 | setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4f32, Legal); |
408 | 269 | setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v2f64, Legal); |
409 | 269 | |
410 | 269 | // Some insertions and extractions can be done directly but others |
411 | 269 | // need to go via integers. |
412 | 269 | setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f32, Custom); |
413 | 269 | setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2f64, Custom); |
414 | 269 | setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f32, Custom); |
415 | 269 | setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f64, Custom); |
416 | 269 | |
417 | 269 | // These operations have direct equivalents. |
418 | 269 | setOperationAction(ISD::FADD, MVT::v2f64, Legal); |
419 | 269 | setOperationAction(ISD::FNEG, MVT::v2f64, Legal); |
420 | 269 | setOperationAction(ISD::FSUB, MVT::v2f64, Legal); |
421 | 269 | setOperationAction(ISD::FMUL, MVT::v2f64, Legal); |
422 | 269 | setOperationAction(ISD::FMA, MVT::v2f64, Legal); |
423 | 269 | setOperationAction(ISD::FDIV, MVT::v2f64, Legal); |
424 | 269 | setOperationAction(ISD::FABS, MVT::v2f64, Legal); |
425 | 269 | setOperationAction(ISD::FSQRT, MVT::v2f64, Legal); |
426 | 269 | setOperationAction(ISD::FRINT, MVT::v2f64, Legal); |
427 | 269 | setOperationAction(ISD::FNEARBYINT, MVT::v2f64, Legal); |
428 | 269 | setOperationAction(ISD::FFLOOR, MVT::v2f64, Legal); |
429 | 269 | setOperationAction(ISD::FCEIL, MVT::v2f64, Legal); |
430 | 269 | setOperationAction(ISD::FTRUNC, MVT::v2f64, Legal); |
431 | 269 | setOperationAction(ISD::FROUND, MVT::v2f64, Legal); |
432 | 269 | } |
433 | 857 | |
434 | 857 | // The vector enhancements facility 1 has instructions for these. |
435 | 857 | if (Subtarget.hasVectorEnhancements1()) { |
436 | 62 | setOperationAction(ISD::FADD, MVT::v4f32, Legal); |
437 | 62 | setOperationAction(ISD::FNEG, MVT::v4f32, Legal); |
438 | 62 | setOperationAction(ISD::FSUB, MVT::v4f32, Legal); |
439 | 62 | setOperationAction(ISD::FMUL, MVT::v4f32, Legal); |
440 | 62 | setOperationAction(ISD::FMA, MVT::v4f32, Legal); |
441 | 62 | setOperationAction(ISD::FDIV, MVT::v4f32, Legal); |
442 | 62 | setOperationAction(ISD::FABS, MVT::v4f32, Legal); |
443 | 62 | setOperationAction(ISD::FSQRT, MVT::v4f32, Legal); |
444 | 62 | setOperationAction(ISD::FRINT, MVT::v4f32, Legal); |
445 | 62 | setOperationAction(ISD::FNEARBYINT, MVT::v4f32, Legal); |
446 | 62 | setOperationAction(ISD::FFLOOR, MVT::v4f32, Legal); |
447 | 62 | setOperationAction(ISD::FCEIL, MVT::v4f32, Legal); |
448 | 62 | setOperationAction(ISD::FTRUNC, MVT::v4f32, Legal); |
449 | 62 | setOperationAction(ISD::FROUND, MVT::v4f32, Legal); |
450 | 62 | |
451 | 62 | setOperationAction(ISD::FMAXNUM, MVT::f64, Legal); |
452 | 62 | setOperationAction(ISD::FMAXNAN, MVT::f64, Legal); |
453 | 62 | setOperationAction(ISD::FMINNUM, MVT::f64, Legal); |
454 | 62 | setOperationAction(ISD::FMINNAN, MVT::f64, Legal); |
455 | 62 | |
456 | 62 | setOperationAction(ISD::FMAXNUM, MVT::v2f64, Legal); |
457 | 62 | setOperationAction(ISD::FMAXNAN, MVT::v2f64, Legal); |
458 | 62 | setOperationAction(ISD::FMINNUM, MVT::v2f64, Legal); |
459 | 62 | setOperationAction(ISD::FMINNAN, MVT::v2f64, Legal); |
460 | 62 | |
461 | 62 | setOperationAction(ISD::FMAXNUM, MVT::f32, Legal); |
462 | 62 | setOperationAction(ISD::FMAXNAN, MVT::f32, Legal); |
463 | 62 | setOperationAction(ISD::FMINNUM, MVT::f32, Legal); |
464 | 62 | setOperationAction(ISD::FMINNAN, MVT::f32, Legal); |
465 | 62 | |
466 | 62 | setOperationAction(ISD::FMAXNUM, MVT::v4f32, Legal); |
467 | 62 | setOperationAction(ISD::FMAXNAN, MVT::v4f32, Legal); |
468 | 62 | setOperationAction(ISD::FMINNUM, MVT::v4f32, Legal); |
469 | 62 | setOperationAction(ISD::FMINNAN, MVT::v4f32, Legal); |
470 | 62 | |
471 | 62 | setOperationAction(ISD::FMAXNUM, MVT::f128, Legal); |
472 | 62 | setOperationAction(ISD::FMAXNAN, MVT::f128, Legal); |
473 | 62 | setOperationAction(ISD::FMINNUM, MVT::f128, Legal); |
474 | 62 | setOperationAction(ISD::FMINNAN, MVT::f128, Legal); |
475 | 62 | } |
476 | 857 | |
477 | 857 | // We have fused multiply-addition for f32 and f64 but not f128. |
478 | 857 | setOperationAction(ISD::FMA, MVT::f32, Legal); |
479 | 857 | setOperationAction(ISD::FMA, MVT::f64, Legal); |
480 | 857 | if (Subtarget.hasVectorEnhancements1()) |
481 | 62 | setOperationAction(ISD::FMA, MVT::f128, Legal); |
482 | 857 | else |
483 | 795 | setOperationAction(ISD::FMA, MVT::f128, Expand); |
484 | 857 | |
485 | 857 | // We don't have a copysign instruction on vector registers. |
486 | 857 | if (Subtarget.hasVectorEnhancements1()) |
487 | 62 | setOperationAction(ISD::FCOPYSIGN, MVT::f128, Expand); |
488 | 857 | |
489 | 857 | // Needed so that we don't try to implement f128 constant loads using |
490 | 857 | // a load-and-extend of a f80 constant (in cases where the constant |
491 | 857 | // would fit in an f80). |
492 | 857 | for (MVT VT : MVT::fp_valuetypes()) |
493 | 5.14k | setLoadExtAction(ISD::EXTLOAD, VT, MVT::f80, Expand); |
494 | 857 | |
495 | 857 | // We don't have extending load instructions on vector registers. |
496 | 857 | if (Subtarget.hasVectorEnhancements1()) { |
497 | 62 | setLoadExtAction(ISD::EXTLOAD, MVT::f128, MVT::f32, Expand); |
498 | 62 | setLoadExtAction(ISD::EXTLOAD, MVT::f128, MVT::f64, Expand); |
499 | 62 | } |
500 | 857 | |
501 | 857 | // Floating-point truncation and stores need to be done separately. |
502 | 857 | setTruncStoreAction(MVT::f64, MVT::f32, Expand); |
503 | 857 | setTruncStoreAction(MVT::f128, MVT::f32, Expand); |
504 | 857 | setTruncStoreAction(MVT::f128, MVT::f64, Expand); |
505 | 857 | |
506 | 857 | // We have 64-bit FPR<->GPR moves, but need special handling for |
507 | 857 | // 32-bit forms. |
508 | 857 | if (!Subtarget.hasVector()) { |
509 | 588 | setOperationAction(ISD::BITCAST, MVT::i32, Custom); |
510 | 588 | setOperationAction(ISD::BITCAST, MVT::f32, Custom); |
511 | 588 | } |
512 | 857 | |
513 | 857 | // VASTART and VACOPY need to deal with the SystemZ-specific varargs |
514 | 857 | // structure, but VAEND is a no-op. |
515 | 857 | setOperationAction(ISD::VASTART, MVT::Other, Custom); |
516 | 857 | setOperationAction(ISD::VACOPY, MVT::Other, Custom); |
517 | 857 | setOperationAction(ISD::VAEND, MVT::Other, Expand); |
518 | 857 | |
519 | 857 | // Codes for which we want to perform some z-specific combinations. |
520 | 857 | setTargetDAGCombine(ISD::SIGN_EXTEND); |
521 | 857 | setTargetDAGCombine(ISD::STORE); |
522 | 857 | setTargetDAGCombine(ISD::EXTRACT_VECTOR_ELT); |
523 | 857 | setTargetDAGCombine(ISD::FP_ROUND); |
524 | 857 | setTargetDAGCombine(ISD::BSWAP); |
525 | 857 | setTargetDAGCombine(ISD::SHL); |
526 | 857 | setTargetDAGCombine(ISD::SRA); |
527 | 857 | setTargetDAGCombine(ISD::SRL); |
528 | 857 | setTargetDAGCombine(ISD::ROTL); |
529 | 857 | |
530 | 857 | // Handle intrinsics. |
531 | 857 | setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::Other, Custom); |
532 | 857 | setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom); |
533 | 857 | |
534 | 857 | // We want to use MVC in preference to even a single load/store pair. |
535 | 857 | MaxStoresPerMemcpy = 0; |
536 | 857 | MaxStoresPerMemcpyOptSize = 0; |
537 | 857 | |
538 | 857 | // The main memset sequence is a byte store followed by an MVC. |
539 | 857 | // Two STC or MV..I stores win over that, but the kind of fused stores |
540 | 857 | // generated by target-independent code don't when the byte value is |
541 | 857 | // variable. E.g. "STC <reg>;MHI <reg>,257;STH <reg>" is not better |
542 | 857 | // than "STC;MVC". Handle the choice in target-specific code instead. |
543 | 857 | MaxStoresPerMemset = 0; |
544 | 857 | MaxStoresPerMemsetOptSize = 0; |
545 | 857 | } |
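The constructor only fills in action tables; the SelectionDAG legalizer consults them later. As a rough editorial sketch (not code from this file, and the TargetLowering header path varies between LLVM versions), such a query looks like this:

// Editorial sketch: reading back an action registered by the constructor above.
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/TargetLowering.h"
using namespace llvm;

static bool setCCIsCustomForI32(const TargetLowering &TLI) {
  // SETCC on i32 was marked Custom above, so the legalizer routes such nodes
  // through SystemZTargetLowering::LowerOperation.
  return TLI.getOperationAction(ISD::SETCC, MVT::i32) == TargetLowering::Custom;
}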
546 | | |
547 | | EVT SystemZTargetLowering::getSetCCResultType(const DataLayout &DL, |
548 | 7.54k | LLVMContext &, EVT VT) const { |
549 | 7.54k | if (!VT.isVector()) |
550 | 3.60k | return MVT::i32; |
551 | 3.94k | return VT.changeVectorElementTypeToInteger(); |
552 | 3.94k | } |
553 | | |
554 | 1.35k | bool SystemZTargetLowering::isFMAFasterThanFMulAndFAdd(EVT VT) const { |
555 | 1.35k | VT = VT.getScalarType(); |
556 | 1.35k | |
557 | 1.35k | if (!VT.isSimple()) |
558 | 0 | return false; |
559 | 1.35k | |
560 | 1.35k | switch (VT.getSimpleVT().SimpleTy) { |
561 | 1.26k | case MVT::f32: |
562 | 1.26k | case MVT::f64: |
563 | 1.26k | return true; |
564 | 85 | case MVT::f128: |
565 | 85 | return Subtarget.hasVectorEnhancements1(); |
566 | 0 | default: |
567 | 0 | break; |
568 | 0 | } |
569 | 0 | |
570 | 0 | return false; |
571 | 0 | } |
572 | | |
573 | 560 | bool SystemZTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT) const { |
574 | 560 | // We can load zero using LZ?R and negative zero using LZ?R;LC?BR. |
575 | 197 | return Imm.isZero() || Imm.isNegZero(); |
576 | 560 | } |
577 | | |
578 | 2.32k | bool SystemZTargetLowering::isLegalICmpImmediate(int64_t Imm) const { |
579 | 2.32k | // We can use CGFI or CLGFI. |
580 | 35 | return isInt<32>(Imm) || isUInt<32>(Imm); |
581 | 2.32k | } |
582 | | |
583 | 21 | bool SystemZTargetLowering::isLegalAddImmediate(int64_t Imm) const { |
584 | 21 | // We can use ALGFI or SLGFI. |
585 | 12 | return isUInt<32>(Imm) || isUInt<32>(-Imm); |
586 | 21 | } |
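A few concrete values, derived directly from the two predicates above (editorial note):

// isLegalICmpImmediate(-1)           -> true  (CGFI covers the signed 32-bit range)
// isLegalICmpImmediate(0xFFFFFFFFLL) -> true  (CLGFI covers the unsigned 32-bit range)
// isLegalICmpImmediate(1LL << 33)    -> false (the constant has to be materialized first)
// isLegalAddImmediate(-1)            -> true  (SLGFI with the negated value)
// isLegalAddImmediate(1LL << 33)     -> false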
587 | | |
588 | | bool SystemZTargetLowering::allowsMisalignedMemoryAccesses(EVT VT, |
589 | | unsigned, |
590 | | unsigned, |
591 | 442 | bool *Fast) const { |
592 | 442 | // Unaligned accesses should never be slower than the expanded version. |
593 | 442 | // We check specifically for aligned accesses in the few cases where |
594 | 442 | // they are required. |
595 | 442 | if (Fast) |
596 | 14 | *Fast = true; |
597 | 442 | return true; |
598 | 442 | } |
599 | | |
600 | | // Information about the addressing mode for a memory access. |
601 | | struct AddressingMode { |
602 | | // True if a long displacement is supported. |
603 | | bool LongDisplacement; |
604 | | |
605 | | // True if use of index register is supported. |
606 | | bool IndexReg; |
607 | | |
608 | | AddressingMode(bool LongDispl, bool IdxReg) : |
609 | 17.3k | LongDisplacement(LongDispl), IndexReg(IdxReg) {} |
610 | | }; |
611 | | |
612 | | // Return the desired addressing mode for a Load which has only one use (in |
613 | | // the same block) which is a Store. |
614 | | static AddressingMode getLoadStoreAddrMode(bool HasVector, |
615 | 841 | Type *Ty) { |
616 | 841 | // With vector support a Load->Store combination may be combined to either |
617 | 841 | // an MVC or vector operations and it seems to work best to allow the |
618 | 841 | // vector addressing mode. |
619 | 841 | if (HasVector) |
620 | 841 | return AddressingMode(false/*LongDispl*/, true/*IdxReg*/); |
621 | 0 | |
622 | 0 | // Otherwise only the MVC case is special. |
623 | 0 | bool MVC = Ty->isIntegerTy(8); |
624 | 0 | return AddressingMode(!MVC/*LongDispl*/, !MVC/*IdxReg*/); |
625 | 0 | } |
626 | | |
627 | | // Return the addressing mode which seems most desirable given an LLVM |
628 | | // Instruction pointer. |
629 | | static AddressingMode |
630 | 1.73k | supportedAddressingMode(Instruction *I, bool HasVector) { |
631 | 1.73k | if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) { |
632 | 396 | switch (II->getIntrinsicID()) { |
633 | 0 | default: break; |
634 | 396 | case Intrinsic::memset: |
635 | 396 | case Intrinsic::memmove: |
636 | 396 | case Intrinsic::memcpy: |
637 | 396 | return AddressingMode(false/*LongDispl*/, false/*IdxReg*/); |
638 | 1.33k | } |
639 | 1.33k | } |
640 | 1.33k | |
641 | 1.33k | if (isa<LoadInst>(I) && I->hasOneUse()) { |
642 | 782 | auto *SingleUser = dyn_cast<Instruction>(*I->user_begin()); |
643 | 782 | if (SingleUser->getParent() == I->getParent()) { |
644 | 782 | if (isa<ICmpInst>(SingleUser)) { |
645 | 12 | if (auto *C = dyn_cast<ConstantInt>(SingleUser->getOperand(1))) |
646 | 12 | if (isInt<16>(C->getSExtValue()) || isUInt<16>(C->getZExtValue())) |
647 | 12 | // Comparison of memory with 16 bit signed / unsigned immediate |
648 | 12 | return AddressingMode(false/*LongDispl*/, false/*IdxReg*/); |
649 | 770 | } else if (isa<StoreInst>(SingleUser)) |
650 | 770 | // Load->Store |
651 | 382 | return getLoadStoreAddrMode(HasVector, I->getType()); |
652 | 1.33k | } |
653 | 553 | } else if (auto *StoreI = dyn_cast<StoreInst>(I)) { |
654 | 533 | if (auto *LoadI = dyn_cast<LoadInst>(StoreI->getValueOperand())) |
655 | 459 | if (LoadI->hasOneUse() && LoadI->getParent() == I->getParent()) |
656 | 459 | // Load->Store |
657 | 459 | return getLoadStoreAddrMode(HasVector, LoadI->getType()); |
658 | 482 | } |
659 | 482 | |
660 | 482 | if (HasVector && (isa<LoadInst>(I) || isa<StoreInst>(I))) { |
661 | 234 | |
662 | 234 | // * Use LDE instead of LE/LEY for z13 to avoid partial register |
663 | 234 | // dependencies (LDE only supports small offsets). |
664 | 234 | // * Utilize the vector registers to hold floating point |
665 | 234 | // values (vector load / store instructions only support small |
666 | 234 | // offsets). |
667 | 234 | |
668 | 191 | Type *MemAccessTy = (isa<LoadInst>(I) ? I->getType() : |
669 | 43 | I->getOperand(0)->getType()); |
670 | 234 | bool IsFPAccess = MemAccessTy->isFloatingPointTy(); |
671 | 234 | bool IsVectorAccess = MemAccessTy->isVectorTy(); |
672 | 234 | |
673 | 234 | // A store of an extracted vector element will be combined into a VSTE type |
674 | 234 | // instruction. |
675 | 234 | if (!IsVectorAccess && isa<StoreInst>(I)) { |
676 | 36 | Value *DataOp = I->getOperand(0); |
677 | 36 | if (isa<ExtractElementInst>(DataOp)) |
678 | 0 | IsVectorAccess = true; |
679 | 36 | } |
680 | 234 | |
681 | 234 | // A load which gets inserted into a vector element will be combined into a |
682 | 234 | // VLE type instruction. |
683 | 234 | if (!IsVectorAccess && isa<LoadInst>(I) && I->hasOneUse()) { |
684 | 124 | User *LoadUser = *I->user_begin(); |
685 | 124 | if (isa<InsertElementInst>(LoadUser)) |
686 | 0 | IsVectorAccess = true; |
687 | 124 | } |
688 | 234 | |
689 | 234 | if (IsFPAccess || IsVectorAccess) |
690 | 214 | return AddressingMode(false/*LongDispl*/, true/*IdxReg*/); |
691 | 268 | } |
692 | 268 | |
693 | 268 | return AddressingMode(true/*LongDispl*/, true/*IdxReg*/); |
694 | 268 | } |
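Editorial summary of the decision tree above (not in the original file):

//  * memset/memcpy/memmove intrinsics                       -> short displacement, no index
//  * load whose only in-block user compares it against a
//    16-bit immediate                                        -> short displacement, no index
//  * load feeding a store in the same block                  -> short displacement plus index
//    (MVC / vector candidate)                                   with vector support; otherwise
//                                                               i8 gets short/no-index, the rest long/index
//  * FP or vector-typed access when vector is available      -> short displacement plus index
//  * everything else                                          -> long displacement plus index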
695 | | |
696 | | bool SystemZTargetLowering::isLegalAddressingMode(const DataLayout &DL, |
697 | 16.3k | const AddrMode &AM, Type *Ty, unsigned AS, Instruction *I) const { |
698 | 16.3k | // Punt on globals for now, although they can be used in limited |
699 | 16.3k | // RELATIVE LONG cases. |
700 | 16.3k | if (AM.BaseGV) |
701 | 477 | return false; |
702 | 15.8k | |
703 | 15.8k | // Require a 20-bit signed offset. |
704 | 15.8k | if (!isInt<20>(AM.BaseOffs)) |
705 | 315 | return false; |
706 | 15.5k | |
707 | 15.5k | AddressingMode SupportedAM(true, true); |
708 | 15.5k | if (I != nullptr) |
709 | 1.73k | SupportedAM = supportedAddressingMode(I, Subtarget.hasVector()); |
710 | 15.5k | |
711 | 15.5k | if (!SupportedAM.LongDisplacement && !isUInt<12>(AM.BaseOffs)) |
712 | 610 | return false; |
713 | 14.9k | |
714 | 14.9k | if (!SupportedAM.IndexReg) |
715 | 14.9k | // No indexing allowed. |
716 | 230 | return AM.Scale == 0; |
717 | 14.9k | else |
718 | 14.9k | // Indexing is OK but no scale factor can be applied. |
719 | 14.7k | return AM.Scale == 0 || AM.Scale == 1; |
720 | 0 | } |
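For instance (editorial illustration), with no instruction context (I == nullptr) the checks above accept or reject these modes:

// {BaseGV = some global}            -> false (globals are punted for now)
// {BaseOffs = 1 << 20, Scale = 0}   -> false (outside the signed 20-bit displacement)
// {BaseOffs = -4096, Scale = 1}     -> true  (base + index + displacement)
// {BaseOffs = 0, Scale = 2}         -> false (no scaled index register on SystemZ)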
721 | | |
722 | 313 | bool SystemZTargetLowering::isTruncateFree(Type *FromType, Type *ToType) const { |
723 | 313 | if (!FromType->isIntegerTy() || !ToType->isIntegerTy()) |
724 | 8 | return false; |
725 | 305 | unsigned FromBits = FromType->getPrimitiveSizeInBits(); |
726 | 305 | unsigned ToBits = ToType->getPrimitiveSizeInBits(); |
727 | 305 | return FromBits > ToBits; |
728 | 305 | } |
729 | | |
730 | 696 | bool SystemZTargetLowering::isTruncateFree(EVT FromVT, EVT ToVT) const { |
731 | 696 | if (!FromVT.isInteger() || !ToVT.isInteger()) |
732 | 0 | return false; |
733 | 696 | unsigned FromBits = FromVT.getSizeInBits(); |
734 | 696 | unsigned ToBits = ToVT.getSizeInBits(); |
735 | 696 | return FromBits > ToBits; |
736 | 696 | } |
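Both overloads reduce to a bit-width comparison; for example (editorial note):

// isTruncateFree(MVT::i64, MVT::i32) -> true; isTruncateFree(MVT::i32, MVT::i64) -> false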
737 | | |
738 | | //===----------------------------------------------------------------------===// |
739 | | // Inline asm support |
740 | | //===----------------------------------------------------------------------===// |
741 | | |
742 | | TargetLowering::ConstraintType |
743 | 1.68k | SystemZTargetLowering::getConstraintType(StringRef Constraint) const { |
744 | 1.68k | if (Constraint.size() == 1) { |
745 | 1.54k | switch (Constraint[0]) { |
746 | 1.25k | case 'a': // Address register |
747 | 1.25k | case 'd': // Data register (equivalent to 'r') |
748 | 1.25k | case 'f': // Floating-point register |
749 | 1.25k | case 'h': // High-part register |
750 | 1.25k | case 'r': // General-purpose register |
751 | 1.25k | return C_RegisterClass; |
752 | 1.25k | |
753 | 69 | case 'Q': // Memory with base and unsigned 12-bit displacement |
754 | 69 | case 'R': // Likewise, plus an index |
755 | 69 | case 'S': // Memory with base and signed 20-bit displacement |
756 | 69 | case 'T': // Likewise, plus an index |
757 | 69 | case 'm': // Equivalent to 'T'. |
758 | 69 | return C_Memory; |
759 | 69 | |
760 | 88 | case 'I': // Unsigned 8-bit constant |
761 | 88 | case 'J': // Unsigned 12-bit constant |
762 | 88 | case 'K': // Signed 16-bit constant |
763 | 88 | case 'L': // Signed 20-bit displacement (on all targets we support) |
764 | 88 | case 'M': // 0x7fffffff |
765 | 88 | return C_Other; |
766 | 88 | |
767 | 132 | default: |
768 | 132 | break; |
769 | 280 | } |
770 | 280 | } |
771 | 280 | return TargetLowering::getConstraintType(Constraint); |
772 | 280 | } |
773 | | |
774 | | TargetLowering::ConstraintWeight SystemZTargetLowering:: |
775 | | getSingleConstraintMatchWeight(AsmOperandInfo &info, |
776 | 0 | const char *constraint) const { |
777 | 0 | ConstraintWeight weight = CW_Invalid; |
778 | 0 | Value *CallOperandVal = info.CallOperandVal; |
779 | 0 | // If we don't have a value, we can't do a match, |
780 | 0 | // but allow it at the lowest weight. |
781 | 0 | if (!CallOperandVal) |
782 | 0 | return CW_Default; |
783 | 0 | Type *type = CallOperandVal->getType(); |
784 | 0 | // Look at the constraint type. |
785 | 0 | switch (*constraint) { |
786 | 0 | default: |
787 | 0 | weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint); |
788 | 0 | break; |
789 | 0 | |
790 | 0 | case 'a': // Address register |
791 | 0 | case 'd': // Data register (equivalent to 'r') |
792 | 0 | case 'h': // High-part register |
793 | 0 | case 'r': // General-purpose register |
794 | 0 | if (CallOperandVal->getType()->isIntegerTy()) |
795 | 0 | weight = CW_Register; |
796 | 0 | break; |
797 | 0 | |
798 | 0 | case 'f': // Floating-point register |
799 | 0 | if (type->isFloatingPointTy()) |
800 | 0 | weight = CW_Register; |
801 | 0 | break; |
802 | 0 | |
803 | 0 | case 'I': // Unsigned 8-bit constant |
804 | 0 | if (auto *C = dyn_cast<ConstantInt>(CallOperandVal)) |
805 | 0 | if (isUInt<8>(C->getZExtValue())) |
806 | 0 | weight = CW_Constant; |
807 | 0 | break; |
808 | 0 | |
809 | 0 | case 'J': // Unsigned 12-bit constant |
810 | 0 | if (auto *C = dyn_cast<ConstantInt>(CallOperandVal)) |
811 | 0 | if (isUInt<12>(C->getZExtValue())) |
812 | 0 | weight = CW_Constant; |
813 | 0 | break; |
814 | 0 | |
815 | 0 | case 'K': // Signed 16-bit constant |
816 | 0 | if (auto *C = dyn_cast<ConstantInt>(CallOperandVal)) |
817 | 0 | if (isInt<16>(C->getSExtValue())) |
818 | 0 | weight = CW_Constant; |
819 | 0 | break; |
820 | 0 | |
821 | 0 | case 'L': // Signed 20-bit displacement (on all targets we support) |
822 | 0 | if (auto *C = dyn_cast<ConstantInt>(CallOperandVal)) |
823 | 0 | if (isInt<20>(C->getSExtValue())) |
824 | 0 | weight = CW_Constant; |
825 | 0 | break; |
826 | 0 | |
827 | 0 | case 'M': // 0x7fffffff |
828 | 0 | if (auto *C = dyn_cast<ConstantInt>(CallOperandVal)) |
829 | 0 | if (C->getZExtValue() == 0x7fffffff) |
830 | 0 | weight = CW_Constant; |
831 | 0 | break; |
832 | 0 | } |
833 | 0 | return weight; |
834 | 0 | } |
835 | | |
836 | | // Parse a "{tNNN}" register constraint for which the register type "t" |
837 | | // has already been verified. MC is the class associated with "t" and |
838 | | // Map maps 0-based register numbers to LLVM register numbers. |
839 | | static std::pair<unsigned, const TargetRegisterClass *> |
840 | | parseRegisterNumber(StringRef Constraint, const TargetRegisterClass *RC, |
841 | 35 | const unsigned *Map) { |
842 | 35 | assert(*(Constraint.end()-1) == '}' && "Missing '}'"); |
843 | 35 | if (isdigit(Constraint[2])) { |
844 | 35 | unsigned Index; |
845 | 35 | bool Failed = |
846 | 35 | Constraint.slice(2, Constraint.size() - 1).getAsInteger(10, Index); |
847 | 35 | if (!Failed && Index < 16 && Map[Index]) |
848 | 35 | return std::make_pair(Map[Index], RC); |
849 | 0 | } |
850 | 0 | return std::make_pair(0U, nullptr); |
851 | 0 | } |
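Editorial example: the inline-asm constraint "{r5}" for a 64-bit operand reaches this helper from getRegForInlineAsmConstraint below with RC = &SystemZ::GR64BitRegClass and Map = SystemZMC::GR64Regs; the slice "5" parses to Index = 5 and, assuming the usual 0-based numbering in that table, the result is:

// "{r5}" + 64-bit VT -> parseRegisterNumber(..., &SystemZ::GR64BitRegClass, SystemZMC::GR64Regs)
//                    -> std::make_pair(SystemZ::R5D, &SystemZ::GR64BitRegClass)   (assumed mapping)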
852 | | |
853 | | std::pair<unsigned, const TargetRegisterClass *> |
854 | | SystemZTargetLowering::getRegForInlineAsmConstraint( |
855 | 426 | const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const { |
856 | 426 | if (Constraint.size() == 1) { |
857 | 303 | // GCC Constraint Letters |
858 | 303 | switch (Constraint[0]) { |
859 | 0 | default: break; |
860 | 152 | case 'd': // Data register (equivalent to 'r') |
861 | 152 | case 'r': // General-purpose register |
862 | 152 | if (VT == MVT::i64) |
863 | 18 | return std::make_pair(0U, &SystemZ::GR64BitRegClass); |
864 | 134 | else if (VT == MVT::i128) |
865 | 0 | return std::make_pair(0U, &SystemZ::GR128BitRegClass); |
866 | 134 | return std::make_pair(0U, &SystemZ::GR32BitRegClass); |
867 | 134 | |
868 | 4 | case 'a': // Address register |
869 | 4 | if (VT == MVT::i64) |
870 | 1 | return std::make_pair(0U, &SystemZ::ADDR64BitRegClass); |
871 | 3 | else if (VT == MVT::i128) |
872 | 0 | return std::make_pair(0U, &SystemZ::ADDR128BitRegClass); |
873 | 3 | return std::make_pair(0U, &SystemZ::ADDR32BitRegClass); |
874 | 3 | |
875 | 141 | case 'h': // High-part register (an LLVM extension) |
876 | 141 | return std::make_pair(0U, &SystemZ::GRH32BitRegClass); |
877 | 3 | |
878 | 6 | case 'f': // Floating-point register |
879 | 6 | if (VT == MVT::f64) |
880 | 3 | return std::make_pair(0U, &SystemZ::FP64BitRegClass); |
881 | 3 | else if (VT == MVT::f128) |
882 | 1 | return std::make_pair(0U, &SystemZ::FP128BitRegClass); |
883 | 2 | return std::make_pair(0U, &SystemZ::FP32BitRegClass); |
884 | 303 | } |
885 | 303 | } |
886 | 123 | if (Constraint.size() > 0 && Constraint[0] == '{') { |
887 | 43 | // We need to override the default register parsing for GPRs and FPRs |
888 | 43 | // because the interpretation depends on VT. The internal names of |
889 | 43 | // the registers are also different from the external names |
890 | 43 | // (F0D and F0S instead of F0, etc.). |
891 | 43 | if (Constraint[1] == 'r') { |
892 | 26 | if (VT == MVT::i32) |
893 | 13 | return parseRegisterNumber(Constraint, &SystemZ::GR32BitRegClass, |
894 | 13 | SystemZMC::GR32Regs); |
895 | 13 | if (VT == MVT::i128) |
896 | 0 | return parseRegisterNumber(Constraint, &SystemZ::GR128BitRegClass, |
897 | 0 | SystemZMC::GR128Regs); |
898 | 13 | return parseRegisterNumber(Constraint, &SystemZ::GR64BitRegClass, |
899 | 13 | SystemZMC::GR64Regs); |
900 | 13 | } |
901 | 17 | if (Constraint[1] == 'f') { |
902 | 9 | if (VT == MVT::f32) |
903 | 3 | return parseRegisterNumber(Constraint, &SystemZ::FP32BitRegClass, |
904 | 3 | SystemZMC::FP32Regs); |
905 | 6 | if (VT == MVT::f128) |
906 | 1 | return parseRegisterNumber(Constraint, &SystemZ::FP128BitRegClass, |
907 | 1 | SystemZMC::FP128Regs); |
908 | 5 | return parseRegisterNumber(Constraint, &SystemZ::FP64BitRegClass, |
909 | 5 | SystemZMC::FP64Regs); |
910 | 5 | } |
911 | 43 | } |
912 | 88 | return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT); |
913 | 88 | } |
914 | | |
915 | | void SystemZTargetLowering:: |
916 | | LowerAsmOperandForConstraint(SDValue Op, std::string &Constraint, |
917 | | std::vector<SDValue> &Ops, |
918 | 32 | SelectionDAG &DAG) const { |
919 | 32 | // Only support length 1 constraints for now. |
920 | 32 | if (Constraint.length() == 1) { |
921 | 32 | switch (Constraint[0]) { |
922 | 6 | case 'I': // Unsigned 8-bit constant |
923 | 6 | if (auto *C = dyn_cast<ConstantSDNode>(Op)) |
924 | 6 | if (isUInt<8>(C->getZExtValue())) |
925 | 4 | Ops.push_back(DAG.getTargetConstant(C->getZExtValue(), SDLoc(Op), |
926 | 4 | Op.getValueType())); |
927 | 6 | return; |
928 | 32 | |
929 | 6 | case 'J': // Unsigned 12-bit constant |
930 | 6 | if (auto *C = dyn_cast<ConstantSDNode>(Op)) |
931 | 6 | if (isUInt<12>(C->getZExtValue())) |
932 | 4 | Ops.push_back(DAG.getTargetConstant(C->getZExtValue(), SDLoc(Op), |
933 | 4 | Op.getValueType())); |
934 | 6 | return; |
935 | 32 | |
936 | 6 | case 'K': // Signed 16-bit constant |
937 | 6 | if (auto *C = dyn_cast<ConstantSDNode>(Op)) |
938 | 6 | if (isInt<16>(C->getSExtValue())) |
939 | 4 | Ops.push_back(DAG.getTargetConstant(C->getSExtValue(), SDLoc(Op), |
940 | 4 | Op.getValueType())); |
941 | 6 | return; |
942 | 32 | |
943 | 6 | case 'L': // Signed 20-bit displacement (on all targets we support) |
944 | 6 | if (auto *C = dyn_cast<ConstantSDNode>(Op)) |
945 | 6 | if (isInt<20>(C->getSExtValue())) |
946 | 4 | Ops.push_back(DAG.getTargetConstant(C->getSExtValue(), SDLoc(Op), |
947 | 4 | Op.getValueType())); |
948 | 6 | return; |
949 | 32 | |
950 | 8 | case 'M': // 0x7fffffff |
951 | 8 | if (auto *C = dyn_cast<ConstantSDNode>(Op)) |
952 | 8 | if (C->getZExtValue() == 0x7fffffff) |
953 | 4 | Ops.push_back(DAG.getTargetConstant(C->getZExtValue(), SDLoc(Op), |
954 | 4 | Op.getValueType())); |
955 | 8 | return; |
956 | 0 | } |
957 | 0 | } |
958 | 0 | TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG); |
959 | 0 | } |
960 | | |
961 | | //===----------------------------------------------------------------------===// |
962 | | // Calling conventions |
963 | | //===----------------------------------------------------------------------===// |
964 | | |
965 | | #include "SystemZGenCallingConv.inc" |
966 | | |
967 | | bool SystemZTargetLowering::allowTruncateForTailCall(Type *FromType, |
968 | 3 | Type *ToType) const { |
969 | 3 | return isTruncateFree(FromType, ToType); |
970 | 3 | } |
971 | | |
972 | 10 | bool SystemZTargetLowering::mayBeEmittedAsTailCall(const CallInst *CI) const { |
973 | 10 | return CI->isTailCall(); |
974 | 10 | } |
975 | | |
976 | | // We do not yet support 128-bit single-element vector types. If the user |
977 | | // attempts to use such types as function argument or return type, prefer |
978 | | // to error out instead of emitting code violating the ABI. |
979 | 12.2k | static void VerifyVectorType(MVT VT, EVT ArgVT) { |
980 | 12.2k | if (ArgVT.isVector() && !VT.isVector()) |
981 | 8 | report_fatal_error("Unsupported vector argument or return type"); |
982 | 12.2k | } |
983 | | |
984 | 2.39k | static void VerifyVectorTypes(const SmallVectorImpl<ISD::InputArg> &Ins) { |
985 | 9.53k | for (unsigned i = 0; i < Ins.size(); ++i) |
986 | 7.13k | VerifyVectorType(Ins[i].VT, Ins[i].ArgVT); |
987 | 2.39k | } |
988 | | |
989 | 4.79k | static void VerifyVectorTypes(const SmallVectorImpl<ISD::OutputArg> &Outs) { |
990 | 9.88k | for (unsigned i = 0; i < Outs.size(); ++i) |
991 | 5.09k | VerifyVectorType(Outs[i].VT, Outs[i].ArgVT); |
992 | 4.79k | } |
993 | | |
994 | | // Value is a value that has been passed to us in the location described by VA |
995 | | // (and so has type VA.getLocVT()). Convert Value to VA.getValVT(), chaining |
996 | | // any loads onto Chain. |
997 | | static SDValue convertLocVTToValVT(SelectionDAG &DAG, const SDLoc &DL, |
998 | | CCValAssign &VA, SDValue Chain, |
999 | 15.0k | SDValue Value) { |
1000 | 15.0k | // If the argument has been promoted from a smaller type, insert an |
1001 | 15.0k | // assertion to capture this. |
1002 | 15.0k | if (VA.getLocInfo() == CCValAssign::SExt) |
1003 | 30 | Value = DAG.getNode(ISD::AssertSext, DL, VA.getLocVT(), Value, |
1004 | 30 | DAG.getValueType(VA.getValVT())); |
1005 | 15.0k | else if (VA.getLocInfo() == CCValAssign::ZExt) |
1006 | 17 | Value = DAG.getNode(ISD::AssertZext, DL, VA.getLocVT(), Value, |
1007 | 17 | DAG.getValueType(VA.getValVT())); |
1008 | 15.0k | |
1009 | 15.0k | if (VA.isExtInLoc()) |
1010 | 47 | Value = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), Value); |
1011 | 15.0k | else if (VA.getLocInfo() == CCValAssign::BCvt) { |
1012 | 12 | // If this is a short vector argument loaded from the stack, |
1013 | 12 | // extend from i64 to full vector size and then bitcast. |
1014 | 12 | assert(VA.getLocVT() == MVT::i64); |
1015 | 12 | assert(VA.getValVT().isVector()); |
1016 | 12 | Value = DAG.getBuildVector(MVT::v2i64, DL, {Value, DAG.getUNDEF(MVT::i64)}); |
1017 | 12 | Value = DAG.getNode(ISD::BITCAST, DL, VA.getValVT(), Value); |
1018 | 12 | } else |
1019 | 15.0k | assert(VA.getLocInfo() == CCValAssign::Full && "Unsupported getLocInfo"); |
1020 | 15.0k | return Value; |
1021 | 15.0k | } |
1022 | | |
1023 | | // Value is a value of type VA.getValVT() that we need to copy into |
1024 | | // the location described by VA. Return a copy of Value converted to |
1025 | | // VA.getValVT(). The caller is responsible for handling indirect values. |
1026 | | static SDValue convertValVTToLocVT(SelectionDAG &DAG, const SDLoc &DL, |
1027 | 6.17k | CCValAssign &VA, SDValue Value) { |
1028 | 6.17k | switch (VA.getLocInfo()) { |
1029 | 45 | case CCValAssign::SExt: |
1030 | 45 | return DAG.getNode(ISD::SIGN_EXTEND, DL, VA.getLocVT(), Value); |
1031 | 41 | case CCValAssign::ZExt: |
1032 | 41 | return DAG.getNode(ISD::ZERO_EXTEND, DL, VA.getLocVT(), Value); |
1033 | 0 | case CCValAssign::AExt: |
1034 | 0 | return DAG.getNode(ISD::ANY_EXTEND, DL, VA.getLocVT(), Value); |
1035 | 12 | case CCValAssign::BCvt: |
1036 | 12 | // If this is a short vector argument to be stored to the stack, |
1037 | 12 | // bitcast to v2i64 and then extract first element. |
1038 | 12 | assert(VA.getLocVT() == MVT::i64); |
1039 | 12 | assert(VA.getValVT().isVector()); |
1040 | 12 | Value = DAG.getNode(ISD::BITCAST, DL, MVT::v2i64, Value); |
1041 | 12 | return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VA.getLocVT(), Value, |
1042 | 12 | DAG.getConstant(0, DL, MVT::i32)); |
1043 | 6.07k | case CCValAssign::Full: |
1044 | 6.07k | return Value; |
1045 | 0 | default: |
1046 | 0 | llvm_unreachable("Unhandled getLocInfo()"); |
1047 | 0 | } |
1048 | 0 | } |
1049 | | |
1050 | | SDValue SystemZTargetLowering::LowerFormalArguments( |
1051 | | SDValue Chain, CallingConv::ID CallConv, bool IsVarArg, |
1052 | | const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL, |
1053 | 6.58k | SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const { |
1054 | 6.58k | MachineFunction &MF = DAG.getMachineFunction(); |
1055 | 6.58k | MachineFrameInfo &MFI = MF.getFrameInfo(); |
1056 | 6.58k | MachineRegisterInfo &MRI = MF.getRegInfo(); |
1057 | 6.58k | SystemZMachineFunctionInfo *FuncInfo = |
1058 | 6.58k | MF.getInfo<SystemZMachineFunctionInfo>(); |
1059 | 6.58k | auto *TFL = |
1060 | 6.58k | static_cast<const SystemZFrameLowering *>(Subtarget.getFrameLowering()); |
1061 | 6.58k | EVT PtrVT = getPointerTy(DAG.getDataLayout()); |
1062 | 6.58k | |
1063 | 6.58k | // Detect unsupported vector argument types. |
1064 | 6.58k | if (Subtarget.hasVector()) |
1065 | 2.36k | VerifyVectorTypes(Ins); |
1066 | 6.58k | |
1067 | 6.58k | // Assign locations to all of the incoming arguments. |
1068 | 6.58k | SmallVector<CCValAssign, 16> ArgLocs; |
1069 | 6.58k | SystemZCCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext()); |
1070 | 6.58k | CCInfo.AnalyzeFormalArguments(Ins, CC_SystemZ); |
1071 | 6.58k | |
1072 | 6.58k | unsigned NumFixedGPRs = 0; |
1073 | 6.58k | unsigned NumFixedFPRs = 0; |
1074 | 21.4k | for (unsigned I = 0, E = ArgLocs.size(); I != E; ++I) { |
1075 | 14.8k | SDValue ArgValue; |
1076 | 14.8k | CCValAssign &VA = ArgLocs[I]; |
1077 | 14.8k | EVT LocVT = VA.getLocVT(); |
1078 | 14.8k | if (VA.isRegLoc()) { |
1079 | 13.7k | // Arguments passed in registers |
1080 | 13.7k | const TargetRegisterClass *RC; |
1081 | 13.7k | switch (LocVT.getSimpleVT().SimpleTy) { |
1082 | 0 | default: |
1083 | 0 | // Integers smaller than i64 should be promoted to i64. |
1084 | 0 | llvm_unreachable("Unexpected argument type"); |
1085 | 1.82k | case MVT::i32: |
1086 | 1.82k | NumFixedGPRs += 1; |
1087 | 1.82k | RC = &SystemZ::GR32BitRegClass; |
1088 | 1.82k | break; |
1089 | 5.58k | case MVT::i64: |
1090 | 5.58k | NumFixedGPRs += 1; |
1091 | 5.58k | RC = &SystemZ::GR64BitRegClass; |
1092 | 5.58k | break; |
1093 | 554 | case MVT::f32: |
1094 | 554 | NumFixedFPRs += 1; |
1095 | 554 | RC = &SystemZ::FP32BitRegClass; |
1096 | 554 | break; |
1097 | 1.19k | case MVT::f64: |
1098 | 1.19k | NumFixedFPRs += 1; |
1099 | 1.19k | RC = &SystemZ::FP64BitRegClass; |
1100 | 1.19k | break; |
1101 | 4.54k | case MVT::v16i8: |
1102 | 4.54k | case MVT::v8i16: |
1103 | 4.54k | case MVT::v4i32: |
1104 | 4.54k | case MVT::v2i64: |
1105 | 4.54k | case MVT::v4f32: |
1106 | 4.54k | case MVT::v2f64: |
1107 | 4.54k | RC = &SystemZ::VR128BitRegClass; |
1108 | 4.54k | break; |
1109 | 13.7k | } |
1110 | 13.7k | |
1111 | 13.7k | unsigned VReg = MRI.createVirtualRegister(RC); |
1112 | 13.7k | MRI.addLiveIn(VA.getLocReg(), VReg); |
1113 | 13.7k | ArgValue = DAG.getCopyFromReg(Chain, DL, VReg, LocVT); |
1114 | 14.8k | } else { |
1115 | 1.15k | assert(VA.isMemLoc() && "Argument not register or memory"); |
1116 | 1.15k | |
1117 | 1.15k | // Create the frame index object for this incoming parameter. |
1118 | 1.15k | int FI = MFI.CreateFixedObject(LocVT.getSizeInBits() / 8, |
1119 | 1.15k | VA.getLocMemOffset(), true); |
1120 | 1.15k | |
1121 | 1.15k | // Create the SelectionDAG nodes corresponding to a load |
1122 | 1.15k | // from this parameter. Unpromoted ints and floats are |
1123 | 1.15k | // passed as right-justified 8-byte values. |
1124 | 1.15k | SDValue FIN = DAG.getFrameIndex(FI, PtrVT); |
1125 | 1.15k | if (VA.getLocVT() == MVT::i32 || VA.getLocVT() == MVT::f32) |
1126 | 10 | FIN = DAG.getNode(ISD::ADD, DL, PtrVT, FIN, |
1127 | 10 | DAG.getIntPtrConstant(4, DL)); |
1128 | 1.15k | ArgValue = DAG.getLoad(LocVT, DL, Chain, FIN, |
1129 | 1.15k | MachinePointerInfo::getFixedStack(MF, FI)); |
1130 | 1.15k | } |
1131 | 14.8k | |
1132 | 14.8k | // Convert the value of the argument register into the value that's |
1133 | 14.8k | // being passed. |
1134 | 14.8k | if (VA.getLocInfo() == CCValAssign::Indirect) { |
1135 | 58 | InVals.push_back(DAG.getLoad(VA.getValVT(), DL, Chain, ArgValue, |
1136 | 58 | MachinePointerInfo())); |
1137 | 58 | // If the original argument was split (e.g. i128), we need |
1138 | 58 | // to load all parts of it here (using the same address). |
1139 | 58 | unsigned ArgIndex = Ins[I].OrigArgIndex; |
1140 | 58 | assert (Ins[I].PartOffset == 0); |
1141 | 82 | while (I + 1 != E && Ins[I + 1].OrigArgIndex == ArgIndex) { |
1142 | 24 | CCValAssign &PartVA = ArgLocs[I + 1]; |
1143 | 24 | unsigned PartOffset = Ins[I + 1].PartOffset; |
1144 | 24 | SDValue Address = DAG.getNode(ISD::ADD, DL, PtrVT, ArgValue, |
1145 | 24 | DAG.getIntPtrConstant(PartOffset, DL)); |
1146 | 24 | InVals.push_back(DAG.getLoad(PartVA.getValVT(), DL, Chain, Address, |
1147 | 24 | MachinePointerInfo())); |
1148 | 24 | ++I; |
1149 | 24 | } |
1150 | 58 | } else |
1151 | 14.7k | InVals.push_back(convertLocVTToValVT(DAG, DL, VA, Chain, ArgValue)); |
1152 | 14.8k | } |
1153 | 6.58k | |
1154 | 6.58k | if (IsVarArg) { |
1155 | 0 | // Save the number of non-varargs registers for later use by va_start, etc. |
1156 | 0 | FuncInfo->setVarArgsFirstGPR(NumFixedGPRs); |
1157 | 0 | FuncInfo->setVarArgsFirstFPR(NumFixedFPRs); |
1158 | 0 |
|
1159 | 0 | // Likewise the address (in the form of a frame index) of where the |
1160 | 0 | // first stack vararg would be. The 1-byte size here is arbitrary. |
1161 | 0 | int64_t StackSize = CCInfo.getNextStackOffset(); |
1162 | 0 | FuncInfo->setVarArgsFrameIndex(MFI.CreateFixedObject(1, StackSize, true)); |
1163 | 0 |
1164 | 0 | // ...and a similar frame index for the caller-allocated save area |
1165 | 0 | // that will be used to store the incoming registers. |
1166 | 0 | int64_t RegSaveOffset = TFL->getOffsetOfLocalArea(); |
1167 | 0 | unsigned RegSaveIndex = MFI.CreateFixedObject(1, RegSaveOffset, true); |
1168 | 0 | FuncInfo->setRegSaveFrameIndex(RegSaveIndex); |
1169 | 0 |
1170 | 0 | // Store the FPR varargs in the reserved frame slots. (We store the |
1171 | 0 | // GPRs as part of the prologue.) |
1172 | 0 | if (NumFixedFPRs < SystemZ::NumArgFPRs) {
1173 | 0 | SDValue MemOps[SystemZ::NumArgFPRs]; |
1174 | 0 | for (unsigned I = NumFixedFPRs; I < SystemZ::NumArgFPRs; ++I) {
1175 | 0 | unsigned Offset = TFL->getRegSpillOffset(SystemZ::ArgFPRs[I]); |
1176 | 0 | int FI = MFI.CreateFixedObject(8, RegSaveOffset + Offset, true); |
1177 | 0 | SDValue FIN = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout())); |
1178 | 0 | unsigned VReg = MF.addLiveIn(SystemZ::ArgFPRs[I], |
1179 | 0 | &SystemZ::FP64BitRegClass); |
1180 | 0 | SDValue ArgValue = DAG.getCopyFromReg(Chain, DL, VReg, MVT::f64); |
1181 | 0 | MemOps[I] = DAG.getStore(ArgValue.getValue(1), DL, ArgValue, FIN, |
1182 | 0 | MachinePointerInfo::getFixedStack(MF, FI)); |
1183 | 0 | } |
1184 | 0 | // Join the stores, which are independent of one another. |
1185 | 0 | Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, |
1186 | 0 | makeArrayRef(&MemOps[NumFixedFPRs], |
1187 | 0 | SystemZ::NumArgFPRs-NumFixedFPRs)); |
1188 | 0 | } |
1189 | 0 | } |
1190 | 6.58k | |
1191 | 6.58k | return Chain; |
1192 | 6.58k | } |
1193 | | |
1194 | | static bool canUseSiblingCall(const CCState &ArgCCInfo, |
1195 | | SmallVectorImpl<CCValAssign> &ArgLocs, |
1196 | 69 | SmallVectorImpl<ISD::OutputArg> &Outs) { |
1197 | 69 | // Punt if there are any indirect or stack arguments, or if the call |
1198 | 69 | // needs the callee-saved argument register R6, or if the call uses |
1199 | 69 | // the callee-saved register arguments SwiftSelf and SwiftError. |
1200 | 105 | for (unsigned I = 0, E = ArgLocs.size(); I != E; ++I) {
1201 | 41 | CCValAssign &VA = ArgLocs[I]; |
1202 | 41 | if (VA.getLocInfo() == CCValAssign::Indirect) |
1203 | 1 | return false; |
1204 | 40 | if (!VA.isRegLoc())
1205 | 1 | return false; |
1206 | 39 | unsigned Reg = VA.getLocReg(); |
1207 | 39 | if (Reg == SystemZ::R6H || Reg == SystemZ::R6L || Reg == SystemZ::R6D)
1208 | 1 | return false; |
1209 | 38 | if (Outs[I].Flags.isSwiftSelf() || Outs[I].Flags.isSwiftError())
1210 | 2 | return false; |
1211 | 41 | } |
1212 | 64 | return true; |
1213 | 69 | } |
1214 | | |
1215 | | SDValue |
1216 | | SystemZTargetLowering::LowerCall(CallLoweringInfo &CLI, |
1217 | 454 | SmallVectorImpl<SDValue> &InVals) const { |
1218 | 454 | SelectionDAG &DAG = CLI.DAG; |
1219 | 454 | SDLoc &DL = CLI.DL; |
1220 | 454 | SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs; |
1221 | 454 | SmallVectorImpl<SDValue> &OutVals = CLI.OutVals; |
1222 | 454 | SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins; |
1223 | 454 | SDValue Chain = CLI.Chain; |
1224 | 454 | SDValue Callee = CLI.Callee; |
1225 | 454 | bool &IsTailCall = CLI.IsTailCall; |
1226 | 454 | CallingConv::ID CallConv = CLI.CallConv; |
1227 | 454 | bool IsVarArg = CLI.IsVarArg; |
1228 | 454 | MachineFunction &MF = DAG.getMachineFunction(); |
1229 | 454 | EVT PtrVT = getPointerTy(MF.getDataLayout()); |
1230 | 454 | |
1231 | 454 | // Detect unsupported vector argument and return types. |
1232 | 454 | if (Subtarget.hasVector()) {
1233 | 38 | VerifyVectorTypes(Outs); |
1234 | 38 | VerifyVectorTypes(Ins); |
1235 | 38 | } |
1236 | 454 | |
1237 | 454 | // Analyze the operands of the call, assigning locations to each operand. |
1238 | 454 | SmallVector<CCValAssign, 16> ArgLocs; |
1239 | 454 | SystemZCCState ArgCCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext()); |
1240 | 454 | ArgCCInfo.AnalyzeCallOperands(Outs, CC_SystemZ); |
1241 | 454 | |
1242 | 454 | // We don't support GuaranteedTailCallOpt, only automatically-detected |
1243 | 454 | // sibling calls. |
1244 | 454 | if (IsTailCall && !canUseSiblingCall(ArgCCInfo, ArgLocs, Outs))
1245 | 5 | IsTailCall = false; |
1246 | 454 | |
1247 | 454 | // Get a count of how many bytes are to be pushed on the stack. |
1248 | 454 | unsigned NumBytes = ArgCCInfo.getNextStackOffset(); |
1249 | 454 | |
1250 | 454 | // Mark the start of the call. |
1251 | 454 | if (!IsTailCall) |
1252 | 388 | Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, DL); |
1253 | 454 | |
1254 | 454 | // Copy argument values to their designated locations. |
1255 | 454 | SmallVector<std::pair<unsigned, SDValue>, 9> RegsToPass; |
1256 | 454 | SmallVector<SDValue, 8> MemOpChains; |
1257 | 454 | SDValue StackPtr; |
1258 | 1.31k | for (unsigned I = 0, E = ArgLocs.size(); I != E; ++I) {
1259 | 860 | CCValAssign &VA = ArgLocs[I]; |
1260 | 860 | SDValue ArgValue = OutVals[I]; |
1261 | 860 | |
1262 | 860 | if (VA.getLocInfo() == CCValAssign::Indirect) {
1263 | 69 | // Store the argument in a stack slot and pass its address. |
1264 | 69 | SDValue SpillSlot = DAG.CreateStackTemporary(Outs[I].ArgVT); |
1265 | 69 | int FI = cast<FrameIndexSDNode>(SpillSlot)->getIndex(); |
1266 | 69 | MemOpChains.push_back( |
1267 | 69 | DAG.getStore(Chain, DL, ArgValue, SpillSlot, |
1268 | 69 | MachinePointerInfo::getFixedStack(MF, FI))); |
1269 | 69 | // If the original argument was split (e.g. i128), we need |
1270 | 69 | // to store all parts of it here (and pass just one address). |
1271 | 69 | unsigned ArgIndex = Outs[I].OrigArgIndex; |
1272 | 69 | assert (Outs[I].PartOffset == 0); |
1273 | 77 | while (I + 1 != E && Outs[I + 1].OrigArgIndex == ArgIndex) {
1274 | 8 | SDValue PartValue = OutVals[I + 1]; |
1275 | 8 | unsigned PartOffset = Outs[I + 1].PartOffset; |
1276 | 8 | SDValue Address = DAG.getNode(ISD::ADD, DL, PtrVT, SpillSlot, |
1277 | 8 | DAG.getIntPtrConstant(PartOffset, DL)); |
1278 | 8 | MemOpChains.push_back( |
1279 | 8 | DAG.getStore(Chain, DL, PartValue, Address, |
1280 | 8 | MachinePointerInfo::getFixedStack(MF, FI))); |
1281 | 8 | ++I; |
1282 | 8 | } |
1283 | 69 | ArgValue = SpillSlot; |
1284 | 69 | } else |
1285 | 791 | ArgValue = convertValVTToLocVT(DAG, DL, VA, ArgValue); |
1286 | 860 | |
1287 | 860 | if (VA.isRegLoc()) |
1288 | 860 | // Queue up the argument copies and emit them at the end. |
1289 | 660 | RegsToPass.push_back(std::make_pair(VA.getLocReg(), ArgValue)); |
1290 | 200 | else { |
1291 | 200 | assert(VA.isMemLoc() && "Argument not register or memory"); |
1292 | 200 | |
1293 | 200 | // Work out the address of the stack slot. Unpromoted ints and |
1294 | 200 | // floats are passed as right-justified 8-byte values. |
1295 | 200 | if (!StackPtr.getNode()) |
1296 | 38 | StackPtr = DAG.getCopyFromReg(Chain, DL, SystemZ::R15D, PtrVT); |
1297 | 200 | unsigned Offset = SystemZMC::CallFrameSize + VA.getLocMemOffset(); |
1298 | 200 | if (VA.getLocVT() == MVT::i32 || VA.getLocVT() == MVT::f32)
1299 | 37 | Offset += 4; |
1300 | 200 | SDValue Address = DAG.getNode(ISD::ADD, DL, PtrVT, StackPtr, |
1301 | 200 | DAG.getIntPtrConstant(Offset, DL)); |
1302 | 200 | |
1303 | 200 | // Emit the store. |
1304 | 200 | MemOpChains.push_back( |
1305 | 200 | DAG.getStore(Chain, DL, ArgValue, Address, MachinePointerInfo())); |
1306 | 200 | } |
1307 | 860 | } |
1308 | 454 | |
1309 | 454 | // Join the stores, which are independent of one another. |
1310 | 454 | if (!MemOpChains.empty()) |
1311 | 58 | Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOpChains); |
1312 | 454 | |
1313 | 454 | // Accept direct calls by converting symbolic call addresses to the |
1314 | 454 | // associated Target* opcodes. Force %r1 to be used for indirect |
1315 | 454 | // tail calls. |
1316 | 454 | SDValue Glue; |
1317 | 454 | if (auto *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
1318 | 362 | Callee = DAG.getTargetGlobalAddress(G->getGlobal(), DL, PtrVT); |
1319 | 362 | Callee = DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Callee); |
1320 | 454 | } else if (auto *E = dyn_cast<ExternalSymbolSDNode>(Callee)) {
1321 | 60 | Callee = DAG.getTargetExternalSymbol(E->getSymbol(), PtrVT); |
1322 | 60 | Callee = DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Callee); |
1323 | 92 | } else if (IsTailCall) {
1324 | 28 | Chain = DAG.getCopyToReg(Chain, DL, SystemZ::R1D, Callee, Glue); |
1325 | 28 | Glue = Chain.getValue(1); |
1326 | 28 | Callee = DAG.getRegister(SystemZ::R1D, Callee.getValueType()); |
1327 | 28 | } |
1328 | 454 | |
1329 | 454 | // Build a sequence of copy-to-reg nodes, chained and glued together. |
1330 | 1.11k | for (unsigned I = 0, E = RegsToPass.size(); I != E; ++I) {
1331 | 660 | Chain = DAG.getCopyToReg(Chain, DL, RegsToPass[I].first, |
1332 | 660 | RegsToPass[I].second, Glue); |
1333 | 660 | Glue = Chain.getValue(1); |
1334 | 660 | } |
1335 | 454 | |
1336 | 454 | // The first call operand is the chain and the second is the target address. |
1337 | 454 | SmallVector<SDValue, 8> Ops; |
1338 | 454 | Ops.push_back(Chain); |
1339 | 454 | Ops.push_back(Callee); |
1340 | 454 | |
1341 | 454 | // Add argument registers to the end of the list so that they are |
1342 | 454 | // known live into the call. |
1343 | 1.11k | for (unsigned I = 0, E = RegsToPass.size(); I != E; ++I)
1344 | 660 | Ops.push_back(DAG.getRegister(RegsToPass[I].first, |
1345 | 660 | RegsToPass[I].second.getValueType())); |
1346 | 454 | |
1347 | 454 | // Add a register mask operand representing the call-preserved registers. |
1348 | 454 | const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo(); |
1349 | 454 | const uint32_t *Mask = TRI->getCallPreservedMask(MF, CallConv); |
1350 | 454 | assert(Mask && "Missing call preserved mask for calling convention"); |
1351 | 454 | Ops.push_back(DAG.getRegisterMask(Mask)); |
1352 | 454 | |
1353 | 454 | // Glue the call to the argument copies, if any. |
1354 | 454 | if (Glue.getNode()) |
1355 | 290 | Ops.push_back(Glue); |
1356 | 454 | |
1357 | 454 | // Emit the call. |
1358 | 454 | SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue); |
1359 | 454 | if (IsTailCall) |
1360 | 64 | return DAG.getNode(SystemZISD::SIBCALL, DL, NodeTys, Ops); |
1361 | 390 | Chain = DAG.getNode(SystemZISD::CALL, DL, NodeTys, Ops); |
1362 | 390 | Glue = Chain.getValue(1); |
1363 | 390 | |
1364 | 390 | // Mark the end of the call, which is glued to the call itself. |
1365 | 390 | Chain = DAG.getCALLSEQ_END(Chain, |
1366 | 390 | DAG.getConstant(NumBytes, DL, PtrVT, true), |
1367 | 390 | DAG.getConstant(0, DL, PtrVT, true), |
1368 | 390 | Glue, DL); |
1369 | 390 | Glue = Chain.getValue(1); |
1370 | 390 | |
1371 | 390 | // Assign locations to each value returned by this call. |
1372 | 390 | SmallVector<CCValAssign, 16> RetLocs; |
1373 | 390 | CCState RetCCInfo(CallConv, IsVarArg, MF, RetLocs, *DAG.getContext()); |
1374 | 390 | RetCCInfo.AnalyzeCallResult(Ins, RetCC_SystemZ); |
1375 | 390 | |
1376 | 390 | // Copy all of the result registers out of their specified physreg. |
1377 | 651 | for (unsigned I = 0, E = RetLocs.size(); I != E; ++I) {
1378 | 261 | CCValAssign &VA = RetLocs[I]; |
1379 | 261 | |
1380 | 261 | // Copy the value out, gluing the copy to the end of the call sequence. |
1381 | 261 | SDValue RetValue = DAG.getCopyFromReg(Chain, DL, VA.getLocReg(), |
1382 | 261 | VA.getLocVT(), Glue); |
1383 | 261 | Chain = RetValue.getValue(1); |
1384 | 261 | Glue = RetValue.getValue(2); |
1385 | 261 | |
1386 | 261 | // Convert the value of the return register into the value that's |
1387 | 261 | // being returned. |
1388 | 261 | InVals.push_back(convertLocVTToValVT(DAG, DL, VA, Chain, RetValue)); |
1389 | 261 | } |
1390 | 454 | |
1391 | 454 | return Chain; |
1392 | 454 | } |
1393 | | |
1394 | | bool SystemZTargetLowering:: |
1395 | | CanLowerReturn(CallingConv::ID CallConv, |
1396 | | MachineFunction &MF, bool isVarArg, |
1397 | | const SmallVectorImpl<ISD::OutputArg> &Outs, |
1398 | 7.04k | LLVMContext &Context) const { |
1399 | 7.04k | // Detect unsupported vector return types. |
1400 | 7.04k | if (Subtarget.hasVector()) |
1401 | 2.40k | VerifyVectorTypes(Outs); |
1402 | 7.04k | |
1403 | 7.04k | // Special case that we cannot easily detect in RetCC_SystemZ since |
1404 | 7.04k | // i128 is not a legal type. |
1405 | 7.04k | for (auto &Out : Outs) |
1406 | 5.73k | if (Out.ArgVT == MVT::i128)
1407 | 11 | return false; |
1408 | 7.03k | |
1409 | 7.03k | SmallVector<CCValAssign, 16> RetLocs; |
1410 | 7.03k | CCState RetCCInfo(CallConv, isVarArg, MF, RetLocs, Context); |
1411 | 7.03k | return RetCCInfo.CheckReturn(Outs, RetCC_SystemZ); |
1412 | 7.03k | } |
1413 | | |
1414 | | SDValue |
1415 | | SystemZTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv, |
1416 | | bool IsVarArg, |
1417 | | const SmallVectorImpl<ISD::OutputArg> &Outs, |
1418 | | const SmallVectorImpl<SDValue> &OutVals, |
1419 | 6.55k | const SDLoc &DL, SelectionDAG &DAG) const { |
1420 | 6.55k | MachineFunction &MF = DAG.getMachineFunction(); |
1421 | 6.55k | |
1422 | 6.55k | // Detect unsupported vector return types. |
1423 | 6.55k | if (Subtarget.hasVector()) |
1424 | 2.34k | VerifyVectorTypes(Outs); |
1425 | 6.55k | |
1426 | 6.55k | // Assign locations to each returned value. |
1427 | 6.55k | SmallVector<CCValAssign, 16> RetLocs; |
1428 | 6.55k | CCState RetCCInfo(CallConv, IsVarArg, MF, RetLocs, *DAG.getContext()); |
1429 | 6.55k | RetCCInfo.AnalyzeReturn(Outs, RetCC_SystemZ); |
1430 | 6.55k | |
1431 | 6.55k | // Quick exit for void returns |
1432 | 6.55k | if (RetLocs.empty()) |
1433 | 1.56k | return DAG.getNode(SystemZISD::RET_FLAG, DL, MVT::Other, Chain); |
1434 | 4.99k | |
1435 | 4.99k | // Copy the result values into the output registers. |
1436 | 4.99k | SDValue Glue; |
1437 | 4.99k | SmallVector<SDValue, 4> RetOps; |
1438 | 4.99k | RetOps.push_back(Chain); |
1439 | 10.3k | for (unsigned I = 0, E = RetLocs.size(); I != E; ++I) {
1440 | 5.38k | CCValAssign &VA = RetLocs[I]; |
1441 | 5.38k | SDValue RetValue = OutVals[I]; |
1442 | 5.38k | |
1443 | 5.38k | // Make the return register live on exit. |
1444 | 5.38k | assert(VA.isRegLoc() && "Can only return in registers!"); |
1445 | 5.38k | |
1446 | 5.38k | // Promote the value as required. |
1447 | 5.38k | RetValue = convertValVTToLocVT(DAG, DL, VA, RetValue); |
1448 | 5.38k | |
1449 | 5.38k | // Chain and glue the copies together. |
1450 | 5.38k | unsigned Reg = VA.getLocReg(); |
1451 | 5.38k | Chain = DAG.getCopyToReg(Chain, DL, Reg, RetValue, Glue); |
1452 | 5.38k | Glue = Chain.getValue(1); |
1453 | 5.38k | RetOps.push_back(DAG.getRegister(Reg, VA.getLocVT())); |
1454 | 5.38k | } |
1455 | 4.99k | |
1456 | 4.99k | // Update chain and glue. |
1457 | 4.99k | RetOps[0] = Chain; |
1458 | 4.99k | if (Glue.getNode()) |
1459 | 4.99k | RetOps.push_back(Glue); |
1460 | 6.55k | |
1461 | 6.55k | return DAG.getNode(SystemZISD::RET_FLAG, DL, MVT::Other, RetOps); |
1462 | 6.55k | } |
1463 | | |
1464 | | // Return true if Op is an intrinsic node with chain that returns the CC value |
1465 | | // as its only (other) argument. Provide the associated SystemZISD opcode and |
1466 | | // the mask of valid CC values if so. |
1467 | | static bool isIntrinsicWithCCAndChain(SDValue Op, unsigned &Opcode, |
1468 | 52 | unsigned &CCValid) { |
1469 | 52 | unsigned Id = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue(); |
1470 | 52 | switch (Id) { |
1471 | 1 | case Intrinsic::s390_tbegin: |
1472 | 1 | Opcode = SystemZISD::TBEGIN; |
1473 | 1 | CCValid = SystemZ::CCMASK_TBEGIN; |
1474 | 1 | return true; |
1475 | 52 | |
1476 | 10 | case Intrinsic::s390_tbegin_nofloat: |
1477 | 10 | Opcode = SystemZISD::TBEGIN_NOFLOAT; |
1478 | 10 | CCValid = SystemZ::CCMASK_TBEGIN; |
1479 | 10 | return true; |
1480 | 52 | |
1481 | 3 | case Intrinsic::s390_tend: |
1482 | 3 | Opcode = SystemZISD::TEND; |
1483 | 3 | CCValid = SystemZ::CCMASK_TEND; |
1484 | 3 | return true; |
1485 | 52 | |
1486 | 38 | default: |
1487 | 38 | return false; |
1488 | 0 | } |
1489 | 0 | } |
1490 | | |
1491 | | // Return true if Op is an intrinsic node without chain that returns the |
1492 | | // CC value as its final argument. Provide the associated SystemZISD |
1493 | | // opcode and the mask of valid CC values if so. |
1494 | 497 | static bool isIntrinsicWithCC(SDValue Op, unsigned &Opcode, unsigned &CCValid) { |
1495 | 497 | unsigned Id = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); |
1496 | 497 | switch (Id) { |
1497 | 9 | case Intrinsic::s390_vpkshs: |
1498 | 9 | case Intrinsic::s390_vpksfs: |
1499 | 9 | case Intrinsic::s390_vpksgs: |
1500 | 9 | Opcode = SystemZISD::PACKS_CC; |
1501 | 9 | CCValid = SystemZ::CCMASK_VCMP; |
1502 | 9 | return true; |
1503 | 9 | |
1504 | 9 | case Intrinsic::s390_vpklshs: |
1505 | 9 | case Intrinsic::s390_vpklsfs: |
1506 | 9 | case Intrinsic::s390_vpklsgs: |
1507 | 9 | Opcode = SystemZISD::PACKLS_CC; |
1508 | 9 | CCValid = SystemZ::CCMASK_VCMP; |
1509 | 9 | return true; |
1510 | 9 | |
1511 | 16 | case Intrinsic::s390_vceqbs: |
1512 | 16 | case Intrinsic::s390_vceqhs: |
1513 | 16 | case Intrinsic::s390_vceqfs: |
1514 | 16 | case Intrinsic::s390_vceqgs: |
1515 | 16 | Opcode = SystemZISD::VICMPES; |
1516 | 16 | CCValid = SystemZ::CCMASK_VCMP; |
1517 | 16 | return true; |
1518 | 16 | |
1519 | 16 | case Intrinsic::s390_vchbs: |
1520 | 16 | case Intrinsic::s390_vchhs: |
1521 | 16 | case Intrinsic::s390_vchfs: |
1522 | 16 | case Intrinsic::s390_vchgs: |
1523 | 16 | Opcode = SystemZISD::VICMPHS; |
1524 | 16 | CCValid = SystemZ::CCMASK_VCMP; |
1525 | 16 | return true; |
1526 | 16 | |
1527 | 16 | case Intrinsic::s390_vchlbs: |
1528 | 16 | case Intrinsic::s390_vchlhs: |
1529 | 16 | case Intrinsic::s390_vchlfs: |
1530 | 16 | case Intrinsic::s390_vchlgs: |
1531 | 16 | Opcode = SystemZISD::VICMPHLS; |
1532 | 16 | CCValid = SystemZ::CCMASK_VCMP; |
1533 | 16 | return true; |
1534 | 16 | |
1535 | 2 | case Intrinsic::s390_vtm: |
1536 | 2 | Opcode = SystemZISD::VTM; |
1537 | 2 | CCValid = SystemZ::CCMASK_VCMP; |
1538 | 2 | return true; |
1539 | 16 | |
1540 | 3 | case Intrinsic::s390_vfaebs: |
1541 | 3 | case Intrinsic::s390_vfaehs: |
1542 | 3 | case Intrinsic::s390_vfaefs: |
1543 | 3 | Opcode = SystemZISD::VFAE_CC; |
1544 | 3 | CCValid = SystemZ::CCMASK_ANY; |
1545 | 3 | return true; |
1546 | 3 | |
1547 | 3 | case Intrinsic::s390_vfaezbs: |
1548 | 3 | case Intrinsic::s390_vfaezhs: |
1549 | 3 | case Intrinsic::s390_vfaezfs: |
1550 | 3 | Opcode = SystemZISD::VFAEZ_CC; |
1551 | 3 | CCValid = SystemZ::CCMASK_ANY; |
1552 | 3 | return true; |
1553 | 3 | |
1554 | 3 | case Intrinsic::s390_vfeebs: |
1555 | 3 | case Intrinsic::s390_vfeehs: |
1556 | 3 | case Intrinsic::s390_vfeefs: |
1557 | 3 | Opcode = SystemZISD::VFEE_CC; |
1558 | 3 | CCValid = SystemZ::CCMASK_ANY; |
1559 | 3 | return true; |
1560 | 3 | |
1561 | 3 | case Intrinsic::s390_vfeezbs: |
1562 | 3 | case Intrinsic::s390_vfeezhs: |
1563 | 3 | case Intrinsic::s390_vfeezfs: |
1564 | 3 | Opcode = SystemZISD::VFEEZ_CC; |
1565 | 3 | CCValid = SystemZ::CCMASK_ANY; |
1566 | 3 | return true; |
1567 | 3 | |
1568 | 3 | case Intrinsic::s390_vfenebs: |
1569 | 3 | case Intrinsic::s390_vfenehs: |
1570 | 3 | case Intrinsic::s390_vfenefs: |
1571 | 3 | Opcode = SystemZISD::VFENE_CC; |
1572 | 3 | CCValid = SystemZ::CCMASK_ANY; |
1573 | 3 | return true; |
1574 | 3 | |
1575 | 3 | case Intrinsic::s390_vfenezbs: |
1576 | 3 | case Intrinsic::s390_vfenezhs: |
1577 | 3 | case Intrinsic::s390_vfenezfs: |
1578 | 3 | Opcode = SystemZISD::VFENEZ_CC; |
1579 | 3 | CCValid = SystemZ::CCMASK_ANY; |
1580 | 3 | return true; |
1581 | 3 | |
1582 | 3 | case Intrinsic::s390_vistrbs: |
1583 | 3 | case Intrinsic::s390_vistrhs: |
1584 | 3 | case Intrinsic::s390_vistrfs: |
1585 | 3 | Opcode = SystemZISD::VISTR_CC; |
1586 | 3 | CCValid = SystemZ::CCMASK_0 | SystemZ::CCMASK_3; |
1587 | 3 | return true; |
1588 | 3 | |
1589 | 3 | case Intrinsic::s390_vstrcbs: |
1590 | 3 | case Intrinsic::s390_vstrchs: |
1591 | 3 | case Intrinsic::s390_vstrcfs: |
1592 | 3 | Opcode = SystemZISD::VSTRC_CC; |
1593 | 3 | CCValid = SystemZ::CCMASK_ANY; |
1594 | 3 | return true; |
1595 | 3 | |
1596 | 3 | case Intrinsic::s390_vstrczbs: |
1597 | 3 | case Intrinsic::s390_vstrczhs: |
1598 | 3 | case Intrinsic::s390_vstrczfs: |
1599 | 3 | Opcode = SystemZISD::VSTRCZ_CC; |
1600 | 3 | CCValid = SystemZ::CCMASK_ANY; |
1601 | 3 | return true; |
1602 | 3 | |
1603 | 8 | case Intrinsic::s390_vfcedbs: |
1604 | 8 | case Intrinsic::s390_vfcesbs: |
1605 | 8 | Opcode = SystemZISD::VFCMPES; |
1606 | 8 | CCValid = SystemZ::CCMASK_VCMP; |
1607 | 8 | return true; |
1608 | 8 | |
1609 | 8 | case Intrinsic::s390_vfchdbs: |
1610 | 8 | case Intrinsic::s390_vfchsbs: |
1611 | 8 | Opcode = SystemZISD::VFCMPHS; |
1612 | 8 | CCValid = SystemZ::CCMASK_VCMP; |
1613 | 8 | return true; |
1614 | 8 | |
1615 | 8 | case Intrinsic::s390_vfchedbs: |
1616 | 8 | case Intrinsic::s390_vfchesbs: |
1617 | 8 | Opcode = SystemZISD::VFCMPHES; |
1618 | 8 | CCValid = SystemZ::CCMASK_VCMP; |
1619 | 8 | return true; |
1620 | 8 | |
1621 | 4 | case Intrinsic::s390_vftcidb: |
1622 | 4 | case Intrinsic::s390_vftcisb: |
1623 | 4 | Opcode = SystemZISD::VFTCI; |
1624 | 4 | CCValid = SystemZ::CCMASK_VCMP; |
1625 | 4 | return true; |
1626 | 4 | |
1627 | 36 | case Intrinsic::s390_tdc: |
1628 | 36 | Opcode = SystemZISD::TDC; |
1629 | 36 | CCValid = SystemZ::CCMASK_TDC; |
1630 | 36 | return true; |
1631 | 4 | |
1632 | 338 | default: |
1633 | 338 | return false; |
1634 | 0 | } |
1635 | 0 | } |
1636 | | |
1637 | | // Emit an intrinsic with chain with a glued value instead of its CC result. |
1638 | | static SDValue emitIntrinsicWithChainAndGlue(SelectionDAG &DAG, SDValue Op, |
1639 | 14 | unsigned Opcode) { |
1640 | 14 | // Copy all operands except the intrinsic ID. |
1641 | 14 | unsigned NumOps = Op.getNumOperands(); |
1642 | 14 | SmallVector<SDValue, 6> Ops; |
1643 | 14 | Ops.reserve(NumOps - 1); |
1644 | 14 | Ops.push_back(Op.getOperand(0)); |
1645 | 36 | for (unsigned I = 2; I < NumOps; ++I)
1646 | 22 | Ops.push_back(Op.getOperand(I)); |
1647 | 14 | |
1648 | 14 | assert(Op->getNumValues() == 2 && "Expected only CC result and chain"); |
1649 | 14 | SDVTList RawVTs = DAG.getVTList(MVT::Other, MVT::Glue); |
1650 | 14 | SDValue Intr = DAG.getNode(Opcode, SDLoc(Op), RawVTs, Ops); |
1651 | 14 | SDValue OldChain = SDValue(Op.getNode(), 1); |
1652 | 14 | SDValue NewChain = SDValue(Intr.getNode(), 0); |
1653 | 14 | DAG.ReplaceAllUsesOfValueWith(OldChain, NewChain); |
1654 | 14 | return Intr; |
1655 | 14 | } |
1656 | | |
1657 | | // Emit an intrinsic with a glued value instead of its CC result. |
1658 | | static SDValue emitIntrinsicWithGlue(SelectionDAG &DAG, SDValue Op, |
1659 | 159 | unsigned Opcode) { |
1660 | 159 | // Copy all operands except the intrinsic ID. |
1661 | 159 | unsigned NumOps = Op.getNumOperands(); |
1662 | 159 | SmallVector<SDValue, 6> Ops; |
1663 | 159 | Ops.reserve(NumOps - 1); |
1664 | 492 | for (unsigned I = 1; I < NumOps; ++I)
1665 | 333 | Ops.push_back(Op.getOperand(I)); |
1666 | 159 | |
1667 | 159 | if (Op->getNumValues() == 1) |
1668 | 38 | return DAG.getNode(Opcode, SDLoc(Op), MVT::Glue, Ops); |
1669 | 159 | assert(Op->getNumValues() == 2 && "Expected exactly one non-CC result"); |
1670 | 121 | SDVTList RawVTs = DAG.getVTList(Op->getValueType(0), MVT::Glue); |
1671 | 121 | return DAG.getNode(Opcode, SDLoc(Op), RawVTs, Ops); |
1672 | 121 | } |
1673 | | |
1674 | | // CC is a comparison that will be implemented using an integer or |
1675 | | // floating-point comparison. Return the condition code mask for |
1676 | | // a branch on true. In the integer case, CCMASK_CMP_UO is set for |
1677 | | // unsigned comparisons and clear for signed ones. In the floating-point |
1678 | | // case, CCMASK_CMP_UO has its normal mask meaning (unordered). |
1679 | 1.41k | static unsigned CCMaskForCondCode(ISD::CondCode CC) { |
1680 | 1.41k | #define CONV(X) \ |
1681 | 778 | case ISD::SET##X: return SystemZ::CCMASK_CMP_##X; \ |
1682 | 154 | case ISD::SETO##X: return SystemZ::CCMASK_CMP_##X; \ |
1683 | 778 | case ISD::SETU##X: return SystemZ::CCMASK_CMP_UO | SystemZ::CCMASK_CMP_##X |
1684 | 1.41k | |
1685 | 1.41k | switch (CC) { |
1686 | 0 | default: |
1687 | 0 | llvm_unreachable("Invalid integer condition!"); |
1688 | 1.41k | |
1689 | 212 | CONV(EQ);
1690 | 259 | CONV(NE);
1691 | 66 | CONV(GT);
1692 | 16 | CONV(GE);
1693 | 219 | CONV(LT);
1694 | 6 | CONV(LE);
1695 | 1.41k | |
1696 | 11 | case ISD::SETO: return SystemZ::CCMASK_CMP_O; |
1697 | 8 | case ISD::SETUO: return SystemZ::CCMASK_CMP_UO; |
1698 | 0 | } |
1699 | 0 | #undef CONV |
1700 | 0 | } |
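
For reference, each CONV(...) invocation above expands to three cases; CONV(GT), for example, produces roughly the following (preprocessor output reconstructed here for illustration, spacing adjusted):

  case ISD::SETGT:  return SystemZ::CCMASK_CMP_GT;
  case ISD::SETOGT: return SystemZ::CCMASK_CMP_GT;
  case ISD::SETUGT: return SystemZ::CCMASK_CMP_UO | SystemZ::CCMASK_CMP_GT;
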
1701 | | |
1702 | | // Return a sequence for getting a 1 from an IPM result when CC has a |
1703 | | // value in CCMask and a 0 when CC has a value in CCValid & ~CCMask. |
1704 | | // The handling of CC values outside CCValid doesn't matter. |
1705 | 200 | static IPMConversion getIPMConversion(unsigned CCValid, unsigned CCMask) { |
1706 | 200 | // Deal with cases where the result can be taken directly from a bit |
1707 | 200 | // of the IPM result. |
1708 | 200 | if (CCMask == (CCValid & (SystemZ::CCMASK_1 | SystemZ::CCMASK_3))) |
1709 | 44 | return IPMConversion(0, 0, SystemZ::IPM_CC); |
1710 | 156 | if (156 CCMask == (CCValid & (SystemZ::CCMASK_2 | SystemZ::CCMASK_3))156 ) |
1711 | 27 | return IPMConversion(0, 0, SystemZ::IPM_CC + 1); |
1712 | 129 | |
1713 | 129 | // Deal with cases where we can add a value to force the sign bit |
1714 | 129 | // to contain the right value. Putting the bit in 31 means we can |
1715 | 129 | // use SRL rather than RISBG(L), and also makes it easier to get a |
1716 | 129 | // 0/-1 value, so it has priority over the other tests below. |
1717 | 129 | // |
1718 | 129 | // These sequences rely on the fact that the upper two bits of the |
1719 | 129 | // IPM result are zero. |
1720 | 129 | uint64_t TopBit = uint64_t(1) << 31; |
1721 | 129 | if (CCMask == (CCValid & SystemZ::CCMASK_0)) |
1722 | 67 | return IPMConversion(0, -(1 << SystemZ::IPM_CC), 31); |
1723 | 62 | if (62 CCMask == (CCValid & (SystemZ::CCMASK_0 | SystemZ::CCMASK_1))62 ) |
1724 | 10 | return IPMConversion(0, -(2 << SystemZ::IPM_CC), 31); |
1725 | 52 | if (52 CCMask == (CCValid & (SystemZ::CCMASK_0 |
1726 | 52 | | SystemZ::CCMASK_1 |
1727 | 52 | | SystemZ::CCMASK_2))) |
1728 | 4 | return IPMConversion(0, -(3 << SystemZ::IPM_CC), 31); |
1729 | 48 | if (48 CCMask == (CCValid & SystemZ::CCMASK_3)48 ) |
1730 | 4 | return IPMConversion(0, TopBit - (3 << SystemZ::IPM_CC), 31); |
1731 | 44 | if (44 CCMask == (CCValid & (SystemZ::CCMASK_1 |
1732 | 44 | | SystemZ::CCMASK_2 |
1733 | 44 | | SystemZ::CCMASK_3))) |
1734 | 15 | return IPMConversion(0, TopBit - (1 << SystemZ::IPM_CC), 31); |
1735 | 29 | |
1736 | 29 | // Next try inverting the value and testing a bit. 0/1 could be |
1737 | 29 | // handled this way too, but we dealt with that case above. |
1738 | 29 | if (29 CCMask == (CCValid & (SystemZ::CCMASK_0 | SystemZ::CCMASK_2))29 ) |
1739 | 5 | return IPMConversion(-1, 0, SystemZ::IPM_CC); |
1740 | 24 | |
1741 | 24 | // Handle cases where adding a value forces a non-sign bit to contain |
1742 | 24 | // the right value. |
1743 | 24 | if (24 CCMask == (CCValid & (SystemZ::CCMASK_1 | SystemZ::CCMASK_2))24 ) |
1744 | 4 | return IPMConversion(0, 1 << SystemZ::IPM_CC, SystemZ::IPM_CC + 1); |
1745 | 20 | if (20 CCMask == (CCValid & (SystemZ::CCMASK_0 | SystemZ::CCMASK_3))20 ) |
1746 | 4 | return IPMConversion(0, -(1 << SystemZ::IPM_CC), SystemZ::IPM_CC + 1); |
1747 | 16 | |
1748 | 16 | // The remaining cases are 1, 2, 0/1/3 and 0/2/3. All of these
1749 | 16 | // can be done by inverting the low CC bit and applying one of the
1750 | 16 | // sign-based extractions above. |
1751 | 16 | if (16 CCMask == (CCValid & SystemZ::CCMASK_1)16 ) |
1752 | 4 | return IPMConversion(1 << SystemZ::IPM_CC, -(1 << SystemZ::IPM_CC), 31); |
1753 | 12 | if (12 CCMask == (CCValid & SystemZ::CCMASK_2)12 ) |
1754 | 4 | return IPMConversion(1 << SystemZ::IPM_CC, |
1755 | 4 | TopBit - (3 << SystemZ::IPM_CC), 31); |
1756 | 8 | if (8 CCMask == (CCValid & (SystemZ::CCMASK_0 |
1757 | 8 | | SystemZ::CCMASK_1 |
1758 | 8 | | SystemZ::CCMASK_3))) |
1759 | 4 | return IPMConversion(1 << SystemZ::IPM_CC, -(3 << SystemZ::IPM_CC), 31); |
1760 | 4 | if (4 CCMask == (CCValid & (SystemZ::CCMASK_0 |
1761 | 4 | | SystemZ::CCMASK_2 |
1762 | 4 | | SystemZ::CCMASK_3))) |
1763 | 4 | return IPMConversion(1 << SystemZ::IPM_CC, |
1764 | 4 | TopBit - (1 << SystemZ::IPM_CC), 31); |
1765 | 0 |
1766 | 0 | llvm_unreachable("Unexpected CC combination");
1767 | 0 | } |
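
As a concrete illustration of the first sign-bit case above, here is a standalone sketch (not part of this file). It assumes SystemZ::IPM_CC == 28, relies on the documented fact that the upper two bits of the IPM result are zero, and ignores the program-mask bits, which do not change the outcome. getIPMConversion returns {0, -(1 << IPM_CC), 31} when CCMask selects only CC 0, so the emitted sequence is ((IPM result + AddValue) >> 31):

  #include <cassert>
  #include <cstdint>

  // Returns 1 when the condition code was 0, otherwise 0, using the same
  // add-then-shift trick as the CCMASK_0 conversion above.
  static unsigned ccIsZero(uint32_t CC) {
    uint32_t IPM = CC << 28;         // CC sits in bits 28-29 of the IPM result.
    uint32_t Sum = IPM - (1u << 28); // AddValue = -(1 << IPM_CC), i.e. subtract.
    return Sum >> 31;                // Sign bit is set only if CC was 0.
  }

  int main() {
    assert(ccIsZero(0) == 1);
    assert(ccIsZero(1) == 0 && ccIsZero(2) == 0 && ccIsZero(3) == 0);
    return 0;
  }
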
1768 | | |
1769 | | // If C can be converted to a comparison against zero, adjust the operands |
1770 | | // as necessary. |
1771 | 1.17k | static void adjustZeroCmp(SelectionDAG &DAG, const SDLoc &DL, Comparison &C) { |
1772 | 1.17k | if (C.ICmpType == SystemZICMP::UnsignedOnly) |
1773 | 349 | return; |
1774 | 826 | |
1775 | 826 | auto *ConstOp1 = dyn_cast<ConstantSDNode>(C.Op1.getNode()); |
1776 | 826 | if (!ConstOp1) |
1777 | 189 | return; |
1778 | 637 | |
1779 | 637 | int64_t Value = ConstOp1->getSExtValue(); |
1780 | 637 | if ((Value == -1 && C.CCMask == SystemZ::CCMASK_CMP_GT) ||
1781 | 620 | (Value == -1 && C.CCMask == SystemZ::CCMASK_CMP_LE) ||
1782 | 620 | (Value == 1 && C.CCMask == SystemZ::CCMASK_CMP_LT) ||
1783 | 637 | (Value == 1 && C.CCMask == SystemZ::CCMASK_CMP_GE)) {
1784 | 29 | C.CCMask ^= SystemZ::CCMASK_CMP_EQ; |
1785 | 29 | C.Op1 = DAG.getConstant(0, DL, C.Op1.getValueType()); |
1786 | 29 | } |
1787 | 1.17k | } |
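
The XOR above turns a strict comparison against -1 or +1 into an inclusive comparison against 0 (for example, X > -1 becomes X >= 0). A minimal standalone check of that mask identity, assuming the CCMASK_CMP_* bit values from SystemZ.h (EQ = 8, LT = 4, GT = 2, with GE and LE being the obvious ORs):

  #include <cassert>

  int main() {
    const unsigned EQ = 8, LT = 4, GT = 2;  // assumed one-hot CC mask bits
    // X > -1 (GT against -1) is rewritten as X >= 0 (GE against 0).
    assert((GT ^ EQ) == (EQ | GT));
    // X < 1 (LT against 1) is rewritten as X <= 0 (LE against 0).
    assert((LT ^ EQ) == (EQ | LT));
    return 0;
  }
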
1788 | | |
1789 | | // If a comparison described by C is suitable for CLI(Y), CHHSI or CLHHSI, |
1790 | | // adjust the operands as necessary. |
1791 | | static void adjustSubwordCmp(SelectionDAG &DAG, const SDLoc &DL, |
1792 | 1.17k | Comparison &C) { |
1793 | 1.17k | // For us to make any changes, it must be a comparison between a single-use
1794 | 1.17k | // load and a constant. |
1795 | 1.17k | if (!C.Op0.hasOneUse() || |
1796 | 987 | C.Op0.getOpcode() != ISD::LOAD || |
1797 | 242 | C.Op1.getOpcode() != ISD::Constant) |
1798 | 951 | return; |
1799 | 224 | |
1800 | 224 | // We must have an 8- or 16-bit load. |
1801 | 224 | auto *Load = cast<LoadSDNode>(C.Op0); |
1802 | 224 | unsigned NumBits = Load->getMemoryVT().getStoreSizeInBits(); |
1803 | 224 | if (NumBits != 8 && NumBits != 16)
1804 | 61 | return; |
1805 | 163 | |
1806 | 163 | // The load must be an extending one and the constant must be within the |
1807 | 163 | // range of the unextended value. |
1808 | 163 | auto *ConstOp1 = cast<ConstantSDNode>(C.Op1); |
1809 | 163 | uint64_t Value = ConstOp1->getZExtValue(); |
1810 | 163 | uint64_t Mask = (1 << NumBits) - 1; |
1811 | 163 | if (Load->getExtensionType() == ISD::SEXTLOAD) {
1812 | 101 | // Make sure that ConstOp1 is in range of C.Op0. |
1813 | 101 | int64_t SignedValue = ConstOp1->getSExtValue(); |
1814 | 101 | if (uint64_t(SignedValue) + (uint64_t(1) << (NumBits - 1)) > Mask) |
1815 | 28 | return; |
1816 | 73 | if (C.ICmpType != SystemZICMP::SignedOnly) {
1817 | 40 | // Unsigned comparison between two sign-extended values is equivalent |
1818 | 40 | // to unsigned comparison between two zero-extended values. |
1819 | 40 | Value &= Mask; |
1820 | 73 | } else if (NumBits == 8) {
1821 | 16 | // Try to treat the comparison as unsigned, so that we can use CLI. |
1822 | 16 | // Adjust CCMask and Value as necessary. |
1823 | 16 | if (Value == 0 && C.CCMask == SystemZ::CCMASK_CMP_LT)
1824 | 16 | // Test whether the high bit of the byte is set. |
1825 | 6 | Value = 127, C.CCMask = SystemZ::CCMASK_CMP_GT; |
1826 | 10 | else if (Value == 0 && C.CCMask == SystemZ::CCMASK_CMP_GE)
1827 | 10 | // Test whether the high bit of the byte is clear. |
1828 | 6 | Value = 128, C.CCMask = SystemZ::CCMASK_CMP_LT; |
1829 | 10 | else |
1830 | 10 | // No instruction exists for this combination. |
1831 | 4 | return; |
1832 | 12 | C.ICmpType = SystemZICMP::UnsignedOnly; |
1833 | 12 | } |
1834 | 163 | } else if (Load->getExtensionType() == ISD::ZEXTLOAD) {
1835 | 62 | if (Value > Mask) |
1836 | 0 | return; |
1837 | 62 | // If the constant is in range, we can use any comparison. |
1838 | 62 | C.ICmpType = SystemZICMP::Any; |
1839 | 62 | } else |
1840 | 0 | return; |
1841 | 131 | |
1842 | 131 | // Make sure that the first operand is an i32 of the right extension type. |
1843 | 131 | ISD::LoadExtType ExtType = (C.ICmpType == SystemZICMP::SignedOnly ? |
1844 | 17 | ISD::SEXTLOAD : |
1845 | 114 | ISD::ZEXTLOAD); |
1846 | 131 | if (C.Op0.getValueType() != MVT::i32 || |
1847 | 99 | Load->getExtensionType() != ExtType) |
1848 | 60 | C.Op0 = DAG.getExtLoad(ExtType, SDLoc(Load), MVT::i32, Load->getChain(), |
1849 | 60 | Load->getBasePtr(), Load->getPointerInfo(), |
1850 | 60 | Load->getMemoryVT(), Load->getAlignment(), |
1851 | 60 | Load->getMemOperand()->getFlags()); |
1852 | 131 | |
1853 | 131 | // Make sure that the second operand is an i32 with the right value. |
1854 | 131 | if (C.Op1.getValueType() != MVT::i32 || |
1855 | 99 | Value != ConstOp1->getZExtValue()) |
1856 | 50 | C.Op1 = DAG.getConstant(Value, DL, MVT::i32); |
1857 | 1.17k | } |
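
One of the rewrites above replaces a signed test on a sign-extended byte with an unsigned CLI-style test: for an 8-bit value, "sign-extended X < 0" holds exactly when "zero-extended X > 127". A small standalone check of that equivalence (illustrative only):

  #include <cassert>
  #include <cstdint>

  int main() {
    for (int V = -128; V <= 127; ++V) {
      bool SignedLtZero = int8_t(V) < 0;      // CCMASK_CMP_LT against 0
      bool UnsignedGt127 = uint8_t(V) > 127;  // CCMASK_CMP_GT against 127
      assert(SignedLtZero == UnsignedGt127);
    }
    return 0;
  }
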
1858 | | |
1859 | | // Return true if Op is either an unextended load, or a load suitable |
1860 | | // for integer register-memory comparisons of type ICmpType. |
1861 | 1.79k | static bool isNaturalMemoryOperand(SDValue Op, unsigned ICmpType) { |
1862 | 1.79k | auto *Load = dyn_cast<LoadSDNode>(Op.getNode()); |
1863 | 1.79k | if (Load) {
1864 | 443 | // There are no instructions to compare a register with a memory byte. |
1865 | 443 | if (Load->getMemoryVT() == MVT::i8) |
1866 | 74 | return false; |
1867 | 369 | // Otherwise decide on extension type. |
1868 | 369 | switch (Load->getExtensionType()) { |
1869 | 220 | case ISD::NON_EXTLOAD: |
1870 | 220 | return true; |
1871 | 75 | case ISD::SEXTLOAD: |
1872 | 75 | return ICmpType != SystemZICMP::UnsignedOnly; |
1873 | 72 | case ISD::ZEXTLOAD: |
1874 | 72 | return ICmpType != SystemZICMP::SignedOnly; |
1875 | 2 | default: |
1876 | 2 | break; |
1877 | 1.35k | } |
1878 | 1.35k | } |
1879 | 1.35k | return false; |
1880 | 1.35k | } |
1881 | | |
1882 | | // Return true if it is better to swap the operands of C. |
1883 | 1.41k | static bool shouldSwapCmpOperands(const Comparison &C) { |
1884 | 1.41k | // Leave f128 comparisons alone, since they have no memory forms. |
1885 | 1.41k | if (C.Op0.getValueType() == MVT::f128) |
1886 | 7 | return false; |
1887 | 1.40k | |
1888 | 1.40k | // Always keep a floating-point constant second, since comparisons with |
1889 | 1.40k | // zero can use LOAD TEST and comparisons with other constants make a |
1890 | 1.40k | // natural memory operand. |
1891 | 1.40k | if (1.40k isa<ConstantFPSDNode>(C.Op1)1.40k ) |
1892 | 48 | return false; |
1893 | 1.36k | |
1894 | 1.36k | // Never swap comparisons with zero since there are many ways to optimize |
1895 | 1.36k | // those later. |
1896 | 1.36k | auto *ConstOp1 = dyn_cast<ConstantSDNode>(C.Op1); |
1897 | 1.36k | if (ConstOp1 && ConstOp1->getZExtValue() == 0)
1898 | 359 | return false; |
1899 | 1.00k | |
1900 | 1.00k | // Also keep natural memory operands second if the loaded value is |
1901 | 1.00k | // only used here. Several comparisons have memory forms. |
1902 | 1.00k | if (1.00k isNaturalMemoryOperand(C.Op1, C.ICmpType) && 1.00k C.Op1.hasOneUse()207 ) |
1903 | 204 | return false; |
1904 | 797 | |
1905 | 797 | // Look for cases where Cmp0 is a single-use load and Cmp1 isn't. |
1906 | 797 | // In that case we generally prefer the memory to be second. |
1907 | 797 | if (797 isNaturalMemoryOperand(C.Op0, C.ICmpType) && 797 C.Op0.hasOneUse()148 ) { |
1908 | 126 | // The only exceptions are when the second operand is a constant and |
1909 | 126 | // we can use things like CHHSI. |
1910 | 126 | if (!ConstOp1) |
1911 | 42 | return true; |
1912 | 84 | // The unsigned memory-immediate instructions can handle 16-bit |
1913 | 84 | // unsigned integers. |
1914 | 84 | if (84 C.ICmpType != SystemZICMP::SignedOnly && |
1915 | 63 | isUInt<16>(ConstOp1->getZExtValue())) |
1916 | 49 | return false; |
1917 | 35 | // The signed memory-immediate instructions can handle 16-bit |
1918 | 35 | // signed integers. |
1919 | 35 | if (35 C.ICmpType != SystemZICMP::UnsignedOnly && |
1920 | 33 | isInt<16>(ConstOp1->getSExtValue())) |
1921 | 17 | return false; |
1922 | 18 | return true; |
1923 | 18 | } |
1924 | 671 | |
1925 | 671 | // Try to promote the use of CGFR and CLGFR. |
1926 | 671 | unsigned Opcode0 = C.Op0.getOpcode(); |
1927 | 671 | if (C.ICmpType != SystemZICMP::UnsignedOnly && Opcode0 == ISD::SIGN_EXTEND)
1928 | 1 | return true; |
1929 | 670 | if (C.ICmpType != SystemZICMP::SignedOnly && Opcode0 == ISD::ZERO_EXTEND)
1930 | 1 | return true; |
1931 | 669 | if (669 C.ICmpType != SystemZICMP::SignedOnly && |
1932 | 563 | Opcode0 == ISD::AND && |
1933 | 31 | C.Op0.getOperand(1).getOpcode() == ISD::Constant && |
1934 | 31 | cast<ConstantSDNode>(C.Op0.getOperand(1))->getZExtValue() == 0xffffffff) |
1935 | 1 | return true; |
1936 | 668 | |
1937 | 668 | return false; |
1938 | 668 | } |
1939 | | |
1940 | | // Return a version of comparison CC mask CCMask in which the LT and GT |
1941 | | // actions are swapped. |
1942 | 67 | static unsigned reverseCCMask(unsigned CCMask) { |
1943 | 67 | return ((CCMask & SystemZ::CCMASK_CMP_EQ) | |
1944 | 67 | (CCMask & SystemZ::CCMASK_CMP_GT ? SystemZ::CCMASK_CMP_LT : 0) |
1945 | 67 | (CCMask & SystemZ::CCMASK_CMP_LT ? SystemZ::CCMASK_CMP_GT : 0) |
1946 | 67 | (CCMask & SystemZ::CCMASK_CMP_UO)); |
1947 | 67 | } |
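
Swapping the operands of a comparison turns "less than" into "greater than" and vice versa while leaving EQ and UO alone, which is all the helper above does. A standalone sketch with the CC mask bits assumed from SystemZ.h (EQ = 8, LT = 4, GT = 2, UO = 1):

  #include <cassert>

  static unsigned rev(unsigned CCMask) {
    const unsigned EQ = 8, LT = 4, GT = 2, UO = 1;  // assumed bit assignments
    return (CCMask & EQ) | (CCMask & GT ? LT : 0) | (CCMask & LT ? GT : 0) |
           (CCMask & UO);
  }

  int main() {
    assert(rev(8 | 4) == (8 | 2));  // LE (a <= b) becomes GE (b >= a)
    assert(rev(2) == 4);            // GT becomes LT
    assert(rev(8) == 8);            // EQ is unchanged
    return 0;
  }
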
1948 | | |
1949 | | // Check whether C tests for equality between X and Y and whether X - Y |
1950 | | // or Y - X is also computed. In that case it's better to compare the |
1951 | | // result of the subtraction against zero. |
1952 | | static void adjustForSubtraction(SelectionDAG &DAG, const SDLoc &DL, |
1953 | 1.17k | Comparison &C) { |
1954 | 1.17k | if (C.CCMask == SystemZ::CCMASK_CMP_EQ || |
1955 | 1.17k | C.CCMask == SystemZ::CCMASK_CMP_NE) {
1956 | 1.12k | for (auto I = C.Op0->use_begin(), E = C.Op0->use_end(); I != E; ++I) {
1957 | 654 | SDNode *N = *I;
1958 | 654 | if (N->getOpcode() == ISD::SUB &&
1959 | 2 | ((N->getOperand(0) == C.Op0 && N->getOperand(1) == C.Op1) ||
1960 | 654 | (N->getOperand(0) == C.Op1 && N->getOperand(1) == C.Op0))) {
1961 | 1 | C.Op0 = SDValue(N, 0); |
1962 | 1 | C.Op1 = DAG.getConstant(0, DL, N->getValueType(0)); |
1963 | 1 | return; |
1964 | 1 | } |
1965 | 654 | } |
1966 | 471 | } |
1967 | 1.17k | } |
1968 | | |
1969 | | // Check whether C compares a floating-point value with zero and if that |
1970 | | // floating-point value is also negated. In this case we can use the |
1971 | | // negation to set CC, so avoiding separate LOAD AND TEST and |
1972 | | // LOAD (NEGATIVE/COMPLEMENT) instructions. |
1973 | 240 | static void adjustForFNeg(Comparison &C) { |
1974 | 240 | auto *C1 = dyn_cast<ConstantFPSDNode>(C.Op1); |
1975 | 240 | if (C1 && 240 C1->isZero()51 ) { |
1976 | 98 | for (auto I = C.Op0->use_begin(), E = C.Op0->use_end(); I != E; ++I) {
1977 | 62 | SDNode *N = *I; |
1978 | 62 | if (N->getOpcode() == ISD::FNEG) {
1979 | 4 | C.Op0 = SDValue(N, 0); |
1980 | 4 | C.CCMask = reverseCCMask(C.CCMask); |
1981 | 4 | return; |
1982 | 4 | } |
1983 | 62 | } |
1984 | 40 | } |
1985 | 240 | } |
1986 | | |
1987 | | // Check whether C compares (shl X, 32) with 0 and whether X is |
1988 | | // also sign-extended. In that case it is better to test the result |
1989 | | // of the sign extension using LTGFR. |
1990 | | // |
1991 | | // This case is important because InstCombine transforms a comparison |
1992 | | // with (sext (trunc X)) into a comparison with (shl X, 32). |
1993 | 1.17k | static void adjustForLTGFR(Comparison &C) { |
1994 | 1.17k | // Check for a comparison between (shl X, 32) and 0. |
1995 | 1.17k | if (C.Op0.getOpcode() == ISD::SHL && |
1996 | 13 | C.Op0.getValueType() == MVT::i64 && |
1997 | 13 | C.Op1.getOpcode() == ISD::Constant && |
1998 | 1.17k | cast<ConstantSDNode>(C.Op1)->getZExtValue() == 0) {
1999 | 13 | auto *C1 = dyn_cast<ConstantSDNode>(C.Op0.getOperand(1)); |
2000 | 13 | if (C1 && C1->getZExtValue() == 32) {
2001 | 13 | SDValue ShlOp0 = C.Op0.getOperand(0); |
2002 | 13 | // See whether X has any SIGN_EXTEND_INREG uses. |
2003 | 13 | for (auto I = ShlOp0->use_begin(), E = ShlOp0->use_end(); I != E; ++I) {
2004 | 13 | SDNode *N = *I; |
2005 | 13 | if (N->getOpcode() == ISD::SIGN_EXTEND_INREG && |
2006 | 13 | cast<VTSDNode>(N->getOperand(1))->getVT() == MVT::i32) {
2007 | 13 | C.Op0 = SDValue(N, 0); |
2008 | 13 | return; |
2009 | 13 | } |
2010 | 13 | } |
2011 | 13 | } |
2012 | 13 | } |
2013 | 1.17k | } |
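
The rewrite is justified because, against a zero right-hand side, (X << 32) and the sign extension of the low 32 bits of X always compare the same way: both are zero exactly when the low 32 bits of X are zero, and both have the same sign bit. A quick standalone check (illustrative only):

  #include <cassert>
  #include <cstdint>

  int main() {
    const int64_t Tests[] = {0, 1, -1, 0x80000000LL, 0x123456789LL};
    for (int64_t X : Tests) {
      int64_t Shifted = int64_t(uint64_t(X) << 32);     // (shl X, 32)
      int64_t Extended = int64_t(int32_t(uint32_t(X))); // sext of low 32 bits
      assert((Shifted == 0) == (Extended == 0));
      assert((Shifted < 0) == (Extended < 0));
    }
    return 0;
  }
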
2014 | | |
2015 | | // If C compares the truncation of an extending load, try to compare |
2016 | | // the untruncated value instead. This exposes more opportunities to |
2017 | | // reuse CC. |
2018 | | static void adjustICmpTruncate(SelectionDAG &DAG, const SDLoc &DL, |
2019 | 1.17k | Comparison &C) { |
2020 | 1.17k | if (C.Op0.getOpcode() == ISD::TRUNCATE && |
2021 | 35 | C.Op0.getOperand(0).getOpcode() == ISD::LOAD && |
2022 | 7 | C.Op1.getOpcode() == ISD::Constant && |
2023 | 1.17k | cast<ConstantSDNode>(C.Op1)->getZExtValue() == 0) {
2024 | 7 | auto *L = cast<LoadSDNode>(C.Op0.getOperand(0)); |
2025 | 7 | if (L->getMemoryVT().getStoreSizeInBits() <= C.Op0.getValueSizeInBits()7 ) { |
2026 | 7 | unsigned Type = L->getExtensionType(); |
2027 | 7 | if ((Type == ISD::ZEXTLOAD && C.ICmpType != SystemZICMP::SignedOnly) ||
2028 | 7 | (Type == ISD::SEXTLOAD && C.ICmpType != SystemZICMP::UnsignedOnly)) {
2029 | 7 | C.Op0 = C.Op0.getOperand(0); |
2030 | 7 | C.Op1 = DAG.getConstant(0, DL, C.Op0.getValueType()); |
2031 | 7 | } |
2032 | 7 | } |
2033 | 7 | } |
2034 | 1.17k | } |
2035 | | |
2036 | | // Return true if shift operation N has an in-range constant shift value. |
2037 | | // Store it in ShiftVal if so. |
2038 | 17 | static bool isSimpleShift(SDValue N, unsigned &ShiftVal) { |
2039 | 17 | auto *Shift = dyn_cast<ConstantSDNode>(N.getOperand(1)); |
2040 | 17 | if (!Shift) |
2041 | 0 | return false; |
2042 | 17 | |
2043 | 17 | uint64_t Amount = Shift->getZExtValue(); |
2044 | 17 | if (Amount >= N.getValueSizeInBits()) |
2045 | 0 | return false; |
2046 | 17 | |
2047 | 17 | ShiftVal = Amount; |
2048 | 17 | return true; |
2049 | 17 | } |
2050 | | |
2051 | | // Check whether an AND with Mask is suitable for a TEST UNDER MASK |
2052 | | // instruction and whether the CC value is descriptive enough to handle |
2053 | | // a comparison of type Opcode between the AND result and CmpVal. |
2054 | | // CCMask says which comparison result is being tested and BitSize is |
2055 | | // the number of bits in the operands. If TEST UNDER MASK can be used, |
2056 | | // return the corresponding CC mask, otherwise return 0. |
2057 | | static unsigned getTestUnderMaskCond(unsigned BitSize, unsigned CCMask, |
2058 | | uint64_t Mask, uint64_t CmpVal, |
2059 | 171 | unsigned ICmpType) { |
2060 | 171 | assert(Mask != 0 && "ANDs with zero should have been removed by now"); |
2061 | 171 | |
2062 | 171 | // Check whether the mask is suitable for TMHH, TMHL, TMLH or TMLL. |
2063 | 171 | if (!SystemZ::isImmLL(Mask) && 171 !SystemZ::isImmLH(Mask)93 && |
2064 | 171 | !SystemZ::isImmHL(Mask)83 && !SystemZ::isImmHH(Mask)76 ) |
2065 | 58 | return 0; |
2066 | 113 | |
2067 | 113 | // Work out the masks for the lowest and highest bits. |
2068 | 113 | unsigned HighShift = 63 - countLeadingZeros(Mask); |
2069 | 113 | uint64_t High = uint64_t(1) << HighShift; |
2070 | 113 | uint64_t Low = uint64_t(1) << countTrailingZeros(Mask); |
2071 | 113 | |
2072 | 113 | // Signed ordered comparisons are effectively unsigned if the sign |
2073 | 113 | // bit is dropped. |
2074 | 113 | bool EffectivelyUnsigned = (ICmpType != SystemZICMP::SignedOnly); |
2075 | 113 | |
2076 | 113 | // Check for equality comparisons with 0, or the equivalent. |
2077 | 113 | if (CmpVal == 0113 ) { |
2078 | 76 | if (CCMask == SystemZ::CCMASK_CMP_EQ) |
2079 | 28 | return SystemZ::CCMASK_TM_ALL_0; |
2080 | 48 | if (48 CCMask == SystemZ::CCMASK_CMP_NE48 ) |
2081 | 47 | return SystemZ::CCMASK_TM_SOME_1; |
2082 | 38 | } |
2083 | 38 | if (38 EffectivelyUnsigned && 38 CmpVal > 038 && CmpVal <= Low37 ) { |
2084 | 8 | if (CCMask == SystemZ::CCMASK_CMP_LT) |
2085 | 2 | return SystemZ::CCMASK_TM_ALL_0; |
2086 | 6 | if (6 CCMask == SystemZ::CCMASK_CMP_GE6 ) |
2087 | 0 | return SystemZ::CCMASK_TM_SOME_1; |
2088 | 36 | } |
2089 | 36 | if (36 EffectivelyUnsigned && 36 CmpVal < Low36 ) { |
2090 | 3 | if (CCMask == SystemZ::CCMASK_CMP_LE) |
2091 | 0 | return SystemZ::CCMASK_TM_ALL_0; |
2092 | 3 | if (3 CCMask == SystemZ::CCMASK_CMP_GT3 ) |
2093 | 2 | return SystemZ::CCMASK_TM_SOME_1; |
2094 | 34 | } |
2095 | 34 | |
2096 | 34 | // Check for equality comparisons with the mask, or the equivalent. |
2097 | 34 | if (34 CmpVal == Mask34 ) { |
2098 | 12 | if (CCMask == SystemZ::CCMASK_CMP_EQ) |
2099 | 2 | return SystemZ::CCMASK_TM_ALL_1; |
2100 | 10 | if (10 CCMask == SystemZ::CCMASK_CMP_NE10 ) |
2101 | 2 | return SystemZ::CCMASK_TM_SOME_0; |
2102 | 30 | } |
2103 | 30 | if (30 EffectivelyUnsigned && 30 CmpVal >= Mask - Low30 && CmpVal < Mask16 ) { |
2104 | 8 | if (CCMask == SystemZ::CCMASK_CMP_GT) |
2105 | 2 | return SystemZ::CCMASK_TM_ALL_1; |
2106 | 6 | if (6 CCMask == SystemZ::CCMASK_CMP_LE6 ) |
2107 | 0 | return SystemZ::CCMASK_TM_SOME_0; |
2108 | 28 | } |
2109 | 28 | if (28 EffectivelyUnsigned && 28 CmpVal > Mask - Low28 && CmpVal <= Mask10 ) { |
2110 | 10 | if (CCMask == SystemZ::CCMASK_CMP_GE) |
2111 | 4 | return SystemZ::CCMASK_TM_ALL_1; |
2112 | 6 | if (6 CCMask == SystemZ::CCMASK_CMP_LT6 ) |
2113 | 6 | return SystemZ::CCMASK_TM_SOME_0; |
2114 | 18 | } |
2115 | 18 | |
2116 | 18 | // Check for ordered comparisons with the top bit. |
2117 | 18 | if (18 EffectivelyUnsigned && 18 CmpVal >= Mask - High18 && CmpVal < High16 ) { |
2118 | 7 | if (CCMask == SystemZ::CCMASK_CMP_LE) |
2119 | 0 | return SystemZ::CCMASK_TM_MSB_0; |
2120 | 7 | if (7 CCMask == SystemZ::CCMASK_CMP_GT7 ) |
2121 | 2 | return SystemZ::CCMASK_TM_MSB_1; |
2122 | 16 | } |
2123 | 16 | if (16 EffectivelyUnsigned && 16 CmpVal > Mask - High16 && CmpVal <= High10 ) { |
2124 | 5 | if (CCMask == SystemZ::CCMASK_CMP_LT) |
2125 | 2 | return SystemZ::CCMASK_TM_MSB_0; |
2126 | 3 | if (3 CCMask == SystemZ::CCMASK_CMP_GE3 ) |
2127 | 0 | return SystemZ::CCMASK_TM_MSB_1; |
2128 | 14 | } |
2129 | 14 | |
2130 | 14 | // If there are just two bits, we can do equality checks for Low and High |
2131 | 14 | // as well. |
2132 | 14 | if (14 Mask == Low + High14 ) { |
2133 | 6 | if (CCMask == SystemZ::CCMASK_CMP_EQ && 6 CmpVal == Low4 ) |
2134 | 2 | return SystemZ::CCMASK_TM_MIXED_MSB_0; |
2135 | 4 | if (4 CCMask == SystemZ::CCMASK_CMP_NE && 4 CmpVal == Low2 ) |
2136 | 1 | return SystemZ::CCMASK_TM_MIXED_MSB_0 ^ SystemZ::CCMASK_ANY; |
2137 | 3 | if (3 CCMask == SystemZ::CCMASK_CMP_EQ && 3 CmpVal == High2 ) |
2138 | 2 | return SystemZ::CCMASK_TM_MIXED_MSB_1; |
2139 | 1 | if (1 CCMask == SystemZ::CCMASK_CMP_NE && 1 CmpVal == High1 ) |
2140 | 1 | return SystemZ::CCMASK_TM_MIXED_MSB_1 ^ SystemZ::CCMASK_ANY; |
2141 | 8 | } |
2142 | 8 | |
2143 | 8 | // Looks like we've exhausted our options. |
2144 | 8 | return 0; |
2145 | 8 | } |
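
As a worked example of the mapping above: for (X & 0xFF00) == 0, Mask = 0xFF00 fits in the low 16 bits (isImmLL), CmpVal is 0 and CCMask is CCMASK_CMP_EQ, so the function returns CCMASK_TM_ALL_0 and the caller can use a TMLL-style TEST UNDER MASK. The Low/High bit computation can also be checked in isolation; here is a sketch using the GCC/Clang builtins that countLeadingZeros/countTrailingZeros wrap (illustrative only):

  #include <cassert>
  #include <cstdint>

  int main() {
    uint64_t Mask = 0x0ff0;                               // bits 4..11 set
    unsigned HighShift = 63 - __builtin_clzll(Mask);      // index of highest set bit
    uint64_t High = uint64_t(1) << HighShift;             // 0x800
    uint64_t Low = uint64_t(1) << __builtin_ctzll(Mask);  // 0x010, lowest set bit
    assert(High == 0x800 && Low == 0x010);
    return 0;
  }
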
2146 | | |
2147 | | // See whether C can be implemented as a TEST UNDER MASK instruction. |
2148 | | // Update the arguments with the TM version if so. |
2149 | | static void adjustForTestUnderMask(SelectionDAG &DAG, const SDLoc &DL, |
2150 | 1.41k | Comparison &C) { |
2151 | 1.41k | // Check that we have a comparison with a constant. |
2152 | 1.41k | auto *ConstOp1 = dyn_cast<ConstantSDNode>(C.Op1); |
2153 | 1.41k | if (!ConstOp1) |
2154 | 544 | return; |
2155 | 871 | uint64_t CmpVal = ConstOp1->getZExtValue(); |
2156 | 871 | |
2157 | 871 | // Check whether the nonconstant input is an AND with a constant mask. |
2158 | 871 | Comparison NewC(C); |
2159 | 871 | uint64_t MaskVal; |
2160 | 871 | ConstantSDNode *Mask = nullptr; |
2161 | 871 | if (C.Op0.getOpcode() == ISD::AND871 ) { |
2162 | 118 | NewC.Op0 = C.Op0.getOperand(0); |
2163 | 118 | NewC.Op1 = C.Op0.getOperand(1); |
2164 | 118 | Mask = dyn_cast<ConstantSDNode>(NewC.Op1); |
2165 | 118 | if (!Mask) |
2166 | 2 | return; |
2167 | 116 | MaskVal = Mask->getZExtValue(); |
2168 | 871 | } else { |
2169 | 753 | // There is no instruction to compare with a 64-bit immediate |
2170 | 753 | // so use TMHH instead if possible. We need an unsigned ordered |
2171 | 753 | // comparison with an i64 immediate. |
2172 | 753 | if (NewC.Op0.getValueType() != MVT::i64 || |
2173 | 207 | NewC.CCMask == SystemZ::CCMASK_CMP_EQ || |
2174 | 167 | NewC.CCMask == SystemZ::CCMASK_CMP_NE || |
2175 | 108 | NewC.ICmpType == SystemZICMP::SignedOnly) |
2176 | 700 | return; |
2177 | 53 | // Convert LE and GT comparisons into LT and GE. |
2178 | 53 | if (53 NewC.CCMask == SystemZ::CCMASK_CMP_LE || |
2179 | 53 | NewC.CCMask == SystemZ::CCMASK_CMP_GT53 ) { |
2180 | 18 | if (CmpVal == uint64_t(-1)) |
2181 | 0 | return; |
2182 | 18 | CmpVal += 1; |
2183 | 18 | NewC.CCMask ^= SystemZ::CCMASK_CMP_EQ; |
2184 | 18 | } |
2185 | 53 | // If the low N bits of Op1 are zero then the low N bits of Op0 can
2186 | 53 | // be masked off without changing the result. |
2187 | 53 | MaskVal = -(CmpVal & -CmpVal); |
2188 | 53 | NewC.ICmpType = SystemZICMP::UnsignedOnly; |
2189 | 53 | } |
2190 | 169 | if (169 !MaskVal169 ) |
2191 | 1 | return; |
2192 | 168 | |
2193 | 168 | // Check whether the combination of mask, comparison value and comparison |
2194 | 168 | // type are suitable. |
2195 | 168 | unsigned BitSize = NewC.Op0.getValueSizeInBits(); |
2196 | 168 | unsigned NewCCMask, ShiftVal; |
2197 | 168 | if (NewC.ICmpType != SystemZICMP::SignedOnly && |
2198 | 167 | NewC.Op0.getOpcode() == ISD::SHL && |
2199 | 3 | isSimpleShift(NewC.Op0, ShiftVal) && |
2200 | 3 | (MaskVal >> ShiftVal != 0) && |
2201 | 3 | (NewCCMask = getTestUnderMaskCond(BitSize, NewC.CCMask, |
2202 | 3 | MaskVal >> ShiftVal, |
2203 | 3 | CmpVal >> ShiftVal, |
2204 | 168 | SystemZICMP::Any))) { |
2205 | 3 | NewC.Op0 = NewC.Op0.getOperand(0); |
2206 | 3 | MaskVal >>= ShiftVal; |
2207 | 168 | } else if (165 NewC.ICmpType != SystemZICMP::SignedOnly && |
2208 | 164 | NewC.Op0.getOpcode() == ISD::SRL && |
2209 | 14 | isSimpleShift(NewC.Op0, ShiftVal) && |
2210 | 14 | (MaskVal << ShiftVal != 0) && |
2211 | 14 | (NewCCMask = getTestUnderMaskCond(BitSize, NewC.CCMask, |
2212 | 14 | MaskVal << ShiftVal, |
2213 | 14 | CmpVal << ShiftVal, |
2214 | 165 | SystemZICMP::UnsignedOnly))) { |
2215 | 11 | NewC.Op0 = NewC.Op0.getOperand(0); |
2216 | 11 | MaskVal <<= ShiftVal; |
2217 | 165 | } else { |
2218 | 154 | NewCCMask = getTestUnderMaskCond(BitSize, NewC.CCMask, MaskVal, CmpVal, |
2219 | 154 | NewC.ICmpType); |
2220 | 154 | if (!NewCCMask) |
2221 | 63 | return; |
2222 | 105 | } |
2223 | 105 | |
2224 | 105 | // Go ahead and make the change. |
2225 | 105 | C.Opcode = SystemZISD::TM; |
2226 | 105 | C.Op0 = NewC.Op0; |
2227 | 105 | if (Mask && 105 Mask->getZExtValue() == MaskVal97 ) |
2228 | 91 | C.Op1 = SDValue(Mask, 0); |
2229 | 105 | else |
2230 | 14 | C.Op1 = DAG.getConstant(MaskVal, DL, C.Op0.getValueType()); |
2231 | 1.41k | C.CCValid = SystemZ::CCMASK_TM; |
2232 | 1.41k | C.CCMask = NewCCMask; |
2233 | 1.41k | } |
2234 | | |
2235 | | // Return a Comparison that tests the condition-code result of intrinsic |
2236 | | // node Call against constant integer CC using comparison code Cond. |
2237 | | // Opcode is the opcode of the SystemZISD operation for the intrinsic |
2238 | | // and CCValid is the set of possible condition-code results. |
2239 | | static Comparison getIntrinsicCmp(SelectionDAG &DAG, unsigned Opcode, |
2240 | | SDValue Call, unsigned CCValid, uint64_t CC, |
2241 | 79 | ISD::CondCode Cond) { |
2242 | 79 | Comparison C(Call, SDValue()); |
2243 | 79 | C.Opcode = Opcode; |
2244 | 79 | C.CCValid = CCValid; |
2245 | 79 | if (Cond == ISD::SETEQ) |
2246 | 79 | // bit 3 for CC==0, bit 0 for CC==3, always false for CC>3. |
2247 | 16 | C.CCMask = CC < 4 ? 1 << (3 - CC) : 0;
2248 | 63 | else if (Cond == ISD::SETNE)
2249 | 63 | // ...and the inverse of that.
2250 | 42 | C.CCMask = CC < 4 ? ~(1 << (3 - CC)) : -1;
2251 | 21 | else if (Cond == ISD::SETLT || Cond == ISD::SETULT)
2252 | 21 | // bits above bit 3 for CC==0 (always false), bits above bit 0 for CC==3,
2253 | 21 | // always true for CC>3.
2254 | 9 | C.CCMask = CC < 4 ? ~0U << (4 - CC) : -1;
2255 | 12 | else if (Cond == ISD::SETGE || Cond == ISD::SETUGE)
2256 | 12 | // ...and the inverse of that.
2257 | 0 | C.CCMask = CC < 4 ? ~(~0U << (4 - CC)) : 0;
2258 | 12 | else if (Cond == ISD::SETLE || Cond == ISD::SETULE)
2259 | 12 | // bit 3 and above for CC==0, bit 0 and above for CC==3 (always true),
2260 | 12 | // always true for CC>3.
2261 | 0 | C.CCMask = CC < 4 ? ~0U << (3 - CC) : -1;
2262 | 12 | else if (Cond == ISD::SETGT || Cond == ISD::SETUGT)
2263 | 12 | // ...and the inverse of that.
2264 | 12 | C.CCMask = CC < 4 ? ~(~0U << (3 - CC)) : 0;
2265 | 12 | else |
2266 | 0 | llvm_unreachable("Unexpected integer comparison type"); |
2267 | 79 | C.CCMask &= CCValid; |
2268 | 79 | return C; |
2269 | 79 | } |
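
A small standalone check of the mask arithmetic above (illustrative; it assumes CC value c corresponds to mask bit 1 << (3 - c) and that CCValid covers all four CC values): testing an intrinsic result against 2 with SETEQ keeps only CC 2, SETLT keeps CC 0 and 1, and SETULE keeps CC 0, 1 and 2.

  #include <cassert>

  int main() {
    unsigned CC = 2, CCValid = 0xf;
    unsigned Eq = (1u << (3 - CC)) & CCValid;   // SETEQ:  {2}       -> 0b0010
    unsigned Lt = (~0u << (4 - CC)) & CCValid;  // SETLT:  {0, 1}    -> 0b1100
    unsigned Le = (~0u << (3 - CC)) & CCValid;  // SETULE: {0, 1, 2} -> 0b1110
    assert(Eq == 0x2 && Lt == 0xc && Le == 0xe);
    return 0;
  }
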
2270 | | |
2271 | | // Decide how to implement a comparison of type Cond between CmpOp0 and CmpOp1.
2272 | | static Comparison getCmp(SelectionDAG &DAG, SDValue CmpOp0, SDValue CmpOp1, |
2273 | 1.49k | ISD::CondCode Cond, const SDLoc &DL) { |
2274 | 1.49k | if (CmpOp1.getOpcode() == ISD::Constant1.49k ) { |
2275 | 967 | uint64_t Constant = cast<ConstantSDNode>(CmpOp1)->getZExtValue(); |
2276 | 967 | unsigned Opcode, CCValid; |
2277 | 967 | if (CmpOp0.getOpcode() == ISD::INTRINSIC_W_CHAIN && |
2278 | 967 | CmpOp0.getResNo() == 04 && CmpOp0->hasNUsesOfValue(1, 0)4 && |
2279 | 2 | isIntrinsicWithCCAndChain(CmpOp0, Opcode, CCValid)) |
2280 | 2 | return getIntrinsicCmp(DAG, Opcode, CmpOp0, CCValid, Constant, Cond); |
2281 | 965 | if (965 CmpOp0.getOpcode() == ISD::INTRINSIC_WO_CHAIN && |
2282 | 77 | CmpOp0.getResNo() == CmpOp0->getNumValues() - 1 && |
2283 | 77 | isIntrinsicWithCC(CmpOp0, Opcode, CCValid)) |
2284 | 77 | return getIntrinsicCmp(DAG, Opcode, CmpOp0, CCValid, Constant, Cond); |
2285 | 1.41k | } |
2286 | 1.41k | Comparison C(CmpOp0, CmpOp1); |
2287 | 1.41k | C.CCMask = CCMaskForCondCode(Cond); |
2288 | 1.41k | if (C.Op0.getValueType().isFloatingPoint()1.41k ) { |
2289 | 240 | C.CCValid = SystemZ::CCMASK_FCMP; |
2290 | 240 | C.Opcode = SystemZISD::FCMP; |
2291 | 240 | adjustForFNeg(C); |
2292 | 1.41k | } else { |
2293 | 1.17k | C.CCValid = SystemZ::CCMASK_ICMP; |
2294 | 1.17k | C.Opcode = SystemZISD::ICMP; |
2295 | 1.17k | // Choose the type of comparison. Equality and inequality tests can |
2296 | 1.17k | // use either signed or unsigned comparisons. The choice also doesn't |
2297 | 1.17k | // matter if both sign bits are known to be clear. In those cases we |
2298 | 1.17k | // want to give the main isel code the freedom to choose whichever |
2299 | 1.17k | // form fits best. |
2300 | 1.17k | if (C.CCMask == SystemZ::CCMASK_CMP_EQ || |
2301 | 963 | C.CCMask == SystemZ::CCMASK_CMP_NE || |
2302 | 704 | (DAG.SignBitIsZero(C.Op0) && 704 DAG.SignBitIsZero(C.Op1)75 )) |
2303 | 537 | C.ICmpType = SystemZICMP::Any; |
2304 | 638 | else if (638 C.CCMask & SystemZ::CCMASK_CMP_UO638 ) |
2305 | 349 | C.ICmpType = SystemZICMP::UnsignedOnly; |
2306 | 638 | else |
2307 | 289 | C.ICmpType = SystemZICMP::SignedOnly; |
2308 | 1.17k | C.CCMask &= ~SystemZ::CCMASK_CMP_UO; |
2309 | 1.17k | adjustZeroCmp(DAG, DL, C); |
2310 | 1.17k | adjustSubwordCmp(DAG, DL, C); |
2311 | 1.17k | adjustForSubtraction(DAG, DL, C); |
2312 | 1.17k | adjustForLTGFR(C); |
2313 | 1.17k | adjustICmpTruncate(DAG, DL, C); |
2314 | 1.17k | } |
2315 | 1.41k | |
2316 | 1.41k | if (shouldSwapCmpOperands(C)1.41k ) { |
2317 | 63 | std::swap(C.Op0, C.Op1); |
2318 | 63 | C.CCMask = reverseCCMask(C.CCMask); |
2319 | 63 | } |
2320 | 1.49k | |
2321 | 1.49k | adjustForTestUnderMask(DAG, DL, C); |
2322 | 1.49k | return C; |
2323 | 1.49k | } |
2324 | | |
2325 | | // Emit the comparison instruction described by C. |
2326 | 1.47k | static SDValue emitCmp(SelectionDAG &DAG, const SDLoc &DL, Comparison &C) { |
2327 | 1.47k | if (!C.Op1.getNode()1.47k ) { |
2328 | 79 | SDValue Op; |
2329 | 79 | switch (C.Op0.getOpcode()) { |
2330 | 2 | case ISD::INTRINSIC_W_CHAIN: |
2331 | 2 | Op = emitIntrinsicWithChainAndGlue(DAG, C.Op0, C.Opcode); |
2332 | 2 | break; |
2333 | 77 | case ISD::INTRINSIC_WO_CHAIN: |
2334 | 77 | Op = emitIntrinsicWithGlue(DAG, C.Op0, C.Opcode); |
2335 | 77 | break; |
2336 | 0 | default: |
2337 | 0 | llvm_unreachable("Invalid comparison operands"); |
2338 | 79 | } |
2339 | 79 | return SDValue(Op.getNode(), Op->getNumValues() - 1); |
2340 | 79 | } |
2341 | 1.40k | if (C.Opcode == SystemZISD::ICMP)
2342 | 1.05k | return DAG.getNode(SystemZISD::ICMP, DL, MVT::Glue, C.Op0, C.Op1, |
2343 | 1.05k | DAG.getConstant(C.ICmpType, DL, MVT::i32)); |
2344 | 345 | if (C.Opcode == SystemZISD::TM) {
2345 | 105 | bool RegisterOnly = (bool(C.CCMask & SystemZ::CCMASK_TM_MIXED_MSB_0) != |
2346 | 105 | bool(C.CCMask & SystemZ::CCMASK_TM_MIXED_MSB_1)); |
2347 | 105 | return DAG.getNode(SystemZISD::TM, DL, MVT::Glue, C.Op0, C.Op1, |
2348 | 105 | DAG.getConstant(RegisterOnly, DL, MVT::i32)); |
2349 | 105 | } |
2350 | 240 | return DAG.getNode(C.Opcode, DL, MVT::Glue, C.Op0, C.Op1); |
2351 | 240 | } |
2352 | | |
2353 | | // Implement a 32-bit *MUL_LOHI operation by extending both operands to |
2354 | | // 64 bits. Extend is the extension type to use. Store the high part |
2355 | | // in Hi and the low part in Lo. |
2356 | | static void lowerMUL_LOHI32(SelectionDAG &DAG, const SDLoc &DL, unsigned Extend, |
2357 | | SDValue Op0, SDValue Op1, SDValue &Hi, |
2358 | 0 | SDValue &Lo) { |
2359 | 0 | Op0 = DAG.getNode(Extend, DL, MVT::i64, Op0); |
2360 | 0 | Op1 = DAG.getNode(Extend, DL, MVT::i64, Op1); |
2361 | 0 | SDValue Mul = DAG.getNode(ISD::MUL, DL, MVT::i64, Op0, Op1); |
2362 | 0 | Hi = DAG.getNode(ISD::SRL, DL, MVT::i64, Mul, |
2363 | 0 | DAG.getConstant(32, DL, MVT::i64)); |
2364 | 0 | Hi = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Hi); |
2365 | 0 | Lo = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Mul); |
2366 | 0 | } |
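A self-contained sketch of the widening trick above, in plain C++ rather than DAG nodes: extend both 32-bit operands to 64 bits, multiply once, and split the product into its halves.

#include <cassert>
#include <cstdint>

int main() {
  uint32_t A = 0x89abcdef, B = 0x12345678;
  uint64_t Mul = uint64_t(A) * uint64_t(B); // ZERO_EXTEND both, then MUL
  uint32_t Hi = uint32_t(Mul >> 32);        // SRL by 32, then TRUNCATE
  uint32_t Lo = uint32_t(Mul);              // TRUNCATE
  assert(((uint64_t(Hi) << 32) | Lo) == Mul);
  // The signed form of the caller uses SIGN_EXTEND instead, e.g.
  // int64_t(int32_t(A)) * int64_t(int32_t(B)).
}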
2367 | | |
2368 | | // Lower a binary operation that produces two VT results, one in each |
2369 | | // half of a GR128 pair. Op0 and Op1 are the VT operands to the operation, |
2370 | | // and Opcode performs the GR128 operation. Store the even register result |
2371 | | // in Even and the odd register result in Odd. |
2372 | | static void lowerGR128Binary(SelectionDAG &DAG, const SDLoc &DL, EVT VT, |
2373 | | unsigned Opcode, SDValue Op0, SDValue Op1, |
2374 | 215 | SDValue &Even, SDValue &Odd) { |
2375 | 215 | SDValue Result = DAG.getNode(Opcode, DL, MVT::Untyped, Op0, Op1); |
2376 | 215 | bool Is32Bit = is32Bit(VT); |
2377 | 215 | Even = DAG.getTargetExtractSubreg(SystemZ::even128(Is32Bit), DL, VT, Result); |
2378 | 215 | Odd = DAG.getTargetExtractSubreg(SystemZ::odd128(Is32Bit), DL, VT, Result); |
2379 | 215 | } |
2380 | | |
2381 | | // Return an i32 value that is 1 if the CC value produced by Glue is |
2382 | | // in the mask CCMask and 0 otherwise. CC is known to have a value |
2383 | | // in CCValid, so other values can be ignored. |
2384 | | static SDValue emitSETCC(SelectionDAG &DAG, const SDLoc &DL, SDValue Glue, |
2385 | 200 | unsigned CCValid, unsigned CCMask) { |
2386 | 200 | IPMConversion Conversion = getIPMConversion(CCValid, CCMask); |
2387 | 200 | SDValue Result = DAG.getNode(SystemZISD::IPM, DL, MVT::i32, Glue); |
2388 | 200 | |
2389 | 200 | if (Conversion.XORValue) |
2390 | 21 | Result = DAG.getNode(ISD::XOR, DL, MVT::i32, Result, |
2391 | 21 | DAG.getConstant(Conversion.XORValue, DL, MVT::i32)); |
2392 | 200 | |
2393 | 200 | if (Conversion.AddValue) |
2394 | 124 | Result = DAG.getNode(ISD::ADD, DL, MVT::i32, Result, |
2395 | 124 | DAG.getConstant(Conversion.AddValue, DL, MVT::i32)); |
2396 | 200 | |
2397 | 200 | // The SHR/AND sequence should get optimized to an RISBG. |
2398 | 200 | Result = DAG.getNode(ISD::SRL, DL, MVT::i32, Result, |
2399 | 200 | DAG.getConstant(Conversion.Bit, DL, MVT::i32)); |
2400 | 200 | if (Conversion.Bit != 31) |
2401 | 84 | Result = DAG.getNode(ISD::AND, DL, MVT::i32, Result, |
2402 | 84 | DAG.getConstant(1, DL, MVT::i32)); |
2403 | 200 | return Result; |
2404 | 200 | } |
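A rough model of the XOR/ADD/SHR sequence above, assuming the IPM result carries the condition code in bits 28-29 (X = CC << 28); the constants below are illustrative only, not the ones getIPMConversion would actually choose.

#include <cassert>
#include <cstdint>

// Evaluate (((X ^ XORValue) + AddValue) >> Bit), masking with 1 unless the
// shift already leaves a single bit (Bit == 31).
static unsigned ipmSelect(unsigned CC, uint32_t XORValue, uint32_t AddValue,
                          unsigned Bit) {
  uint32_t X = CC << 28;                     // assumed IPM layout
  uint32_t R = ((X ^ XORValue) + AddValue) >> Bit;
  return Bit != 31 ? (R & 1) : R;
}

int main() {
  // XORValue = 0, AddValue = 0, Bit = 28 yields CC & 1 (true for CC 1 and 3).
  for (unsigned CC = 0; CC < 4; ++CC)
    assert(ipmSelect(CC, 0, 0, 28) == (CC & 1));
  // With Bit = 31 only the sign bit survives; adding 0x60000000 turns the
  // sequence into a test for CC >= 2.
  for (unsigned CC = 0; CC < 4; ++CC)
    assert(ipmSelect(CC, 0, 0x60000000, 31) == (CC >= 2 ? 1u : 0u));
}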
2405 | | |
2406 | | // Return the SystemZISD vector comparison operation for CC, or 0 if it cannot
2407 | | // be done directly. IsFP is true if CC is for a floating-point rather than |
2408 | | // integer comparison. |
2409 | 1.51k | static unsigned getVectorComparison(ISD::CondCode CC, bool IsFP) { |
2410 | 1.51k | switch (CC) { |
2411 | 654 | case ISD::SETOEQ: |
2412 | 654 | case ISD::SETEQ: |
2413 | 654 | return IsFP ? SystemZISD::VFCMPE : SystemZISD::VICMPE;
2414 | 654 | |
2415 | 67 | case ISD::SETOGE: |
2416 | 67 | case ISD::SETGE: |
2417 | 67 | return IsFP ? SystemZISD::VFCMPHE : static_cast<SystemZISD::NodeType>(0);
2418 | 67 | |
2419 | 453 | case ISD::SETOGT: |
2420 | 453 | case ISD::SETGT: |
2421 | 453 | return IsFP ? SystemZISD::VFCMPH : SystemZISD::VICMPH;
2422 | 453 | |
2423 | 78 | case ISD::SETUGT: |
2424 | 78 | return IsFP ? static_cast<SystemZISD::NodeType>(0) : SystemZISD::VICMPHL;
2425 | 453 | |
2426 | 259 | default: |
2427 | 259 | return 0; |
2428 | 0 | } |
2429 | 0 | } |
2430 | | |
2431 | | // Return the SystemZISD vector comparison operation for CC or its inverse, |
2432 | | // or 0 if neither can be done directly. Indicate in Invert whether the |
2433 | | // result is for the inverse of CC. IsFP is true if CC is for a |
2434 | | // floating-point rather than integer comparison. |
2435 | | static unsigned getVectorComparisonOrInvert(ISD::CondCode CC, bool IsFP, |
2436 | 1.29k | bool &Invert) { |
2437 | 1.29k | if (unsigned Opcode = getVectorComparison(CC, IsFP)) {
2438 | 1.08k | Invert = false; |
2439 | 1.08k | return Opcode; |
2440 | 1.08k | } |
2441 | 214 | |
2442 | 214 | CC = ISD::getSetCCInverse(CC, !IsFP); |
2443 | 214 | if (unsigned Opcode = getVectorComparison(CC, IsFP)) {
2444 | 114 | Invert = true; |
2445 | 114 | return Opcode; |
2446 | 114 | } |
2447 | 100 | |
2448 | 100 | return 0; |
2449 | 100 | } |
2450 | | |
2451 | | // Return a v2f64 that contains the extended form of elements Start and Start+1 |
2452 | | // of v4f32 value Op. |
2453 | | static SDValue expandV4F32ToV2F64(SelectionDAG &DAG, int Start, const SDLoc &DL, |
2454 | 696 | SDValue Op) { |
2455 | 696 | int Mask[] = { Start, -1, Start + 1, -1 }; |
2456 | 696 | Op = DAG.getVectorShuffle(MVT::v4f32, DL, Op, DAG.getUNDEF(MVT::v4f32), Mask); |
2457 | 696 | return DAG.getNode(SystemZISD::VEXTEND, DL, MVT::v2f64, Op); |
2458 | 696 | } |
2459 | | |
2460 | | // Build a comparison of vectors CmpOp0 and CmpOp1 using opcode Opcode, |
2461 | | // producing a result of type VT. |
2462 | | SDValue SystemZTargetLowering::getVectorCmp(SelectionDAG &DAG, unsigned Opcode, |
2463 | | const SDLoc &DL, EVT VT, |
2464 | | SDValue CmpOp0, |
2465 | 1.24k | SDValue CmpOp1) const { |
2466 | 1.24k | // There is no hardware support for v4f32 (unless we have the vector |
2467 | 1.24k | // enhancements facility 1), so extend the vector into two v2f64s |
2468 | 1.24k | // and compare those. |
2469 | 1.24k | if (CmpOp0.getValueType() == MVT::v4f32 && |
2470 | 1.24k | !Subtarget.hasVectorEnhancements1()) {
2471 | 174 | SDValue H0 = expandV4F32ToV2F64(DAG, 0, DL, CmpOp0); |
2472 | 174 | SDValue L0 = expandV4F32ToV2F64(DAG, 2, DL, CmpOp0); |
2473 | 174 | SDValue H1 = expandV4F32ToV2F64(DAG, 0, DL, CmpOp1); |
2474 | 174 | SDValue L1 = expandV4F32ToV2F64(DAG, 2, DL, CmpOp1); |
2475 | 174 | SDValue HRes = DAG.getNode(Opcode, DL, MVT::v2i64, H0, H1); |
2476 | 174 | SDValue LRes = DAG.getNode(Opcode, DL, MVT::v2i64, L0, L1); |
2477 | 174 | return DAG.getNode(SystemZISD::PACK, DL, VT, HRes, LRes); |
2478 | 174 | } |
2479 | 1.07k | return DAG.getNode(Opcode, DL, VT, CmpOp0, CmpOp1); |
2480 | 1.07k | } |
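A scalar model of the split-and-pack path above, under the assumption that widening a float to double is exact (so the double compare gives the same answer) and that PACK narrows each 64-bit mask lane back to 32 bits; the helper name is hypothetical.

#include <array>
#include <cassert>
#include <cstdint>

static std::array<uint32_t, 4> cmpGTViaF64(const std::array<float, 4> &A,
                                           const std::array<float, 4> &B) {
  std::array<uint32_t, 4> Out;
  for (int I = 0; I < 4; ++I) {
    // Lanes {0,1} and {2,3} would each be one v2f64 VFCMPH in the DAG.
    uint64_t Mask = double(A[I]) > double(B[I]) ? ~0ull : 0ull;
    Out[I] = uint32_t(Mask); // PACK: keep 32 bits of each all-ones/zero lane
  }
  return Out;
}

int main() {
  auto R = cmpGTViaF64({1.0f, 2.0f, 3.0f, 4.0f}, {2.0f, 1.0f, 3.0f, 0.0f});
  assert(R[0] == 0 && R[1] == ~0u && R[2] == 0 && R[3] == ~0u);
}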
2481 | | |
2482 | | // Lower a vector comparison of type CC between CmpOp0 and CmpOp1, producing |
2483 | | // an integer mask of type VT. |
2484 | | SDValue SystemZTargetLowering::lowerVectorSETCC(SelectionDAG &DAG, |
2485 | | const SDLoc &DL, EVT VT, |
2486 | | ISD::CondCode CC, |
2487 | | SDValue CmpOp0, |
2488 | 1.22k | SDValue CmpOp1) const { |
2489 | 1.22k | bool IsFP = CmpOp0.getValueType().isFloatingPoint(); |
2490 | 1.22k | bool Invert = false; |
2491 | 1.22k | SDValue Cmp; |
2492 | 1.22k | switch (CC) { |
2493 | 1.22k | // Handle tests for order using (or (ogt y x) (oge x y)). |
2494 | 6 | case ISD::SETUO: |
2495 | 6 | Invert = true; |
2496 | 6 | LLVM_FALLTHROUGH; |
2497 | 12 | case ISD::SETO: { |
2498 | 12 | assert(IsFP && "Unexpected integer comparison"); |
2499 | 12 | SDValue LT = getVectorCmp(DAG, SystemZISD::VFCMPH, DL, VT, CmpOp1, CmpOp0); |
2500 | 12 | SDValue GE = getVectorCmp(DAG, SystemZISD::VFCMPHE, DL, VT, CmpOp0, CmpOp1); |
2501 | 12 | Cmp = DAG.getNode(ISD::OR, DL, VT, LT, GE); |
2502 | 12 | break; |
2503 | 6 | } |
2504 | 6 | |
2505 | 6 | // Handle <> tests using (or (ogt y x) (ogt x y)). |
2506 | 6 | case ISD::SETUEQ: |
2507 | 6 | Invert = true; |
2508 | 6 | LLVM_FALLTHROUGH; |
2509 | 12 | case ISD::SETONE: { |
2510 | 12 | assert(IsFP && "Unexpected integer comparison"); |
2511 | 12 | SDValue LT = getVectorCmp(DAG, SystemZISD::VFCMPH, DL, VT, CmpOp1, CmpOp0); |
2512 | 12 | SDValue GT = getVectorCmp(DAG, SystemZISD::VFCMPH, DL, VT, CmpOp0, CmpOp1); |
2513 | 12 | Cmp = DAG.getNode(ISD::OR, DL, VT, LT, GT); |
2514 | 12 | break; |
2515 | 6 | } |
2516 | 6 | |
2517 | 6 | // Otherwise a single comparison is enough. It doesn't really |
2518 | 6 | // matter whether we try the inversion or the swap first, since |
2519 | 6 | // there are no cases where both work. |
2520 | 1.19k | default: |
2521 | 1.19k | if (unsigned Opcode = getVectorComparisonOrInvert(CC, IsFP, Invert)) |
2522 | 1.09k | Cmp = getVectorCmp(DAG, Opcode, DL, VT, CmpOp0, CmpOp1); |
2523 | 100 | else { |
2524 | 100 | CC = ISD::getSetCCSwappedOperands(CC); |
2525 | 100 | if (unsigned Opcode = getVectorComparisonOrInvert(CC, IsFP, Invert)) |
2526 | 100 | Cmp = getVectorCmp(DAG, Opcode, DL, VT, CmpOp1, CmpOp0); |
2527 | 100 | else |
2528 | 0 | llvm_unreachable("Unhandled comparison"); |
2529 | 100 | } |
2530 | 1.19k | break; |
2531 | 1.22k | } |
2532 | 1.22k | if (Invert) {
2533 | 126 | SDValue Mask = DAG.getNode(SystemZISD::BYTE_MASK, DL, MVT::v16i8, |
2534 | 126 | DAG.getConstant(65535, DL, MVT::i32)); |
2535 | 126 | Mask = DAG.getNode(ISD::BITCAST, DL, VT, Mask); |
2536 | 126 | Cmp = DAG.getNode(ISD::XOR, DL, VT, Cmp, Mask); |
2537 | 126 | } |
2538 | 1.22k | return Cmp; |
2539 | 1.22k | } |
2540 | | |
2541 | | SDValue SystemZTargetLowering::lowerSETCC(SDValue Op, |
2542 | 1.32k | SelectionDAG &DAG) const { |
2543 | 1.32k | SDValue CmpOp0 = Op.getOperand(0); |
2544 | 1.32k | SDValue CmpOp1 = Op.getOperand(1); |
2545 | 1.32k | ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get(); |
2546 | 1.32k | SDLoc DL(Op); |
2547 | 1.32k | EVT VT = Op.getValueType(); |
2548 | 1.32k | if (VT.isVector()) |
2549 | 1.22k | return lowerVectorSETCC(DAG, DL, VT, CC, CmpOp0, CmpOp1); |
2550 | 101 | |
2551 | 101 | Comparison C(getCmp(DAG, CmpOp0, CmpOp1, CC, DL)); |
2552 | 101 | SDValue Glue = emitCmp(DAG, DL, C); |
2553 | 101 | return emitSETCC(DAG, DL, Glue, C.CCValid, C.CCMask); |
2554 | 101 | } |
2555 | | |
2556 | 553 | SDValue SystemZTargetLowering::lowerBR_CC(SDValue Op, SelectionDAG &DAG) const { |
2557 | 553 | ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get(); |
2558 | 553 | SDValue CmpOp0 = Op.getOperand(2); |
2559 | 553 | SDValue CmpOp1 = Op.getOperand(3); |
2560 | 553 | SDValue Dest = Op.getOperand(4); |
2561 | 553 | SDLoc DL(Op); |
2562 | 553 | |
2563 | 553 | Comparison C(getCmp(DAG, CmpOp0, CmpOp1, CC, DL)); |
2564 | 553 | SDValue Glue = emitCmp(DAG, DL, C); |
2565 | 553 | return DAG.getNode(SystemZISD::BR_CCMASK, DL, Op.getValueType(), |
2566 | 553 | Op.getOperand(0), DAG.getConstant(C.CCValid, DL, MVT::i32), |
2567 | 553 | DAG.getConstant(C.CCMask, DL, MVT::i32), Dest, Glue); |
2568 | 553 | } |
2569 | | |
2570 | | // Return true if Pos is CmpOp and Neg is the negative of CmpOp, |
2571 | | // allowing Pos and Neg to be wider than CmpOp. |
2572 | 77 | static bool isAbsolute(SDValue CmpOp, SDValue Pos, SDValue Neg) { |
2573 | 77 | return (Neg.getOpcode() == ISD::SUB && |
2574 | 15 | Neg.getOperand(0).getOpcode() == ISD::Constant && |
2575 | 15 | cast<ConstantSDNode>(Neg.getOperand(0))->getZExtValue() == 0 && |
2576 | 15 | Neg.getOperand(1) == Pos && |
2577 | 15 | (Pos == CmpOp || |
2578 | 3 | (Pos.getOpcode() == ISD::SIGN_EXTEND && |
2579 | 15 | Pos.getOperand(0) == CmpOp))); |
2580 | 77 | } |
2581 | | |
2582 | | // Return the absolute or negative absolute of Op; IsNegative decides which. |
2583 | | static SDValue getAbsolute(SelectionDAG &DAG, const SDLoc &DL, SDValue Op, |
2584 | 15 | bool IsNegative) { |
2585 | 15 | Op = DAG.getNode(SystemZISD::IABS, DL, Op.getValueType(), Op); |
2586 | 15 | if (IsNegative) |
2587 | 5 | Op = DAG.getNode(ISD::SUB, DL, Op.getValueType(), |
2588 | 5 | DAG.getConstant(0, DL, Op.getValueType()), Op); |
2589 | 15 | return Op; |
2590 | 15 | } |
2591 | | |
2592 | | SDValue SystemZTargetLowering::lowerSELECT_CC(SDValue Op, |
2593 | 840 | SelectionDAG &DAG) const { |
2594 | 840 | SDValue CmpOp0 = Op.getOperand(0); |
2595 | 840 | SDValue CmpOp1 = Op.getOperand(1); |
2596 | 840 | SDValue TrueOp = Op.getOperand(2); |
2597 | 840 | SDValue FalseOp = Op.getOperand(3); |
2598 | 840 | ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get(); |
2599 | 840 | SDLoc DL(Op); |
2600 | 840 | |
2601 | 840 | Comparison C(getCmp(DAG, CmpOp0, CmpOp1, CC, DL)); |
2602 | 840 | |
2603 | 840 | // Check for absolute and negative-absolute selections, including those |
2604 | 840 | // where the comparison value is sign-extended (for LPGFR and LNGFR). |
2605 | 840 | // This check supplements the one in DAGCombiner. |
2606 | 840 | if (C.Opcode == SystemZISD::ICMP && |
2607 | 651 | C.CCMask != SystemZ::CCMASK_CMP_EQ && |
2608 | 548 | C.CCMask != SystemZ::CCMASK_CMP_NE && |
2609 | 477 | C.Op1.getOpcode() == ISD::Constant && |
2610 | 840 | cast<ConstantSDNode>(C.Op1)->getZExtValue() == 0) {
2611 | 42 | if (isAbsolute(C.Op0, TrueOp, FalseOp)) |
2612 | 7 | return getAbsolute(DAG, DL, TrueOp, C.CCMask & SystemZ::CCMASK_CMP_LT); |
2613 | 35 | if (isAbsolute(C.Op0, FalseOp, TrueOp))
2614 | 8 | return getAbsolute(DAG, DL, FalseOp, C.CCMask & SystemZ::CCMASK_CMP_GT); |
2615 | 825 | } |
2616 | 825 | |
2617 | 825 | SDValue Glue = emitCmp(DAG, DL, C); |
2618 | 825 | |
2619 | 825 | // Special case for handling -1/0 results. The shifts we use here |
2620 | 825 | // should get optimized with the IPM conversion sequence. |
2621 | 825 | auto *TrueC = dyn_cast<ConstantSDNode>(TrueOp); |
2622 | 825 | auto *FalseC = dyn_cast<ConstantSDNode>(FalseOp); |
2623 | 825 | if (TrueC && FalseC) {
2624 | 81 | int64_t TrueVal = TrueC->getSExtValue(); |
2625 | 81 | int64_t FalseVal = FalseC->getSExtValue(); |
2626 | 81 | if ((TrueVal == -1 && FalseVal == 0) || (TrueVal == 0 && FalseVal == -1)) {
2627 | 50 | // Invert the condition if we want -1 on false. |
2628 | 50 | if (TrueVal == 0) |
2629 | 0 | C.CCMask ^= C.CCValid; |
2630 | 50 | SDValue Result = emitSETCC(DAG, DL, Glue, C.CCValid, C.CCMask); |
2631 | 50 | EVT VT = Op.getValueType(); |
2632 | 50 | // Extend the result to VT. Upper bits are ignored. |
2633 | 50 | if (!is32Bit(VT)) |
2634 | 17 | Result = DAG.getNode(ISD::ANY_EXTEND, DL, VT, Result); |
2635 | 50 | // Sign-extend from the low bit. |
2636 | 50 | SDValue ShAmt = DAG.getConstant(VT.getSizeInBits() - 1, DL, MVT::i32); |
2637 | 50 | SDValue Shl = DAG.getNode(ISD::SHL, DL, VT, Result, ShAmt); |
2638 | 50 | return DAG.getNode(ISD::SRA, DL, VT, Shl, ShAmt); |
2639 | 50 | } |
2640 | 775 | } |
2641 | 775 | |
2642 | 775 | SDValue Ops[] = {TrueOp, FalseOp, DAG.getConstant(C.CCValid, DL, MVT::i32), |
2643 | 775 | DAG.getConstant(C.CCMask, DL, MVT::i32), Glue}; |
2644 | 775 | |
2645 | 775 | SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::Glue); |
2646 | 775 | return DAG.getNode(SystemZISD::SELECT_CCMASK, DL, VTs, Ops); |
2647 | 775 | } |
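A quick check of the -1/0 special case above: a 0/1 SETCC result shifted up to the sign bit and arithmetically shifted back becomes an all-ones or all-zero mask. The two shifts mirror the SHL/SRA pair emitted above; plain C++ sketch, assuming the usual two's-complement arithmetic right shift.

#include <cassert>
#include <cstdint>

static int32_t boolToMask(uint32_t SetCC) { // SetCC is 0 or 1
  int32_t Shl = int32_t(SetCC << 31);       // ISD::SHL by bits-1
  return Shl >> 31;                         // ISD::SRA by bits-1
}

int main() {
  assert(boolToMask(1) == -1);
  assert(boolToMask(0) == 0);
}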
2648 | | |
2649 | | SDValue SystemZTargetLowering::lowerGlobalAddress(GlobalAddressSDNode *Node, |
2650 | 350 | SelectionDAG &DAG) const { |
2651 | 350 | SDLoc DL(Node); |
2652 | 350 | const GlobalValue *GV = Node->getGlobal(); |
2653 | 350 | int64_t Offset = Node->getOffset(); |
2654 | 350 | EVT PtrVT = getPointerTy(DAG.getDataLayout()); |
2655 | 350 | CodeModel::Model CM = DAG.getTarget().getCodeModel(); |
2656 | 350 | |
2657 | 350 | SDValue Result; |
2658 | 350 | if (Subtarget.isPC32DBLSymbol(GV, CM)) {
2659 | 318 | // Assign anchors at 1<<12 byte boundaries. |
2660 | 318 | uint64_t Anchor = Offset & ~uint64_t(0xfff); |
2661 | 318 | Result = DAG.getTargetGlobalAddress(GV, DL, PtrVT, Anchor); |
2662 | 318 | Result = DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Result); |
2663 | 318 | |
2664 | 318 | // The offset can be folded into the address if it is aligned to a halfword. |
2665 | 318 | Offset -= Anchor; |
2666 | 318 | if (Offset != 0 && (Offset & 1) == 0) {
2667 | 3 | SDValue Full = DAG.getTargetGlobalAddress(GV, DL, PtrVT, Anchor + Offset); |
2668 | 3 | Result = DAG.getNode(SystemZISD::PCREL_OFFSET, DL, PtrVT, Full, Result); |
2669 | 3 | Offset = 0; |
2670 | 3 | } |
2671 | 350 | } else { |
2672 | 32 | Result = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0, SystemZII::MO_GOT); |
2673 | 32 | Result = DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Result); |
2674 | 32 | Result = DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), Result, |
2675 | 32 | MachinePointerInfo::getGOT(DAG.getMachineFunction())); |
2676 | 32 | } |
2677 | 350 | |
2678 | 350 | // If there was a non-zero offset that we didn't fold, create an explicit |
2679 | 350 | // addition for it. |
2680 | 350 | if (Offset != 0) |
2681 | 2 | Result = DAG.getNode(ISD::ADD, DL, PtrVT, Result, |
2682 | 2 | DAG.getConstant(Offset, DL, PtrVT)); |
2683 | 350 | |
2684 | 350 | return Result; |
2685 | 350 | } |
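A numeric sketch of the anchoring logic above: offsets are split at 4KB (1 << 12) anchor boundaries, and the remainder is folded into the address only when it is even (halfword-aligned).

#include <cassert>
#include <cstdint>

int main() {
  int64_t Offset = 0x1236;
  uint64_t Anchor = Offset & ~uint64_t(0xfff); // 0x1000
  int64_t Rest = Offset - int64_t(Anchor);     // 0x236
  assert(Anchor == 0x1000 && Rest == 0x236);
  assert((Rest & 1) == 0);                     // even: foldable (PCREL_OFFSET)

  Offset = 0x1235;                             // remainder 0x235 is odd
  Rest = Offset - int64_t(Offset & ~uint64_t(0xfff));
  assert((Rest & 1) == 1);                     // kept as an explicit ISD::ADD
}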
2686 | | |
2687 | | SDValue SystemZTargetLowering::lowerTLSGetOffset(GlobalAddressSDNode *Node, |
2688 | | SelectionDAG &DAG, |
2689 | | unsigned Opcode, |
2690 | 11 | SDValue GOTOffset) const { |
2691 | 11 | SDLoc DL(Node); |
2692 | 11 | EVT PtrVT = getPointerTy(DAG.getDataLayout()); |
2693 | 11 | SDValue Chain = DAG.getEntryNode(); |
2694 | 11 | SDValue Glue; |
2695 | 11 | |
2696 | 11 | // __tls_get_offset takes the GOT offset in %r2 and the GOT in %r12. |
2697 | 11 | SDValue GOT = DAG.getGLOBAL_OFFSET_TABLE(PtrVT); |
2698 | 11 | Chain = DAG.getCopyToReg(Chain, DL, SystemZ::R12D, GOT, Glue); |
2699 | 11 | Glue = Chain.getValue(1); |
2700 | 11 | Chain = DAG.getCopyToReg(Chain, DL, SystemZ::R2D, GOTOffset, Glue); |
2701 | 11 | Glue = Chain.getValue(1); |
2702 | 11 | |
2703 | 11 | // The first call operand is the chain and the second is the TLS symbol. |
2704 | 11 | SmallVector<SDValue, 8> Ops; |
2705 | 11 | Ops.push_back(Chain); |
2706 | 11 | Ops.push_back(DAG.getTargetGlobalAddress(Node->getGlobal(), DL, |
2707 | 11 | Node->getValueType(0), |
2708 | 11 | 0, 0)); |
2709 | 11 | |
2710 | 11 | // Add argument registers to the end of the list so that they are |
2711 | 11 | // known live into the call. |
2712 | 11 | Ops.push_back(DAG.getRegister(SystemZ::R2D, PtrVT)); |
2713 | 11 | Ops.push_back(DAG.getRegister(SystemZ::R12D, PtrVT)); |
2714 | 11 | |
2715 | 11 | // Add a register mask operand representing the call-preserved registers. |
2716 | 11 | const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo(); |
2717 | 11 | const uint32_t *Mask = |
2718 | 11 | TRI->getCallPreservedMask(DAG.getMachineFunction(), CallingConv::C); |
2719 | 11 | assert(Mask && "Missing call preserved mask for calling convention"); |
2720 | 11 | Ops.push_back(DAG.getRegisterMask(Mask)); |
2721 | 11 | |
2722 | 11 | // Glue the call to the argument copies. |
2723 | 11 | Ops.push_back(Glue); |
2724 | 11 | |
2725 | 11 | // Emit the call. |
2726 | 11 | SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue); |
2727 | 11 | Chain = DAG.getNode(Opcode, DL, NodeTys, Ops); |
2728 | 11 | Glue = Chain.getValue(1); |
2729 | 11 | |
2730 | 11 | // Copy the return value from %r2. |
2731 | 11 | return DAG.getCopyFromReg(Chain, DL, SystemZ::R2D, PtrVT, Glue); |
2732 | 11 | } |
2733 | | |
2734 | | SDValue SystemZTargetLowering::lowerThreadPointer(const SDLoc &DL, |
2735 | 15 | SelectionDAG &DAG) const { |
2736 | 15 | SDValue Chain = DAG.getEntryNode(); |
2737 | 15 | EVT PtrVT = getPointerTy(DAG.getDataLayout()); |
2738 | 15 | |
2739 | 15 | // The high part of the thread pointer is in access register 0. |
2740 | 15 | SDValue TPHi = DAG.getCopyFromReg(Chain, DL, SystemZ::A0, MVT::i32); |
2741 | 15 | TPHi = DAG.getNode(ISD::ANY_EXTEND, DL, PtrVT, TPHi); |
2742 | 15 | |
2743 | 15 | // The low part of the thread pointer is in access register 1. |
2744 | 15 | SDValue TPLo = DAG.getCopyFromReg(Chain, DL, SystemZ::A1, MVT::i32); |
2745 | 15 | TPLo = DAG.getNode(ISD::ZERO_EXTEND, DL, PtrVT, TPLo); |
2746 | 15 | |
2747 | 15 | // Merge them into a single 64-bit address. |
2748 | 15 | SDValue TPHiShifted = DAG.getNode(ISD::SHL, DL, PtrVT, TPHi, |
2749 | 15 | DAG.getConstant(32, DL, PtrVT)); |
2750 | 15 | return DAG.getNode(ISD::OR, DL, PtrVT, TPHiShifted, TPLo); |
2751 | 15 | } |
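A scalar sketch of the merge above: the 32-bit halves read from access registers 0 and 1 combine as (hi << 32) | zext(lo), with the zero extension of the low half mattering whenever its top bit is set.

#include <cassert>
#include <cstdint>

int main() {
  uint32_t TPHi = 0x00000001;                            // from %a0
  uint32_t TPLo = 0x8000f000;                            // from %a1
  uint64_t TP = (uint64_t(TPHi) << 32) | uint64_t(TPLo); // SHL 32, then OR
  assert(TP == 0x000000018000f000ull);
}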
2752 | | |
2753 | | SDValue SystemZTargetLowering::lowerGlobalTLSAddress(GlobalAddressSDNode *Node, |
2754 | 14 | SelectionDAG &DAG) const { |
2755 | 14 | if (DAG.getTarget().Options.EmulatedTLS) |
2756 | 0 | return LowerToTLSEmulatedModel(Node, DAG); |
2757 | 14 | SDLoc DL(Node); |
2758 | 14 | const GlobalValue *GV = Node->getGlobal(); |
2759 | 14 | EVT PtrVT = getPointerTy(DAG.getDataLayout()); |
2760 | 14 | TLSModel::Model model = DAG.getTarget().getTLSModel(GV); |
2761 | 14 | |
2762 | 14 | SDValue TP = lowerThreadPointer(DL, DAG); |
2763 | 14 | |
2764 | 14 | // Get the offset of GA from the thread pointer, based on the TLS model. |
2765 | 14 | SDValue Offset; |
2766 | 14 | switch (model) { |
2767 | 7 | case TLSModel::GeneralDynamic: { |
2768 | 7 | // Load the GOT offset of the tls_index (module ID / per-symbol offset). |
2769 | 7 | SystemZConstantPoolValue *CPV = |
2770 | 7 | SystemZConstantPoolValue::Create(GV, SystemZCP::TLSGD); |
2771 | 7 | |
2772 | 7 | Offset = DAG.getConstantPool(CPV, PtrVT, 8); |
2773 | 7 | Offset = DAG.getLoad( |
2774 | 7 | PtrVT, DL, DAG.getEntryNode(), Offset, |
2775 | 7 | MachinePointerInfo::getConstantPool(DAG.getMachineFunction())); |
2776 | 7 | |
2777 | 7 | // Call __tls_get_offset to retrieve the offset. |
2778 | 7 | Offset = lowerTLSGetOffset(Node, DAG, SystemZISD::TLS_GDCALL, Offset); |
2779 | 7 | break; |
2780 | 14 | } |
2781 | 14 | |
2782 | 4 | case TLSModel::LocalDynamic: { |
2783 | 4 | // Load the GOT offset of the module ID. |
2784 | 4 | SystemZConstantPoolValue *CPV = |
2785 | 4 | SystemZConstantPoolValue::Create(GV, SystemZCP::TLSLDM); |
2786 | 4 | |
2787 | 4 | Offset = DAG.getConstantPool(CPV, PtrVT, 8); |
2788 | 4 | Offset = DAG.getLoad( |
2789 | 4 | PtrVT, DL, DAG.getEntryNode(), Offset, |
2790 | 4 | MachinePointerInfo::getConstantPool(DAG.getMachineFunction())); |
2791 | 4 | |
2792 | 4 | // Call __tls_get_offset to retrieve the module base offset. |
2793 | 4 | Offset = lowerTLSGetOffset(Node, DAG, SystemZISD::TLS_LDCALL, Offset); |
2794 | 4 | |
2795 | 4 | // Note: The SystemZLDCleanupPass will remove redundant computations |
2796 | 4 | // of the module base offset. Count total number of local-dynamic |
2797 | 4 | // accesses to trigger execution of that pass. |
2798 | 4 | SystemZMachineFunctionInfo* MFI = |
2799 | 4 | DAG.getMachineFunction().getInfo<SystemZMachineFunctionInfo>(); |
2800 | 4 | MFI->incNumLocalDynamicTLSAccesses(); |
2801 | 4 | |
2802 | 4 | // Add the per-symbol offset. |
2803 | 4 | CPV = SystemZConstantPoolValue::Create(GV, SystemZCP::DTPOFF); |
2804 | 4 | |
2805 | 4 | SDValue DTPOffset = DAG.getConstantPool(CPV, PtrVT, 8); |
2806 | 4 | DTPOffset = DAG.getLoad( |
2807 | 4 | PtrVT, DL, DAG.getEntryNode(), DTPOffset, |
2808 | 4 | MachinePointerInfo::getConstantPool(DAG.getMachineFunction())); |
2809 | 4 | |
2810 | 4 | Offset = DAG.getNode(ISD::ADD, DL, PtrVT, Offset, DTPOffset); |
2811 | 4 | break; |
2812 | 14 | } |
2813 | 14 | |
2814 | 1 | case TLSModel::InitialExec: { |
2815 | 1 | // Load the offset from the GOT. |
2816 | 1 | Offset = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0, |
2817 | 1 | SystemZII::MO_INDNTPOFF); |
2818 | 1 | Offset = DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Offset); |
2819 | 1 | Offset = |
2820 | 1 | DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), Offset, |
2821 | 1 | MachinePointerInfo::getGOT(DAG.getMachineFunction())); |
2822 | 1 | break; |
2823 | 14 | } |
2824 | 14 | |
2825 | 2 | case TLSModel::LocalExec: { |
2826 | 2 | // Force the offset into the constant pool and load it from there. |
2827 | 2 | SystemZConstantPoolValue *CPV = |
2828 | 2 | SystemZConstantPoolValue::Create(GV, SystemZCP::NTPOFF); |
2829 | 2 | |
2830 | 2 | Offset = DAG.getConstantPool(CPV, PtrVT, 8); |
2831 | 2 | Offset = DAG.getLoad( |
2832 | 2 | PtrVT, DL, DAG.getEntryNode(), Offset, |
2833 | 2 | MachinePointerInfo::getConstantPool(DAG.getMachineFunction())); |
2834 | 2 | break; |
2835 | 14 | } |
2836 | 14 | } |
2837 | 14 | |
2838 | 14 | // Add the base and offset together. |
2839 | 14 | return DAG.getNode(ISD::ADD, DL, PtrVT, TP, Offset); |
2840 | 14 | } |
2841 | | |
2842 | | SDValue SystemZTargetLowering::lowerBlockAddress(BlockAddressSDNode *Node, |
2843 | 1 | SelectionDAG &DAG) const { |
2844 | 1 | SDLoc DL(Node); |
2845 | 1 | const BlockAddress *BA = Node->getBlockAddress(); |
2846 | 1 | int64_t Offset = Node->getOffset(); |
2847 | 1 | EVT PtrVT = getPointerTy(DAG.getDataLayout()); |
2848 | 1 | |
2849 | 1 | SDValue Result = DAG.getTargetBlockAddress(BA, PtrVT, Offset); |
2850 | 1 | Result = DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Result); |
2851 | 1 | return Result; |
2852 | 1 | } |
2853 | | |
2854 | | SDValue SystemZTargetLowering::lowerJumpTable(JumpTableSDNode *JT, |
2855 | 3 | SelectionDAG &DAG) const { |
2856 | 3 | SDLoc DL(JT); |
2857 | 3 | EVT PtrVT = getPointerTy(DAG.getDataLayout()); |
2858 | 3 | SDValue Result = DAG.getTargetJumpTable(JT->getIndex(), PtrVT); |
2859 | 3 | |
2860 | 3 | // Use LARL to load the address of the table. |
2861 | 3 | return DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Result); |
2862 | 3 | } |
2863 | | |
2864 | | SDValue SystemZTargetLowering::lowerConstantPool(ConstantPoolSDNode *CP, |
2865 | 306 | SelectionDAG &DAG) const { |
2866 | 306 | SDLoc DL(CP); |
2867 | 306 | EVT PtrVT = getPointerTy(DAG.getDataLayout()); |
2868 | 306 | |
2869 | 306 | SDValue Result; |
2870 | 306 | if (CP->isMachineConstantPoolEntry()) |
2871 | 17 | Result = DAG.getTargetConstantPool(CP->getMachineCPVal(), PtrVT, |
2872 | 17 | CP->getAlignment()); |
2873 | 306 | else |
2874 | 289 | Result = DAG.getTargetConstantPool(CP->getConstVal(), PtrVT, |
2875 | 289 | CP->getAlignment(), CP->getOffset()); |
2876 | 306 | |
2877 | 306 | // Use LARL to load the address of the constant pool entry. |
2878 | 306 | return DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Result); |
2879 | 306 | } |
2880 | | |
2881 | | SDValue SystemZTargetLowering::lowerFRAMEADDR(SDValue Op, |
2882 | 2 | SelectionDAG &DAG) const { |
2883 | 2 | MachineFunction &MF = DAG.getMachineFunction(); |
2884 | 2 | MachineFrameInfo &MFI = MF.getFrameInfo(); |
2885 | 2 | MFI.setFrameAddressIsTaken(true); |
2886 | 2 | |
2887 | 2 | SDLoc DL(Op); |
2888 | 2 | unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); |
2889 | 2 | EVT PtrVT = getPointerTy(DAG.getDataLayout()); |
2890 | 2 | |
2891 | 2 | // If the back chain frame index has not been allocated yet, do so. |
2892 | 2 | SystemZMachineFunctionInfo *FI = MF.getInfo<SystemZMachineFunctionInfo>(); |
2893 | 2 | int BackChainIdx = FI->getFramePointerSaveIndex(); |
2894 | 2 | if (!BackChainIdx) {
2895 | 2 | // By definition, the frame address is the address of the back chain. |
2896 | 2 | BackChainIdx = MFI.CreateFixedObject(8, -SystemZMC::CallFrameSize, false); |
2897 | 2 | FI->setFramePointerSaveIndex(BackChainIdx); |
2898 | 2 | } |
2899 | 2 | SDValue BackChain = DAG.getFrameIndex(BackChainIdx, PtrVT); |
2900 | 2 | |
2901 | 2 | // FIXME The frontend should detect this case. |
2902 | 2 | if (Depth > 0) {
2903 | 0 | report_fatal_error("Unsupported stack frame traversal count"); |
2904 | 0 | } |
2905 | 2 | |
2906 | 2 | return BackChain; |
2907 | 2 | } |
2908 | | |
2909 | | SDValue SystemZTargetLowering::lowerRETURNADDR(SDValue Op, |
2910 | 1 | SelectionDAG &DAG) const { |
2911 | 1 | MachineFunction &MF = DAG.getMachineFunction(); |
2912 | 1 | MachineFrameInfo &MFI = MF.getFrameInfo(); |
2913 | 1 | MFI.setReturnAddressIsTaken(true); |
2914 | 1 | |
2915 | 1 | if (verifyReturnAddressArgumentIsConstant(Op, DAG)) |
2916 | 0 | return SDValue(); |
2917 | 1 | |
2918 | 1 | SDLoc DL(Op); |
2919 | 1 | unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); |
2920 | 1 | EVT PtrVT = getPointerTy(DAG.getDataLayout()); |
2921 | 1 | |
2922 | 1 | // FIXME The frontend should detect this case. |
2923 | 1 | if (Depth > 0) {
2924 | 0 | report_fatal_error("Unsupported stack frame traversal count"); |
2925 | 0 | } |
2926 | 1 | |
2927 | 1 | // Return R14D, which has the return address. Mark it an implicit live-in. |
2928 | 1 | unsigned LinkReg = MF.addLiveIn(SystemZ::R14D, &SystemZ::GR64BitRegClass); |
2929 | 1 | return DAG.getCopyFromReg(DAG.getEntryNode(), DL, LinkReg, PtrVT); |
2930 | 1 | } |
2931 | | |
2932 | | SDValue SystemZTargetLowering::lowerBITCAST(SDValue Op, |
2933 | 13 | SelectionDAG &DAG) const { |
2934 | 13 | SDLoc DL(Op); |
2935 | 13 | SDValue In = Op.getOperand(0); |
2936 | 13 | EVT InVT = In.getValueType(); |
2937 | 13 | EVT ResVT = Op.getValueType(); |
2938 | 13 | |
2939 | 13 | // Convert loads directly. This is normally done by DAGCombiner, |
2940 | 13 | // but we need this case for bitcasts that are created during lowering |
2941 | 13 | // and which are then lowered themselves. |
2942 | 13 | if (auto *LoadN = dyn_cast<LoadSDNode>(In)) |
2943 | 2 | if (ISD::isNormalLoad(LoadN))
2944 | 0 | return DAG.getLoad(ResVT, DL, LoadN->getChain(), LoadN->getBasePtr(), |
2945 | 0 | LoadN->getMemOperand()); |
2946 | 13 | |
2947 | 13 | if (InVT == MVT::i32 && ResVT == MVT::f32) {
2948 | 7 | SDValue In64; |
2949 | 7 | if (Subtarget.hasHighWord()) {
2950 | 3 | SDNode *U64 = DAG.getMachineNode(TargetOpcode::IMPLICIT_DEF, DL, |
2951 | 3 | MVT::i64); |
2952 | 3 | In64 = DAG.getTargetInsertSubreg(SystemZ::subreg_h32, DL, |
2953 | 3 | MVT::i64, SDValue(U64, 0), In); |
2954 | 7 | } else { |
2955 | 4 | In64 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, In); |
2956 | 4 | In64 = DAG.getNode(ISD::SHL, DL, MVT::i64, In64, |
2957 | 4 | DAG.getConstant(32, DL, MVT::i64)); |
2958 | 4 | } |
2959 | 7 | SDValue Out64 = DAG.getNode(ISD::BITCAST, DL, MVT::f64, In64); |
2960 | 7 | return DAG.getTargetExtractSubreg(SystemZ::subreg_r32, |
2961 | 7 | DL, MVT::f32, Out64); |
2962 | 7 | } |
2963 | 6 | if (InVT == MVT::f32 && ResVT == MVT::i32) {
2964 | 6 | SDNode *U64 = DAG.getMachineNode(TargetOpcode::IMPLICIT_DEF, DL, MVT::f64); |
2965 | 6 | SDValue In64 = DAG.getTargetInsertSubreg(SystemZ::subreg_r32, DL, |
2966 | 6 | MVT::f64, SDValue(U64, 0), In); |
2967 | 6 | SDValue Out64 = DAG.getNode(ISD::BITCAST, DL, MVT::i64, In64); |
2968 | 6 | if (Subtarget.hasHighWord()) |
2969 | 3 | return DAG.getTargetExtractSubreg(SystemZ::subreg_h32, DL, |
2970 | 3 | MVT::i32, Out64); |
2971 | 3 | SDValue Shift = DAG.getNode(ISD::SRL, DL, MVT::i64, Out64, |
2972 | 3 | DAG.getConstant(32, DL, MVT::i64)); |
2973 | 3 | return DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Shift); |
2974 | 3 | } |
2975 | 0 | llvm_unreachable("Unexpected bitcast combination");
2976 | 0 | } |
2977 | | |
2978 | | SDValue SystemZTargetLowering::lowerVASTART(SDValue Op, |
2979 | 0 | SelectionDAG &DAG) const { |
2980 | 0 | MachineFunction &MF = DAG.getMachineFunction(); |
2981 | 0 | SystemZMachineFunctionInfo *FuncInfo = |
2982 | 0 | MF.getInfo<SystemZMachineFunctionInfo>(); |
2983 | 0 | EVT PtrVT = getPointerTy(DAG.getDataLayout()); |
2984 | 0 |
2985 | 0 | SDValue Chain = Op.getOperand(0); |
2986 | 0 | SDValue Addr = Op.getOperand(1); |
2987 | 0 | const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue(); |
2988 | 0 | SDLoc DL(Op); |
2989 | 0 |
2990 | 0 | // The initial values of each field. |
2991 | 0 | const unsigned NumFields = 4; |
2992 | 0 | SDValue Fields[NumFields] = { |
2993 | 0 | DAG.getConstant(FuncInfo->getVarArgsFirstGPR(), DL, PtrVT), |
2994 | 0 | DAG.getConstant(FuncInfo->getVarArgsFirstFPR(), DL, PtrVT), |
2995 | 0 | DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT), |
2996 | 0 | DAG.getFrameIndex(FuncInfo->getRegSaveFrameIndex(), PtrVT) |
2997 | 0 | }; |
2998 | 0 |
2999 | 0 | // Store each field into its respective slot. |
3000 | 0 | SDValue MemOps[NumFields]; |
3001 | 0 | unsigned Offset = 0; |
3002 | 0 | for (unsigned I = 0; I < NumFields; ++I) {
3003 | 0 | SDValue FieldAddr = Addr; |
3004 | 0 | if (Offset != 0) |
3005 | 0 | FieldAddr = DAG.getNode(ISD::ADD, DL, PtrVT, FieldAddr, |
3006 | 0 | DAG.getIntPtrConstant(Offset, DL)); |
3007 | 0 | MemOps[I] = DAG.getStore(Chain, DL, Fields[I], FieldAddr, |
3008 | 0 | MachinePointerInfo(SV, Offset)); |
3009 | 0 | Offset += 8; |
3010 | 0 | } |
3011 | 0 | return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOps); |
3012 | 0 | } |
3013 | | |
3014 | | SDValue SystemZTargetLowering::lowerVACOPY(SDValue Op, |
3015 | 0 | SelectionDAG &DAG) const { |
3016 | 0 | SDValue Chain = Op.getOperand(0); |
3017 | 0 | SDValue DstPtr = Op.getOperand(1); |
3018 | 0 | SDValue SrcPtr = Op.getOperand(2); |
3019 | 0 | const Value *DstSV = cast<SrcValueSDNode>(Op.getOperand(3))->getValue(); |
3020 | 0 | const Value *SrcSV = cast<SrcValueSDNode>(Op.getOperand(4))->getValue(); |
3021 | 0 | SDLoc DL(Op); |
3022 | 0 |
3023 | 0 | return DAG.getMemcpy(Chain, DL, DstPtr, SrcPtr, DAG.getIntPtrConstant(32, DL), |
3024 | 0 | /*Align*/8, /*isVolatile*/false, /*AlwaysInline*/false, |
3025 | 0 | /*isTailCall*/false, |
3026 | 0 | MachinePointerInfo(DstSV), MachinePointerInfo(SrcSV)); |
3027 | 0 | } |
3028 | | |
3029 | | SDValue SystemZTargetLowering:: |
3030 | 26 | lowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const { |
3031 | 26 | const TargetFrameLowering *TFI = Subtarget.getFrameLowering(); |
3032 | 26 | MachineFunction &MF = DAG.getMachineFunction(); |
3033 | 26 | bool RealignOpt = !MF.getFunction()->hasFnAttribute("no-realign-stack");
3034 | 26 | bool StoreBackchain = MF.getFunction()->hasFnAttribute("backchain"); |
3035 | 26 | |
3036 | 26 | SDValue Chain = Op.getOperand(0); |
3037 | 26 | SDValue Size = Op.getOperand(1); |
3038 | 26 | SDValue Align = Op.getOperand(2); |
3039 | 26 | SDLoc DL(Op); |
3040 | 26 | |
3041 | 26 | // If user has set the no alignment function attribute, ignore |
3042 | 26 | // alloca alignments. |
3043 | 26 | uint64_t AlignVal = (RealignOpt ? |
3044 | 26 | dyn_cast<ConstantSDNode>(Align)->getZExtValue() : 0);
3045 | 26 | |
3046 | 26 | uint64_t StackAlign = TFI->getStackAlignment(); |
3047 | 26 | uint64_t RequiredAlign = std::max(AlignVal, StackAlign); |
3048 | 26 | uint64_t ExtraAlignSpace = RequiredAlign - StackAlign; |
3049 | 26 | |
3050 | 26 | unsigned SPReg = getStackPointerRegisterToSaveRestore(); |
3051 | 26 | SDValue NeededSpace = Size; |
3052 | 26 | |
3053 | 26 | // Get a reference to the stack pointer. |
3054 | 26 | SDValue OldSP = DAG.getCopyFromReg(Chain, DL, SPReg, MVT::i64); |
3055 | 26 | |
3056 | 26 | // If we need a backchain, save it now. |
3057 | 26 | SDValue Backchain; |
3058 | 26 | if (StoreBackchain) |
3059 | 3 | Backchain = DAG.getLoad(MVT::i64, DL, Chain, OldSP, MachinePointerInfo()); |
3060 | 26 | |
3061 | 26 | // Add extra space for alignment if needed. |
3062 | 26 | if (ExtraAlignSpace) |
3063 | 7 | NeededSpace = DAG.getNode(ISD::ADD, DL, MVT::i64, NeededSpace, |
3064 | 7 | DAG.getConstant(ExtraAlignSpace, DL, MVT::i64)); |
3065 | 26 | |
3066 | 26 | // Get the new stack pointer value. |
3067 | 26 | SDValue NewSP = DAG.getNode(ISD::SUB, DL, MVT::i64, OldSP, NeededSpace); |
3068 | 26 | |
3069 | 26 | // Copy the new stack pointer back. |
3070 | 26 | Chain = DAG.getCopyToReg(Chain, DL, SPReg, NewSP); |
3071 | 26 | |
3072 | 26 | // The allocated data lives above the 160 bytes allocated for the standard |
3073 | 26 | // frame, plus any outgoing stack arguments. We don't know how much that |
3074 | 26 | // amounts to yet, so emit a special ADJDYNALLOC placeholder. |
3075 | 26 | SDValue ArgAdjust = DAG.getNode(SystemZISD::ADJDYNALLOC, DL, MVT::i64); |
3076 | 26 | SDValue Result = DAG.getNode(ISD::ADD, DL, MVT::i64, NewSP, ArgAdjust); |
3077 | 26 | |
3078 | 26 | // Dynamically realign if needed. |
3079 | 26 | if (RequiredAlign > StackAlign) {
3080 | 7 | Result = |
3081 | 7 | DAG.getNode(ISD::ADD, DL, MVT::i64, Result, |
3082 | 7 | DAG.getConstant(ExtraAlignSpace, DL, MVT::i64)); |
3083 | 7 | Result = |
3084 | 7 | DAG.getNode(ISD::AND, DL, MVT::i64, Result, |
3085 | 7 | DAG.getConstant(~(RequiredAlign - 1), DL, MVT::i64)); |
3086 | 7 | } |
3087 | 26 | |
3088 | 26 | if (StoreBackchain) |
3089 | 3 | Chain = DAG.getStore(Chain, DL, Backchain, NewSP, MachinePointerInfo()); |
3090 | 26 | |
3091 | 26 | SDValue Ops[2] = { Result, Chain }; |
3092 | 26 | return DAG.getMergeValues(Ops, DL); |
3093 | 26 | } |
3094 | | |
3095 | | SDValue SystemZTargetLowering::lowerGET_DYNAMIC_AREA_OFFSET( |
3096 | 3 | SDValue Op, SelectionDAG &DAG) const { |
3097 | 3 | SDLoc DL(Op); |
3098 | 3 | |
3099 | 3 | return DAG.getNode(SystemZISD::ADJDYNALLOC, DL, MVT::i64); |
3100 | 3 | } |
3101 | | |
3102 | | SDValue SystemZTargetLowering::lowerSMUL_LOHI(SDValue Op, |
3103 | 12 | SelectionDAG &DAG) const { |
3104 | 12 | EVT VT = Op.getValueType(); |
3105 | 12 | SDLoc DL(Op); |
3106 | 12 | SDValue Ops[2]; |
3107 | 12 | if (is32Bit(VT)) |
3108 | 12 | // Just do a normal 64-bit multiplication and extract the results. |
3109 | 12 | // We define this so that it can be used for constant division. |
3110 | 0 | lowerMUL_LOHI32(DAG, DL, ISD::SIGN_EXTEND, Op.getOperand(0), |
3111 | 0 | Op.getOperand(1), Ops[1], Ops[0]); |
3112 | 12 | else if (Subtarget.hasMiscellaneousExtensions2())
3113 | 12 | // SystemZISD::SMUL_LOHI returns the low result in the odd register and |
3114 | 12 | // the high result in the even register. ISD::SMUL_LOHI is defined to |
3115 | 12 | // return the low half first, so the results are in reverse order. |
3116 | 10 | lowerGR128Binary(DAG, DL, VT, SystemZISD::SMUL_LOHI, |
3117 | 10 | Op.getOperand(0), Op.getOperand(1), Ops[1], Ops[0]); |
3118 | 2 | else { |
3119 | 2 | // Do a full 128-bit multiplication based on SystemZISD::UMUL_LOHI: |
3120 | 2 | // |
3121 | 2 | // (ll * rl) + ((lh * rl) << 64) + ((ll * rh) << 64) |
3122 | 2 | // |
3123 | 2 | // but using the fact that the upper halves are either all zeros |
3124 | 2 | // or all ones: |
3125 | 2 | // |
3126 | 2 | // (ll * rl) - ((lh & rl) << 64) - ((ll & rh) << 64) |
3127 | 2 | // |
3128 | 2 | // and grouping the right terms together since they are quicker than the |
3129 | 2 | // multiplication: |
3130 | 2 | // |
3131 | 2 | // (ll * rl) - (((lh & rl) + (ll & rh)) << 64) |
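// Editor's note (illustrative, not part of the original source): a quick
// numeric check of the identity with ll = -1 and rl = 2. The unsigned
// product is 2 * (2^64 - 1), i.e. UMUL_LOHI high part 1 and low part
// 0xFFFF...FFFE. With lh = ll >> 63 = all ones and rh = rl >> 63 = 0,
// (lh & rl) + (ll & rh) = 2, so the corrected high part is 1 - 2 = -1,
// giving the 128-bit value -2, which is indeed (-1) * 2.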
3132 | 2 | SDValue C63 = DAG.getConstant(63, DL, MVT::i64); |
3133 | 2 | SDValue LL = Op.getOperand(0); |
3134 | 2 | SDValue RL = Op.getOperand(1); |
3135 | 2 | SDValue LH = DAG.getNode(ISD::SRA, DL, VT, LL, C63); |
3136 | 2 | SDValue RH = DAG.getNode(ISD::SRA, DL, VT, RL, C63); |
3137 | 2 | // SystemZISD::UMUL_LOHI returns the low result in the odd register and |
3138 | 2 | // the high result in the even register. ISD::SMUL_LOHI is defined to |
3139 | 2 | // return the low half first, so the results are in reverse order. |
3140 | 2 | lowerGR128Binary(DAG, DL, VT, SystemZISD::UMUL_LOHI, |
3141 | 2 | LL, RL, Ops[1], Ops[0]); |
3142 | 2 | SDValue NegLLTimesRH = DAG.getNode(ISD::AND, DL, VT, LL, RH); |
3143 | 2 | SDValue NegLHTimesRL = DAG.getNode(ISD::AND, DL, VT, LH, RL); |
3144 | 2 | SDValue NegSum = DAG.getNode(ISD::ADD, DL, VT, NegLLTimesRH, NegLHTimesRL); |
3145 | 2 | Ops[1] = DAG.getNode(ISD::SUB, DL, VT, Ops[1], NegSum); |
3146 | 2 | } |
3147 | 12 | return DAG.getMergeValues(Ops, DL); |
3148 | 12 | } |
3149 | | |
3150 | | SDValue SystemZTargetLowering::lowerUMUL_LOHI(SDValue Op, |
3151 | 22 | SelectionDAG &DAG) const { |
3152 | 22 | EVT VT = Op.getValueType(); |
3153 | 22 | SDLoc DL(Op); |
3154 | 22 | SDValue Ops[2]; |
3155 | 22 | if (is32Bit(VT)) |
3156 | 22 | // Just do a normal 64-bit multiplication and extract the results. |
3157 | 22 | // We define this so that it can be used for constant division. |
3158 | 0 | lowerMUL_LOHI32(DAG, DL, ISD::ZERO_EXTEND, Op.getOperand(0), |
3159 | 0 | Op.getOperand(1), Ops[1], Ops[0]); |
3160 | 22 | else |
3161 | 22 | // SystemZISD::UMUL_LOHI returns the low result in the odd register and |
3162 | 22 | // the high result in the even register. ISD::UMUL_LOHI is defined to |
3163 | 22 | // return the low half first, so the results are in reverse order. |
3164 | 22 | lowerGR128Binary(DAG, DL, VT, SystemZISD::UMUL_LOHI, |
3165 | 22 | Op.getOperand(0), Op.getOperand(1), Ops[1], Ops[0]); |
3166 | 22 | return DAG.getMergeValues(Ops, DL); |
3167 | 22 | } |
3168 | | |
3169 | | SDValue SystemZTargetLowering::lowerSDIVREM(SDValue Op, |
3170 | 105 | SelectionDAG &DAG) const { |
3171 | 105 | SDValue Op0 = Op.getOperand(0); |
3172 | 105 | SDValue Op1 = Op.getOperand(1); |
3173 | 105 | EVT VT = Op.getValueType(); |
3174 | 105 | SDLoc DL(Op); |
3175 | 105 | |
3176 | 105 | // We use DSGF for 32-bit division. This means the first operand must |
3177 | 105 | // always be 64-bit, and the second operand should be 32-bit whenever |
3178 | 105 | // that is possible, to improve performance. |
3179 | 105 | if (is32Bit(VT)) |
3180 | 57 | Op0 = DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, Op0); |
3181 | 48 | else if (DAG.ComputeNumSignBits(Op1) > 32)
3182 | 13 | Op1 = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Op1); |
3183 | 105 | |
3184 | 105 | // DSG(F) returns the remainder in the even register and the |
3185 | 105 | // quotient in the odd register. |
3186 | 105 | SDValue Ops[2]; |
3187 | 105 | lowerGR128Binary(DAG, DL, VT, SystemZISD::SDIVREM, Op0, Op1, Ops[1], Ops[0]); |
3188 | 105 | return DAG.getMergeValues(Ops, DL); |
3189 | 105 | } |
3190 | | |
3191 | | SDValue SystemZTargetLowering::lowerUDIVREM(SDValue Op, |
3192 | 76 | SelectionDAG &DAG) const { |
3193 | 76 | EVT VT = Op.getValueType(); |
3194 | 76 | SDLoc DL(Op); |
3195 | 76 | |
3196 | 76 | // DL(G) returns the remainder in the even register and the |
3197 | 76 | // quotient in the odd register. |
3198 | 76 | SDValue Ops[2]; |
3199 | 76 | lowerGR128Binary(DAG, DL, VT, SystemZISD::UDIVREM, |
3200 | 76 | Op.getOperand(0), Op.getOperand(1), Ops[1], Ops[0]); |
3201 | 76 | return DAG.getMergeValues(Ops, DL); |
3202 | 76 | } |
3203 | | |
3204 | 432 | SDValue SystemZTargetLowering::lowerOR(SDValue Op, SelectionDAG &DAG) const { |
3205 | 432 | assert(Op.getValueType() == MVT::i64 && "Should be 64-bit operation"); |
3206 | 432 | |
3207 | 432 | // Get the known-zero masks for each operand. |
3208 | 432 | SDValue Ops[] = { Op.getOperand(0), Op.getOperand(1) }; |
3209 | 432 | KnownBits Known[2]; |
3210 | 432 | DAG.computeKnownBits(Ops[0], Known[0]); |
3211 | 432 | DAG.computeKnownBits(Ops[1], Known[1]); |
3212 | 432 | |
3213 | 432 | // See if the upper 32 bits of one operand and the lower 32 bits of the |
3214 | 432 | // other are known zero. They are the low and high operands respectively. |
3215 | 432 | uint64_t Masks[] = { Known[0].Zero.getZExtValue(), |
3216 | 432 | Known[1].Zero.getZExtValue() }; |
3217 | 432 | unsigned High, Low; |
3218 | 432 | if ((Masks[0] >> 32) == 0xffffffff && uint32_t(Masks[1]) == 0xffffffff)
3219 | 27 | High = 1, Low = 0;
3220 | 405 | else if ((Masks[1] >> 32) == 0xffffffff && uint32_t(Masks[0]) == 0xffffffff)
3221 | 45 | High = 0, Low = 1; |
3222 | 405 | else |
3223 | 360 | return Op; |
3224 | 72 | |
3225 | 72 | SDValue LowOp = Ops[Low]; |
3226 | 72 | SDValue HighOp = Ops[High]; |
3227 | 72 | |
3228 | 72 | // If the high part is a constant, we're better off using IILH. |
3229 | 72 | if (HighOp.getOpcode() == ISD::Constant) |
3230 | 20 | return Op; |
3231 | 52 | |
3232 | 52 | // If the low part is a constant that is outside the range of LHI, |
3233 | 52 | // then we're better off using IILF. |
3234 | 52 | if (52 LowOp.getOpcode() == ISD::Constant52 ) { |
3235 | 19 | int64_t Value = int32_t(cast<ConstantSDNode>(LowOp)->getZExtValue()); |
3236 | 19 | if (!isInt<16>(Value)) |
3237 | 14 | return Op; |
3238 | 38 | } |
3239 | 38 | |
3240 | 38 | // Check whether the high part is an AND that doesn't change the |
3241 | 38 | // high 32 bits and just masks out low bits. We can skip it if so. |
3242 | 38 | if (HighOp.getOpcode() == ISD::AND &&
3243 | 38 | HighOp.getOperand(1).getOpcode() == ISD::Constant) {
3244 | 16 | SDValue HighOp0 = HighOp.getOperand(0); |
3245 | 16 | uint64_t Mask = cast<ConstantSDNode>(HighOp.getOperand(1))->getZExtValue(); |
3246 | 16 | if (DAG.MaskedValueIsZero(HighOp0, APInt(64, ~(Mask | 0xffffffff)))) |
3247 | 15 | HighOp = HighOp0; |
3248 | 16 | } |
3249 | 432 | |
3250 | 432 | // Take advantage of the fact that all GR32 operations only change the |
3251 | 432 | // low 32 bits by truncating Low to an i32 and inserting it directly |
3252 | 432 | // using a subreg. The interesting cases are those where the truncation |
3253 | 432 | // can be folded. |
3254 | 432 | SDLoc DL(Op); |
3255 | 432 | SDValue Low32 = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, LowOp); |
3256 | 432 | return DAG.getTargetInsertSubreg(SystemZ::subreg_l32, DL, |
3257 | 432 | MVT::i64, HighOp, Low32); |
3258 | 432 | } |
3259 | | |
3260 | | SDValue SystemZTargetLowering::lowerCTPOP(SDValue Op, |
3261 | 11 | SelectionDAG &DAG) const { |
3262 | 11 | EVT VT = Op.getValueType(); |
3263 | 11 | SDLoc DL(Op); |
3264 | 11 | Op = Op.getOperand(0); |
3265 | 11 | |
3266 | 11 | // Handle vector types via VPOPCT. |
3267 | 11 | if (VT.isVector()) {
3268 | 4 | Op = DAG.getNode(ISD::BITCAST, DL, MVT::v16i8, Op); |
3269 | 4 | Op = DAG.getNode(SystemZISD::POPCNT, DL, MVT::v16i8, Op); |
3270 | 4 | switch (VT.getScalarSizeInBits()) { |
3271 | 1 | case 8: |
3272 | 1 | break; |
3273 | 1 | case 16: { |
3274 | 1 | Op = DAG.getNode(ISD::BITCAST, DL, VT, Op); |
3275 | 1 | SDValue Shift = DAG.getConstant(8, DL, MVT::i32); |
3276 | 1 | SDValue Tmp = DAG.getNode(SystemZISD::VSHL_BY_SCALAR, DL, VT, Op, Shift); |
3277 | 1 | Op = DAG.getNode(ISD::ADD, DL, VT, Op, Tmp); |
3278 | 1 | Op = DAG.getNode(SystemZISD::VSRL_BY_SCALAR, DL, VT, Op, Shift); |
3279 | 1 | break; |
3280 | 4 | } |
3281 | 1 | case 32: { |
3282 | 1 | SDValue Tmp = DAG.getNode(SystemZISD::BYTE_MASK, DL, MVT::v16i8, |
3283 | 1 | DAG.getConstant(0, DL, MVT::i32)); |
3284 | 1 | Op = DAG.getNode(SystemZISD::VSUM, DL, VT, Op, Tmp); |
3285 | 1 | break; |
3286 | 4 | } |
3287 | 1 | case 64: { |
3288 | 1 | SDValue Tmp = DAG.getNode(SystemZISD::BYTE_MASK, DL, MVT::v16i8, |
3289 | 1 | DAG.getConstant(0, DL, MVT::i32)); |
3290 | 1 | Op = DAG.getNode(SystemZISD::VSUM, DL, MVT::v4i32, Op, Tmp); |
3291 | 1 | Op = DAG.getNode(SystemZISD::VSUM, DL, VT, Op, Tmp); |
3292 | 1 | break; |
3293 | 4 | } |
3294 | 0 | default: |
3295 | 0 | llvm_unreachable("Unexpected type"); |
3296 | 4 | } |
3297 | 4 | return Op; |
3298 | 4 | } |
3299 | 7 | |
3300 | 7 | // Get the known-zero mask for the operand. |
3301 | 7 | KnownBits Known; |
3302 | 7 | DAG.computeKnownBits(Op, Known); |
3303 | 7 | unsigned NumSignificantBits = (~Known.Zero).getActiveBits(); |
3304 | 7 | if (NumSignificantBits == 0) |
3305 | 0 | return DAG.getConstant(0, DL, VT); |
3306 | 7 | |
3307 | 7 | // Skip known-zero high parts of the operand. |
3308 | 7 | int64_t OrigBitSize = VT.getSizeInBits(); |
3309 | 7 | int64_t BitSize = (int64_t)1 << Log2_32_Ceil(NumSignificantBits); |
3310 | 7 | BitSize = std::min(BitSize, OrigBitSize); |
3311 | 7 | |
3312 | 7 | // The POPCNT instruction counts the number of bits in each byte. |
3313 | 7 | Op = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op); |
3314 | 7 | Op = DAG.getNode(SystemZISD::POPCNT, DL, MVT::i64, Op); |
3315 | 7 | Op = DAG.getNode(ISD::TRUNCATE, DL, VT, Op); |
3316 | 7 | |
3317 | 7 | // Add up per-byte counts in a binary tree. All bits of Op at |
3318 | 7 | // position larger than BitSize remain zero throughout. |
3319 | 16 | for (int64_t I = BitSize / 2; I >= 8; I = I / 2) {
3320 | 9 | SDValue Tmp = DAG.getNode(ISD::SHL, DL, VT, Op, DAG.getConstant(I, DL, VT)); |
3321 | 9 | if (BitSize != OrigBitSize) |
3322 | 4 | Tmp = DAG.getNode(ISD::AND, DL, VT, Tmp, |
3323 | 4 | DAG.getConstant(((uint64_t)1 << BitSize) - 1, DL, VT)); |
3324 | 9 | Op = DAG.getNode(ISD::ADD, DL, VT, Op, Tmp); |
3325 | 9 | } |
3326 | 7 | |
3327 | 7 | // Extract overall result from high byte. |
3328 | 7 | if (BitSize > 8) |
3329 | 5 | Op = DAG.getNode(ISD::SRL, DL, VT, Op, |
3330 | 5 | DAG.getConstant(BitSize - 8, DL, VT)); |
3331 | 11 | |
3332 | 11 | return Op; |
3333 | 11 | } |
3334 | | |
3335 | | SDValue SystemZTargetLowering::lowerATOMIC_FENCE(SDValue Op, |
3336 | 7 | SelectionDAG &DAG) const { |
3337 | 7 | SDLoc DL(Op); |
3338 | 7 | AtomicOrdering FenceOrdering = static_cast<AtomicOrdering>( |
3339 | 7 | cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue()); |
3340 | 7 | SyncScope::ID FenceSSID = static_cast<SyncScope::ID>( |
3341 | 7 | cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue()); |
3342 | 7 | |
3343 | 7 | // The only fence that needs an instruction is a sequentially-consistent |
3344 | 7 | // cross-thread fence. |
3345 | 7 | if (FenceOrdering == AtomicOrdering::SequentiallyConsistent && |
3346 | 7 | FenceSSID == SyncScope::System) {
3347 | 4 | return SDValue(DAG.getMachineNode(SystemZ::Serialize, DL, MVT::Other, |
3348 | 4 | Op.getOperand(0)), |
3349 | 4 | 0); |
3350 | 4 | } |
3351 | 3 | |
3352 | 3 | // MEMBARRIER is a compiler barrier; it codegens to a no-op. |
3353 | 3 | return DAG.getNode(SystemZISD::MEMBARRIER, DL, MVT::Other, Op.getOperand(0)); |
3354 | 3 | } |
3355 | | |
3356 | | // Op is an atomic load. Lower it into a normal volatile load. |
3357 | | SDValue SystemZTargetLowering::lowerATOMIC_LOAD(SDValue Op, |
3358 | 17 | SelectionDAG &DAG) const { |
3359 | 17 | auto *Node = cast<AtomicSDNode>(Op.getNode()); |
3360 | 17 | return DAG.getExtLoad(ISD::EXTLOAD, SDLoc(Op), Op.getValueType(), |
3361 | 17 | Node->getChain(), Node->getBasePtr(), |
3362 | 17 | Node->getMemoryVT(), Node->getMemOperand()); |
3363 | 17 | } |
3364 | | |
3365 | | // Op is an atomic store. Lower it into a normal volatile store. |
3366 | | SDValue SystemZTargetLowering::lowerATOMIC_STORE(SDValue Op, |
3367 | 21 | SelectionDAG &DAG) const { |
3368 | 21 | auto *Node = cast<AtomicSDNode>(Op.getNode()); |
3369 | 21 | SDValue Chain = DAG.getTruncStore(Node->getChain(), SDLoc(Op), Node->getVal(), |
3370 | 21 | Node->getBasePtr(), Node->getMemoryVT(), |
3371 | 21 | Node->getMemOperand()); |
3372 | 21 | // We have to enforce sequential consistency by performing a |
3373 | 21 | // serialization operation after the store. |
3374 | 21 | if (Node->getOrdering() == AtomicOrdering::SequentiallyConsistent) |
3375 | 4 | Chain = SDValue(DAG.getMachineNode(SystemZ::Serialize, SDLoc(Op), |
3376 | 4 | MVT::Other, Chain), 0); |
3377 | 21 | return Chain; |
3378 | 21 | } |
3379 | | |
3380 | | // Op is an 8-, 16-bit or 32-bit ATOMIC_LOAD_* operation. Lower the first |
3381 | | // two into the fullword ATOMIC_LOADW_* operation given by Opcode. |
3382 | | SDValue SystemZTargetLowering::lowerATOMIC_LOAD_OP(SDValue Op, |
3383 | | SelectionDAG &DAG, |
3384 | 468 | unsigned Opcode) const { |
3385 | 468 | auto *Node = cast<AtomicSDNode>(Op.getNode()); |
3386 | 468 | |
3387 | 468 | // 32-bit operations need no code outside the main loop. |
3388 | 468 | EVT NarrowVT = Node->getMemoryVT(); |
3389 | 468 | EVT WideVT = MVT::i32; |
3390 | 468 | if (NarrowVT == WideVT) |
3391 | 196 | return Op; |
3392 | 272 | |
3393 | 272 | int64_t BitSize = NarrowVT.getSizeInBits(); |
3394 | 272 | SDValue ChainIn = Node->getChain(); |
3395 | 272 | SDValue Addr = Node->getBasePtr(); |
3396 | 272 | SDValue Src2 = Node->getVal(); |
3397 | 272 | MachineMemOperand *MMO = Node->getMemOperand(); |
3398 | 272 | SDLoc DL(Node); |
3399 | 272 | EVT PtrVT = Addr.getValueType(); |
3400 | 272 | |
3401 | 272 | // Convert atomic subtracts of constants into additions. |
3402 | 272 | if (Opcode == SystemZISD::ATOMIC_LOADW_SUB) |
3403 | 36 | if (auto *Const = dyn_cast<ConstantSDNode>(Src2)) {
3404 | 30 | Opcode = SystemZISD::ATOMIC_LOADW_ADD; |
3405 | 30 | Src2 = DAG.getConstant(-Const->getSExtValue(), DL, Src2.getValueType()); |
3406 | 30 | } |
3407 | 272 | |
3408 | 272 | // Get the address of the containing word. |
3409 | 272 | SDValue AlignedAddr = DAG.getNode(ISD::AND, DL, PtrVT, Addr, |
3410 | 272 | DAG.getConstant(-4, DL, PtrVT)); |
3411 | 272 | |
3412 | 272 | // Get the number of bits that the word must be rotated left in order |
3413 | 272 | // to bring the field to the top bits of a GR32. |
3414 | 272 | SDValue BitShift = DAG.getNode(ISD::SHL, DL, PtrVT, Addr, |
3415 | 272 | DAG.getConstant(3, DL, PtrVT)); |
3416 | 272 | BitShift = DAG.getNode(ISD::TRUNCATE, DL, WideVT, BitShift); |
3417 | 272 | |
3418 | 272 | // Get the complementing shift amount, for rotating a field in the top |
3419 | 272 | // bits back to its proper position. |
3420 | 272 | SDValue NegBitShift = DAG.getNode(ISD::SUB, DL, WideVT, |
3421 | 272 | DAG.getConstant(0, DL, WideVT), BitShift); |
3422 | 272 | |
3423 | 272 | // Extend the source operand to 32 bits and prepare it for the inner loop. |
3424 | 272 | // ATOMIC_SWAPW uses RISBG to rotate the field left, but all other |
3425 | 272 | // operations require the source to be shifted in advance. (This shift |
3426 | 272 | // can be folded if the source is constant.) For AND and NAND, the lower |
3427 | 272 | // bits must be set, while for other opcodes they should be left clear. |
3428 | 272 | if (Opcode != SystemZISD::ATOMIC_SWAPW) |
3429 | 264 | Src2 = DAG.getNode(ISD::SHL, DL, WideVT, Src2, |
3430 | 264 | DAG.getConstant(32 - BitSize, DL, WideVT)); |
3431 | 272 | if (Opcode == SystemZISD::ATOMIC_LOADW_AND || |
3432 | 236 | Opcode == SystemZISD::ATOMIC_LOADW_NAND) |
3433 | 72 | Src2 = DAG.getNode(ISD::OR, DL, WideVT, Src2, |
3434 | 72 | DAG.getConstant(uint32_t(-1) >> BitSize, DL, WideVT)); |
3435 | 468 | |
3436 | 468 | // Construct the ATOMIC_LOADW_* node. |
3437 | 468 | SDVTList VTList = DAG.getVTList(WideVT, MVT::Other); |
3438 | 468 | SDValue Ops[] = { ChainIn, AlignedAddr, Src2, BitShift, NegBitShift, |
3439 | 468 | DAG.getConstant(BitSize, DL, WideVT) }; |
3440 | 468 | SDValue AtomicOp = DAG.getMemIntrinsicNode(Opcode, DL, VTList, Ops, |
3441 | 468 | NarrowVT, MMO); |
3442 | 468 | |
3443 | 468 | // Rotate the result of the final CS so that the field is in the lower |
3444 | 468 | // bits of a GR32, then truncate it. |
3445 | 468 | SDValue ResultShift = DAG.getNode(ISD::ADD, DL, WideVT, BitShift, |
3446 | 468 | DAG.getConstant(BitSize, DL, WideVT)); |
3447 | 468 | SDValue Result = DAG.getNode(ISD::ROTL, DL, WideVT, AtomicOp, ResultShift); |
3448 | 468 | |
3449 | 468 | SDValue RetOps[2] = { Result, AtomicOp.getValue(1) }; |
3450 | 468 | return DAG.getMergeValues(RetOps, DL); |
3451 | 468 | } |
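A quick arithmetic check of the address and shift computations above, for a byte at offset 1 within its word (SystemZ is big-endian, so rotating left by 8 brings that byte to the top). The "& 31" models the effective rotate amount; plain C++, hypothetical values.

#include <cassert>
#include <cstdint>

int main() {
  uint64_t Addr = 0x1001;                       // an i8 one byte into its word
  uint64_t AlignedAddr = Addr & ~uint64_t(3);   // AND with -4
  uint32_t BitShift = uint32_t(Addr << 3) & 31; // SHL by 3, TRUNCATE
  uint32_t NegBitShift = (0u - BitShift) & 31;  // SUB from 0
  assert(AlignedAddr == 0x1000);
  assert(BitShift == 8);                        // rotate left 8: byte 1 -> top
  assert(NegBitShift == 24);                    // rotating back restores it
  assert((BitShift + NegBitShift) % 32 == 0);
}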
3452 | | |
3453 | | // Op is an ATOMIC_LOAD_SUB operation. Lower 8- and 16-bit operations |
3454 | | // into ATOMIC_LOADW_SUBs and decide whether to convert 32- and 64-bit |
3455 | | // operations into additions. |
3456 | | SDValue SystemZTargetLowering::lowerATOMIC_LOAD_SUB(SDValue Op, |
3457 | 72 | SelectionDAG &DAG) const { |
3458 | 72 | auto *Node = cast<AtomicSDNode>(Op.getNode()); |
3459 | 72 | EVT MemVT = Node->getMemoryVT(); |
3460 | 72 | if (MemVT == MVT::i32 || MemVT == MVT::i64) {
3461 | 36 | // A full-width operation. |
3462 | 36 | assert(Op.getValueType() == MemVT && "Mismatched VTs"); |
3463 | 36 | SDValue Src2 = Node->getVal(); |
3464 | 36 | SDValue NegSrc2; |
3465 | 36 | SDLoc DL(Src2); |
3466 | 36 | |
3467 | 36 | if (auto *Op2 = dyn_cast<ConstantSDNode>(Src2)) {
3468 | 22 | // Use an addition if the operand is constant and either LAA(G) is |
3469 | 22 | // available or the negative value is in the range of A(G)FHI. |
3470 | 22 | int64_t Value = (-Op2->getAPIntValue()).getSExtValue(); |
3471 | 22 | if (isInt<32>(Value) || Subtarget.hasInterlockedAccess1())
3472 | 18 | NegSrc2 = DAG.getConstant(Value, DL, MemVT); |
3473 | 14 | } else if (Subtarget.hasInterlockedAccess1())
3474 | 14 | // Use LAA(G) if available. |
3475 | 10 | NegSrc2 = DAG.getNode(ISD::SUB, DL, MemVT, DAG.getConstant(0, DL, MemVT), |
3476 | 10 | Src2); |
3477 | 36 | |
3478 | 36 | if (NegSrc2.getNode()) |
3479 | 28 | return DAG.getAtomic(ISD::ATOMIC_LOAD_ADD, DL, MemVT, |
3480 | 28 | Node->getChain(), Node->getBasePtr(), NegSrc2, |
3481 | 28 | Node->getMemOperand()); |
3482 | 8 | |
3483 | 8 | // Use the node as-is. |
3484 | 8 | return Op; |
3485 | 8 | } |
3486 | 36 | |
3487 | 36 | return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_SUB); |
3488 | 36 | } |
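 | | // Illustrative sketch (the IR below is an assumed example, not taken from
 | | // this file): "atomicrmw sub i64* %p, i64 16 seq_cst" has a constant operand
 | | // and -16 fits in 32 bits, so the node is rewritten as an ATOMIC_LOAD_ADD of
 | | // -16; a non-constant operand is negated with 0 - Src2 only when LAA/LAAG are
 | | // available (hasInterlockedAccess1), otherwise the ATOMIC_LOAD_SUB node is
 | | // kept and matched as-is.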
3489 | | |
3490 | | // Lower 8/16/32/64-bit ATOMIC_CMP_SWAP_WITH_SUCCESS node. |
3491 | | SDValue SystemZTargetLowering::lowerATOMIC_CMP_SWAP(SDValue Op, |
3492 | 39 | SelectionDAG &DAG) const { |
3493 | 39 | auto *Node = cast<AtomicSDNode>(Op.getNode()); |
3494 | 39 | SDValue ChainIn = Node->getOperand(0); |
3495 | 39 | SDValue Addr = Node->getOperand(1); |
3496 | 39 | SDValue CmpVal = Node->getOperand(2); |
3497 | 39 | SDValue SwapVal = Node->getOperand(3); |
3498 | 39 | MachineMemOperand *MMO = Node->getMemOperand(); |
3499 | 39 | SDLoc DL(Node); |
3500 | 39 | |
3501 | 39 | // We have native support for 32-bit and 64-bit compare and swap, but we |
3502 | 39 | // still need to expand extracting the "success" result from the CC. |
3503 | 39 | EVT NarrowVT = Node->getMemoryVT(); |
3504 | 39 | EVT WideVT = NarrowVT == MVT::i64 ? MVT::i64 : MVT::i32;
3505 | 39 | if (NarrowVT == WideVT) {
3506 | 23 | SDVTList Tys = DAG.getVTList(WideVT, MVT::Other, MVT::Glue); |
3507 | 23 | SDValue Ops[] = { ChainIn, Addr, CmpVal, SwapVal }; |
3508 | 23 | SDValue AtomicOp = DAG.getMemIntrinsicNode(SystemZISD::ATOMIC_CMP_SWAP, |
3509 | 23 | DL, Tys, Ops, NarrowVT, MMO); |
3510 | 23 | SDValue Success = emitSETCC(DAG, DL, AtomicOp.getValue(2), |
3511 | 23 | SystemZ::CCMASK_CS, SystemZ::CCMASK_CS_EQ); |
3512 | 23 | |
3513 | 23 | DAG.ReplaceAllUsesOfValueWith(Op.getValue(0), AtomicOp.getValue(0)); |
3514 | 23 | DAG.ReplaceAllUsesOfValueWith(Op.getValue(1), Success); |
3515 | 23 | DAG.ReplaceAllUsesOfValueWith(Op.getValue(2), AtomicOp.getValue(1)); |
3516 | 23 | return SDValue(); |
3517 | 23 | } |
3518 | 16 | |
3519 | 16 | // Convert 8-bit and 16-bit compare and swap to a loop, implemented |
3520 | 16 | // via a fullword ATOMIC_CMP_SWAPW operation. |
3521 | 16 | int64_t BitSize = NarrowVT.getSizeInBits(); |
3522 | 16 | EVT PtrVT = Addr.getValueType(); |
3523 | 16 | |
3524 | 16 | // Get the address of the containing word. |
3525 | 16 | SDValue AlignedAddr = DAG.getNode(ISD::AND, DL, PtrVT, Addr, |
3526 | 16 | DAG.getConstant(-4, DL, PtrVT)); |
3527 | 16 | |
3528 | 16 | // Get the number of bits that the word must be rotated left in order |
3529 | 16 | // to bring the field to the top bits of a GR32. |
3530 | 16 | SDValue BitShift = DAG.getNode(ISD::SHL, DL, PtrVT, Addr, |
3531 | 16 | DAG.getConstant(3, DL, PtrVT)); |
3532 | 16 | BitShift = DAG.getNode(ISD::TRUNCATE, DL, WideVT, BitShift); |
3533 | 16 | |
3534 | 16 | // Get the complementing shift amount, for rotating a field in the top |
3535 | 16 | // bits back to its proper position. |
3536 | 16 | SDValue NegBitShift = DAG.getNode(ISD::SUB, DL, WideVT, |
3537 | 16 | DAG.getConstant(0, DL, WideVT), BitShift); |
3538 | 16 | |
3539 | 16 | // Construct the ATOMIC_CMP_SWAPW node. |
3540 | 16 | SDVTList VTList = DAG.getVTList(WideVT, MVT::Other, MVT::Glue); |
3541 | 16 | SDValue Ops[] = { ChainIn, AlignedAddr, CmpVal, SwapVal, BitShift, |
3542 | 16 | NegBitShift, DAG.getConstant(BitSize, DL, WideVT) }; |
3543 | 16 | SDValue AtomicOp = DAG.getMemIntrinsicNode(SystemZISD::ATOMIC_CMP_SWAPW, DL, |
3544 | 16 | VTList, Ops, NarrowVT, MMO); |
3545 | 16 | SDValue Success = emitSETCC(DAG, DL, AtomicOp.getValue(2), |
3546 | 16 | SystemZ::CCMASK_ICMP, SystemZ::CCMASK_CMP_EQ); |
3547 | 16 | |
3548 | 16 | DAG.ReplaceAllUsesOfValueWith(Op.getValue(0), AtomicOp.getValue(0)); |
3549 | 16 | DAG.ReplaceAllUsesOfValueWith(Op.getValue(1), Success); |
3550 | 16 | DAG.ReplaceAllUsesOfValueWith(Op.getValue(2), AtomicOp.getValue(1)); |
3551 | 16 | return SDValue(); |
3552 | 16 | } |
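 | | // Note on the success result: COMPARE AND SWAP sets CC 0 when the comparison
 | | // matched and the swap was performed, and CC 1 when it did not, so emitSETCC
 | | // with CCMASK_CS_EQ (or CCMASK_CMP_EQ for the subword loop above) turns the
 | | // CC value left by the operation into the i1 "success" flag.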
3553 | | |
3554 | | SDValue SystemZTargetLowering::lowerSTACKSAVE(SDValue Op, |
3555 | 3 | SelectionDAG &DAG) const { |
3556 | 3 | MachineFunction &MF = DAG.getMachineFunction(); |
3557 | 3 | MF.getInfo<SystemZMachineFunctionInfo>()->setManipulatesSP(true); |
3558 | 3 | return DAG.getCopyFromReg(Op.getOperand(0), SDLoc(Op), |
3559 | 3 | SystemZ::R15D, Op.getValueType()); |
3560 | 3 | } |
3561 | | |
3562 | | SDValue SystemZTargetLowering::lowerSTACKRESTORE(SDValue Op, |
3563 | 2 | SelectionDAG &DAG) const { |
3564 | 2 | MachineFunction &MF = DAG.getMachineFunction(); |
3565 | 2 | MF.getInfo<SystemZMachineFunctionInfo>()->setManipulatesSP(true); |
3566 | 2 | bool StoreBackchain = MF.getFunction()->hasFnAttribute("backchain"); |
3567 | 2 | |
3568 | 2 | SDValue Chain = Op.getOperand(0); |
3569 | 2 | SDValue NewSP = Op.getOperand(1); |
3570 | 2 | SDValue Backchain; |
3571 | 2 | SDLoc DL(Op); |
3572 | 2 | |
3573 | 2 | if (StoreBackchain) {
3574 | 1 | SDValue OldSP = DAG.getCopyFromReg(Chain, DL, SystemZ::R15D, MVT::i64); |
3575 | 1 | Backchain = DAG.getLoad(MVT::i64, DL, Chain, OldSP, MachinePointerInfo()); |
3576 | 1 | } |
3577 | 2 | |
3578 | 2 | Chain = DAG.getCopyToReg(Chain, DL, SystemZ::R15D, NewSP); |
3579 | 2 | |
3580 | 2 | if (StoreBackchain) |
3581 | 1 | Chain = DAG.getStore(Chain, DL, Backchain, NewSP, MachinePointerInfo()); |
3582 | 2 | |
3583 | 2 | return Chain; |
3584 | 2 | } |
3585 | | |
3586 | | SDValue SystemZTargetLowering::lowerPREFETCH(SDValue Op, |
3587 | 8 | SelectionDAG &DAG) const { |
3588 | 8 | bool IsData = cast<ConstantSDNode>(Op.getOperand(4))->getZExtValue(); |
3589 | 8 | if (!IsData) |
3590 | 8 | // Just preserve the chain. |
3591 | 2 | return Op.getOperand(0); |
3592 | 6 | |
3593 | 6 | SDLoc DL(Op); |
3594 | 6 | bool IsWrite = cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue(); |
3595 | 6 | unsigned Code = IsWrite ? SystemZ::PFD_WRITE : SystemZ::PFD_READ;
3596 | 8 | auto *Node = cast<MemIntrinsicSDNode>(Op.getNode()); |
3597 | 8 | SDValue Ops[] = { |
3598 | 8 | Op.getOperand(0), |
3599 | 8 | DAG.getConstant(Code, DL, MVT::i32), |
3600 | 8 | Op.getOperand(1) |
3601 | 8 | }; |
3602 | 8 | return DAG.getMemIntrinsicNode(SystemZISD::PREFETCH, DL, |
3603 | 8 | Node->getVTList(), Ops, |
3604 | 8 | Node->getMemoryVT(), Node->getMemOperand()); |
3605 | 8 | } |
3606 | | |
3607 | | // Return an i32 that contains the value of CC immediately after After, |
3608 | | // whose final operand must be MVT::Glue. |
3609 | 94 | static SDValue getCCResult(SelectionDAG &DAG, SDNode *After) { |
3610 | 94 | SDLoc DL(After); |
3611 | 94 | SDValue Glue = SDValue(After, After->getNumValues() - 1); |
3612 | 94 | SDValue IPM = DAG.getNode(SystemZISD::IPM, DL, MVT::i32, Glue); |
3613 | 94 | return DAG.getNode(ISD::SRL, DL, MVT::i32, IPM, |
3614 | 94 | DAG.getConstant(SystemZ::IPM_CC, DL, MVT::i32)); |
3615 | 94 | } |
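 | | // For illustration: IPM places the condition code in bits 2-3 of the high
 | | // byte of its result, i.e. at bit position 28 of the i32 (SystemZ::IPM_CC is
 | | // expected to be 28 here), so the SRL above leaves the raw CC value 0-3 in
 | | // the low bits of the returned i32.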
3616 | | |
3617 | | SDValue |
3618 | | SystemZTargetLowering::lowerINTRINSIC_W_CHAIN(SDValue Op, |
3619 | 50 | SelectionDAG &DAG) const { |
3620 | 50 | unsigned Opcode, CCValid; |
3621 | 50 | if (isIntrinsicWithCCAndChain(Op, Opcode, CCValid)) {
3622 | 12 | assert(Op->getNumValues() == 2 && "Expected only CC result and chain"); |
3623 | 12 | SDValue Glued = emitIntrinsicWithChainAndGlue(DAG, Op, Opcode); |
3624 | 12 | SDValue CC = getCCResult(DAG, Glued.getNode()); |
3625 | 12 | DAG.ReplaceAllUsesOfValueWith(SDValue(Op.getNode(), 0), CC); |
3626 | 12 | return SDValue(); |
3627 | 12 | } |
3628 | 38 | |
3629 | 38 | return SDValue(); |
3630 | 38 | } |
3631 | | |
3632 | | SDValue |
3633 | | SystemZTargetLowering::lowerINTRINSIC_WO_CHAIN(SDValue Op, |
3634 | 420 | SelectionDAG &DAG) const { |
3635 | 420 | unsigned Opcode, CCValid; |
3636 | 420 | if (isIntrinsicWithCC(Op, Opcode, CCValid)) {
3637 | 82 | SDValue Glued = emitIntrinsicWithGlue(DAG, Op, Opcode); |
3638 | 82 | SDValue CC = getCCResult(DAG, Glued.getNode()); |
3639 | 82 | if (Op->getNumValues() == 1) |
3640 | 5 | return CC; |
3641 | 0 | assert(Op->getNumValues() == 2 && "Expected a CC and non-CC result"); |
3642 | 77 | return DAG.getNode(ISD::MERGE_VALUES, SDLoc(Op), Op->getVTList(), Glued, |
3643 | 77 | CC); |
3644 | 77 | } |
3645 | 338 | |
3646 | 338 | unsigned Id = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); |
3647 | 338 | switch (Id) { |
3648 | 1 | case Intrinsic::thread_pointer: |
3649 | 1 | return lowerThreadPointer(SDLoc(Op), DAG); |
3650 | 338 | |
3651 | 2 | case Intrinsic::s390_vpdi: |
3652 | 2 | return DAG.getNode(SystemZISD::PERMUTE_DWORDS, SDLoc(Op), Op.getValueType(), |
3653 | 2 | Op.getOperand(1), Op.getOperand(2), Op.getOperand(3)); |
3654 | 338 | |
3655 | 1 | case Intrinsic::s390_vperm: |
3656 | 1 | return DAG.getNode(SystemZISD::PERMUTE, SDLoc(Op), Op.getValueType(), |
3657 | 1 | Op.getOperand(1), Op.getOperand(2), Op.getOperand(3)); |
3658 | 338 | |
3659 | 3 | case Intrinsic::s390_vuphb: |
3660 | 3 | case Intrinsic::s390_vuphh: |
3661 | 3 | case Intrinsic::s390_vuphf: |
3662 | 3 | return DAG.getNode(SystemZISD::UNPACK_HIGH, SDLoc(Op), Op.getValueType(), |
3663 | 3 | Op.getOperand(1)); |
3664 | 3 | |
3665 | 3 | case Intrinsic::s390_vuplhb: |
3666 | 3 | case Intrinsic::s390_vuplhh: |
3667 | 3 | case Intrinsic::s390_vuplhf: |
3668 | 3 | return DAG.getNode(SystemZISD::UNPACKL_HIGH, SDLoc(Op), Op.getValueType(), |
3669 | 3 | Op.getOperand(1)); |
3670 | 3 | |
3671 | 3 | case Intrinsic::s390_vuplb: |
3672 | 3 | case Intrinsic::s390_vuplhw: |
3673 | 3 | case Intrinsic::s390_vuplf: |
3674 | 3 | return DAG.getNode(SystemZISD::UNPACK_LOW, SDLoc(Op), Op.getValueType(), |
3675 | 3 | Op.getOperand(1)); |
3676 | 3 | |
3677 | 3 | case Intrinsic::s390_vupllb: |
3678 | 3 | case Intrinsic::s390_vupllh: |
3679 | 3 | case Intrinsic::s390_vupllf: |
3680 | 3 | return DAG.getNode(SystemZISD::UNPACKL_LOW, SDLoc(Op), Op.getValueType(), |
3681 | 3 | Op.getOperand(1)); |
3682 | 3 | |
3683 | 6 | case Intrinsic::s390_vsumb: |
3684 | 6 | case Intrinsic::s390_vsumh: |
3685 | 6 | case Intrinsic::s390_vsumgh: |
3686 | 6 | case Intrinsic::s390_vsumgf: |
3687 | 6 | case Intrinsic::s390_vsumqf: |
3688 | 6 | case Intrinsic::s390_vsumqg: |
3689 | 6 | return DAG.getNode(SystemZISD::VSUM, SDLoc(Op), Op.getValueType(), |
3690 | 6 | Op.getOperand(1), Op.getOperand(2)); |
3691 | 316 | } |
3692 | 316 | |
3693 | 316 | return SDValue(); |
3694 | 316 | } |
3695 | | |
3696 | | namespace { |
3697 | | // Says that SystemZISD operation Opcode can be used to perform the equivalent |
3698 | | // of a VPERM with permute vector Bytes. If Opcode takes three operands, |
3699 | | // Operand is the constant third operand, otherwise it is the number of |
3700 | | // bytes in each element of the result. |
3701 | | struct Permute { |
3702 | | unsigned Opcode; |
3703 | | unsigned Operand; |
3704 | | unsigned char Bytes[SystemZ::VectorBytes]; |
3705 | | }; |
3706 | | } |
3707 | | |
3708 | | static const Permute PermuteForms[] = { |
3709 | | // VMRHG |
3710 | | { SystemZISD::MERGE_HIGH, 8, |
3711 | | { 0, 1, 2, 3, 4, 5, 6, 7, 16, 17, 18, 19, 20, 21, 22, 23 } }, |
3712 | | // VMRHF |
3713 | | { SystemZISD::MERGE_HIGH, 4, |
3714 | | { 0, 1, 2, 3, 16, 17, 18, 19, 4, 5, 6, 7, 20, 21, 22, 23 } }, |
3715 | | // VMRHH |
3716 | | { SystemZISD::MERGE_HIGH, 2, |
3717 | | { 0, 1, 16, 17, 2, 3, 18, 19, 4, 5, 20, 21, 6, 7, 22, 23 } }, |
3718 | | // VMRHB |
3719 | | { SystemZISD::MERGE_HIGH, 1, |
3720 | | { 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23 } }, |
3721 | | // VMRLG |
3722 | | { SystemZISD::MERGE_LOW, 8, |
3723 | | { 8, 9, 10, 11, 12, 13, 14, 15, 24, 25, 26, 27, 28, 29, 30, 31 } }, |
3724 | | // VMRLF |
3725 | | { SystemZISD::MERGE_LOW, 4, |
3726 | | { 8, 9, 10, 11, 24, 25, 26, 27, 12, 13, 14, 15, 28, 29, 30, 31 } }, |
3727 | | // VMRLH |
3728 | | { SystemZISD::MERGE_LOW, 2, |
3729 | | { 8, 9, 24, 25, 10, 11, 26, 27, 12, 13, 28, 29, 14, 15, 30, 31 } }, |
3730 | | // VMRLB |
3731 | | { SystemZISD::MERGE_LOW, 1, |
3732 | | { 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31 } }, |
3733 | | // VPKG |
3734 | | { SystemZISD::PACK, 4, |
3735 | | { 4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31 } }, |
3736 | | // VPKF |
3737 | | { SystemZISD::PACK, 2, |
3738 | | { 2, 3, 6, 7, 10, 11, 14, 15, 18, 19, 22, 23, 26, 27, 30, 31 } }, |
3739 | | // VPKH |
3740 | | { SystemZISD::PACK, 1, |
3741 | | { 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31 } }, |
3742 | | // VPDI V1, V2, 4 (low half of V1, high half of V2) |
3743 | | { SystemZISD::PERMUTE_DWORDS, 4, |
3744 | | { 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23 } }, |
3745 | | // VPDI V1, V2, 1 (high half of V1, low half of V2) |
3746 | | { SystemZISD::PERMUTE_DWORDS, 1, |
3747 | | { 0, 1, 2, 3, 4, 5, 6, 7, 24, 25, 26, 27, 28, 29, 30, 31 } } |
3748 | | }; |
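 | | // Reading one entry as an example: in the VMRHB row, result byte 2*i is byte
 | | // i of operand 0 and result byte 2*i+1 is byte i of operand 1 (selectors
 | | // 16-31 denote the second operand), which is how VECTOR MERGE HIGH (byte)
 | | // interleaves the high halves of its two inputs.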
3749 | | |
3750 | | // Called after matching a vector shuffle against a particular pattern. |
3751 | | // Both the original shuffle and the pattern have two vector operands. |
3752 | | // OpNos[0] is the operand of the original shuffle that should be used for |
3753 | | // operand 0 of the pattern, or -1 if operand 0 of the pattern can be anything. |
3754 | | // OpNos[1] is the same for operand 1 of the pattern. Resolve these -1s and |
3755 | | // set OpNo0 and OpNo1 to the shuffle operands that should actually be used |
3756 | | // for operands 0 and 1 of the pattern. |
3757 | 1.16k | static bool chooseShuffleOpNos(int *OpNos, unsigned &OpNo0, unsigned &OpNo1) { |
3758 | 1.16k | if (OpNos[0] < 0) {
3759 | 2 | if (OpNos[1] < 0) |
3760 | 0 | return false; |
3761 | 2 | OpNo0 = OpNo1 = OpNos[1]; |
3762 | 1.16k | } else if (OpNos[1] < 0) {
3763 | 959 | OpNo0 = OpNo1 = OpNos[0]; |
3764 | 1.16k | } else { |
3765 | 203 | OpNo0 = OpNos[0]; |
3766 | 203 | OpNo1 = OpNos[1]; |
3767 | 203 | } |
3768 | 1.16k | return true; |
3769 | 1.16k | } |
3770 | | |
3771 | | // Bytes is a VPERM-like permute vector, except that -1 is used for |
3772 | | // undefined bytes. Return true if the VPERM can be implemented using P. |
3773 | | // When returning true set OpNo0 to the VPERM operand that should be |
3774 | | // used for operand 0 of P and likewise OpNo1 for operand 1 of P. |
3775 | | // |
3776 | | // For example, if swapping the VPERM operands allows P to match, OpNo0 |
3777 | | // will be 1 and OpNo1 will be 0. If instead Bytes only refers to one |
3778 | | // operand, but rewriting it to use two duplicated operands allows it to |
3779 | | // match P, then OpNo0 and OpNo1 will be the same. |
3780 | | static bool matchPermute(const SmallVectorImpl<int> &Bytes, const Permute &P, |
3781 | 7.59k | unsigned &OpNo0, unsigned &OpNo1) { |
3782 | 7.59k | int OpNos[] = { -1, -1 }; |
3783 | 31.5k | for (unsigned I = 0; I < SystemZ::VectorBytes; ++I) {
3784 | 30.4k | int Elt = Bytes[I]; |
3785 | 30.4k | if (Elt >= 0) {
3786 | 19.8k | // Make sure that the two permute vectors use the same suboperand |
3787 | 19.8k | // byte number. Only the operand numbers (the high bits) are |
3788 | 19.8k | // allowed to differ. |
3789 | 19.8k | if ((Elt ^ P.Bytes[I]) & (SystemZ::VectorBytes - 1)) |
3790 | 6.46k | return false; |
3791 | 13.3k | int ModelOpNo = P.Bytes[I] / SystemZ::VectorBytes; |
3792 | 13.3k | int RealOpNo = unsigned(Elt) / SystemZ::VectorBytes; |
3793 | 13.3k | // Make sure that the operand mappings are consistent with previous |
3794 | 13.3k | // elements. |
3795 | 13.3k | if (OpNos[ModelOpNo] == 1 - RealOpNo) |
3796 | 16 | return false; |
3797 | 13.3k | OpNos[ModelOpNo] = RealOpNo; |
3798 | 13.3k | } |
3799 | 30.4k | } |
3800 | 1.11k | return chooseShuffleOpNos(OpNos, OpNo0, OpNo1); |
3801 | 7.59k | } |
3802 | | |
3803 | | // As above, but search for a matching permute. |
3804 | | static const Permute *matchPermute(const SmallVectorImpl<int> &Bytes, |
3805 | 1.22k | unsigned &OpNo0, unsigned &OpNo1) { |
3806 | 1.22k | for (auto &P : PermuteForms) |
3807 | 7.59k | if (matchPermute(Bytes, P, OpNo0, OpNo1))
3808 | 1.11k | return &P; |
3809 | 113 | return nullptr; |
3810 | 113 | } |
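 | | // For illustration: the byte mask { 16,17,...,23, 0,1,...,7 } is the VMRHG
 | | // pattern with the two inputs swapped, so the match succeeds on the first
 | | // table entry with OpNo0 = 1 and OpNo1 = 0 and the caller simply swaps the
 | | // operands it feeds to MERGE_HIGH.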
3811 | | |
3812 | | // Bytes is a VPERM-like permute vector, except that -1 is used for |
3813 | | // undefined bytes. This permute is an operand of an outer permute. |
3814 | | // See whether redistributing the -1 bytes gives a shuffle that can be |
3815 | | // implemented using P. If so, set Transform to a VPERM-like permute vector |
3816 | | // that, when applied to the result of P, gives the original permute in Bytes. |
3817 | | static bool matchDoublePermute(const SmallVectorImpl<int> &Bytes, |
3818 | | const Permute &P, |
3819 | 614 | SmallVectorImpl<int> &Transform) { |
3820 | 614 | unsigned To = 0; |
3821 | 4.59k | for (unsigned From = 0; From < SystemZ::VectorBytes4.59k ; ++From3.98k ) { |
3822 | 4.59k | for (unsigned From = 0; From < SystemZ::VectorBytes; ++From) {
3823 | 4.53k | if (Elt < 0) |
3824 | 4.53k | // Byte number From of the result is undefined. |
3825 | 3.15k | Transform[From] = -1; |
3826 | 1.38k | else { |
3827 | 10.5k | while (P.Bytes[To] != Elt) {
3828 | 9.75k | To += 1; |
3829 | 9.75k | if (To == SystemZ::VectorBytes) |
3830 | 548 | return false; |
3831 | 9.75k | } |
3832 | 832 | Transform[From] = To; |
3833 | 832 | } |
3834 | 4.53k | } |
3835 | 66 | return true; |
3836 | 614 | } |
3837 | | |
3838 | | // As above, but search for a matching permute. |
3839 | | static const Permute *matchDoublePermute(const SmallVectorImpl<int> &Bytes, |
3840 | 66 | SmallVectorImpl<int> &Transform) { |
3841 | 66 | for (auto &P : PermuteForms) |
3842 | 614 | if (matchDoublePermute(Bytes, P, Transform))
3843 | 66 | return &P; |
3844 | 0 | return nullptr; |
3845 | 0 | } |
3846 | | |
3847 | | // Convert the mask of the given VECTOR_SHUFFLE into a byte-level mask, |
3848 | | // as if it had type vNi8. |
3849 | | static void getVPermMask(ShuffleVectorSDNode *VSN, |
3850 | 116 | SmallVectorImpl<int> &Bytes) { |
3851 | 116 | EVT VT = VSN->getValueType(0); |
3852 | 116 | unsigned NumElements = VT.getVectorNumElements(); |
3853 | 116 | unsigned BytesPerElement = VT.getVectorElementType().getStoreSize(); |
3854 | 116 | Bytes.resize(NumElements * BytesPerElement, -1); |
3855 | 890 | for (unsigned I = 0; I < NumElements; ++I) {
3856 | 774 | int Index = VSN->getMaskElt(I); |
3857 | 774 | if (Index >= 0) |
3858 | 1.59k | for (unsigned J = 0; J < BytesPerElement; ++J)
3859 | 1.13k | Bytes[I * BytesPerElement + J] = Index * BytesPerElement + J; |
3860 | 774 | } |
3861 | 116 | } |
3862 | | |
3863 | | // Bytes is a VPERM-like permute vector, except that -1 is used for |
3864 | | // undefined bytes. See whether bytes [Start, Start + BytesPerElement) of |
3865 | | // the result come from a contiguous sequence of bytes from one input. |
3866 | | // Set Base to the selector for the first byte if so. |
3867 | | static bool getShuffleInput(const SmallVectorImpl<int> &Bytes, unsigned Start, |
3868 | 116 | unsigned BytesPerElement, int &Base) { |
3869 | 116 | Base = -1; |
3870 | 292 | for (unsigned I = 0; I < BytesPerElement; ++I) {
3871 | 176 | if (Bytes[Start + I] >= 0) {
3872 | 176 | unsigned Elem = Bytes[Start + I]; |
3873 | 176 | if (Base < 0) {
3874 | 116 | Base = Elem - I; |
3875 | 116 | // Make sure the bytes would come from one input operand. |
3876 | 116 | if (unsigned(Base) % Bytes.size() + BytesPerElement > Bytes.size()) |
3877 | 0 | return false; |
3878 | 60 | } else if (unsigned(Base) != Elem - I)
3879 | 0 | return false; |
3880 | 176 | } |
3881 | 176 | } |
3882 | 116 | return true; |
3883 | 116 | } |
3884 | | |
3885 | | // Bytes is a VPERM-like permute vector, except that -1 is used for |
3886 | | // undefined bytes. Return true if it can be performed using VSLDI. |
3887 | | // When returning true, set StartIndex to the shift amount and OpNo0 |
3888 | | // and OpNo1 to the VPERM operands that should be used as the first |
3889 | | // and second shift operand respectively. |
3890 | | static bool isShlDoublePermute(const SmallVectorImpl<int> &Bytes, |
3891 | | unsigned &StartIndex, unsigned &OpNo0, |
3892 | 113 | unsigned &OpNo1) { |
3893 | 113 | int OpNos[] = { -1, -1 }; |
3894 | 113 | int Shift = -1; |
3895 | 1.06k | for (unsigned I = 0; I < 16; ++I) {
3896 | 1.01k | int Index = Bytes[I]; |
3897 | 1.01k | if (Index >= 0) {
3898 | 489 | int ExpectedShift = (Index - I) % SystemZ::VectorBytes; |
3899 | 489 | int ModelOpNo = unsigned(ExpectedShift + I) / SystemZ::VectorBytes; |
3900 | 489 | int RealOpNo = unsigned(Index) / SystemZ::VectorBytes; |
3901 | 489 | if (Shift < 0) |
3902 | 113 | Shift = ExpectedShift; |
3903 | 376 | else if (Shift != ExpectedShift)
3904 | 57 | return false; |
3905 | 432 | // Make sure that the operand mappings are consistent with previous |
3906 | 432 | // elements. |
3907 | 432 | if (OpNos[ModelOpNo] == 1 - RealOpNo)
3908 | 5 | return false; |
3909 | 427 | OpNos[ModelOpNo] = RealOpNo; |
3910 | 427 | } |
3911 | 1.01k | } |
3912 | 51 | StartIndex = Shift; |
3913 | 51 | return chooseShuffleOpNos(OpNos, OpNo0, OpNo1); |
3914 | 113 | } |
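 | | // For illustration: Bytes == { 4,5,...,19 } selects the 16 consecutive bytes
 | | // starting at offset 4 of the concatenation Op0||Op1, so the loop settles on
 | | // Shift == 4 with OpNo0 == 0 and OpNo1 == 1, and the caller emits a single
 | | // SHL_DOUBLE node (the VSLDI mentioned above) with that start index.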
3915 | | |
3916 | | // Create a node that performs P on operands Op0 and Op1, casting the |
3917 | | // operands to the appropriate type. The type of the result is determined by P. |
3918 | | static SDValue getPermuteNode(SelectionDAG &DAG, const SDLoc &DL, |
3919 | 1.17k | const Permute &P, SDValue Op0, SDValue Op1) { |
3920 | 1.17k | // VPDI (PERMUTE_DWORDS) always operates on v2i64s. The input |
3921 | 1.17k | // elements of a PACK are twice as wide as the outputs. |
3922 | 15 | unsigned InBytes = (P.Opcode == SystemZISD::PERMUTE_DWORDS ? 8 : |
3923 | 1.16k | P.Opcode == SystemZISD::PACK ? P.Operand * 2 :
3924 | 856 | P.Operand); |
3925 | 1.17k | // Cast both operands to the appropriate type. |
3926 | 1.17k | MVT InVT = MVT::getVectorVT(MVT::getIntegerVT(InBytes * 8), |
3927 | 1.17k | SystemZ::VectorBytes / InBytes); |
3928 | 1.17k | Op0 = DAG.getNode(ISD::BITCAST, DL, InVT, Op0); |
3929 | 1.17k | Op1 = DAG.getNode(ISD::BITCAST, DL, InVT, Op1); |
3930 | 1.17k | SDValue Op; |
3931 | 1.17k | if (P.Opcode == SystemZISD::PERMUTE_DWORDS) {
3932 | 15 | SDValue Op2 = DAG.getConstant(P.Operand, DL, MVT::i32); |
3933 | 15 | Op = DAG.getNode(SystemZISD::PERMUTE_DWORDS, DL, InVT, Op0, Op1, Op2); |
3934 | 1.16k | } else if (P.Opcode == SystemZISD::PACK) {
3935 | 308 | MVT OutVT = MVT::getVectorVT(MVT::getIntegerVT(P.Operand * 8), |
3936 | 308 | SystemZ::VectorBytes / P.Operand); |
3937 | 308 | Op = DAG.getNode(SystemZISD::PACK, DL, OutVT, Op0, Op1); |
3938 | 1.16k | } else { |
3939 | 856 | Op = DAG.getNode(P.Opcode, DL, InVT, Op0, Op1); |
3940 | 856 | } |
3941 | 1.17k | return Op; |
3942 | 1.17k | } |
3943 | | |
3944 | | // Bytes is a VPERM-like permute vector, except that -1 is used for |
3945 | | // undefined bytes. Implement it on operands Ops[0] and Ops[1] using |
3946 | | // VSLDI or VPERM. |
3947 | | static SDValue getGeneralPermuteNode(SelectionDAG &DAG, const SDLoc &DL, |
3948 | | SDValue *Ops, |
3949 | 113 | const SmallVectorImpl<int> &Bytes) { |
3950 | 339 | for (unsigned I = 0; I < 2; ++I)
3951 | 226 | Ops[I] = DAG.getNode(ISD::BITCAST, DL, MVT::v16i8, Ops[I]); |
3952 | 113 | |
3953 | 113 | // First see whether VSLDI can be used. |
3954 | 113 | unsigned StartIndex, OpNo0, OpNo1; |
3955 | 113 | if (isShlDoublePermute(Bytes, StartIndex, OpNo0, OpNo1)) |
3956 | 51 | return DAG.getNode(SystemZISD::SHL_DOUBLE, DL, MVT::v16i8, Ops[OpNo0], |
3957 | 51 | Ops[OpNo1], DAG.getConstant(StartIndex, DL, MVT::i32)); |
3958 | 62 | |
3959 | 62 | // Fall back on VPERM. Construct an SDNode for the permute vector. |
3960 | 62 | SDValue IndexNodes[SystemZ::VectorBytes]; |
3961 | 1.05k | for (unsigned I = 0; I < SystemZ::VectorBytes; ++I)
3962 | 992 | if (Bytes[I] >= 0)
3963 | 396 | IndexNodes[I] = DAG.getConstant(Bytes[I], DL, MVT::i32); |
3964 | 992 | else |
3965 | 596 | IndexNodes[I] = DAG.getUNDEF(MVT::i32); |
3966 | 113 | SDValue Op2 = DAG.getBuildVector(MVT::v16i8, DL, IndexNodes); |
3967 | 113 | return DAG.getNode(SystemZISD::PERMUTE, DL, MVT::v16i8, Ops[0], Ops[1], Op2); |
3968 | 113 | } |
3969 | | |
3970 | | namespace { |
3971 | | // Describes a general N-operand vector shuffle. |
3972 | | struct GeneralShuffle { |
3973 | 1.40k | GeneralShuffle(EVT vt) : VT(vt) {} |
3974 | | void addUndef(); |
3975 | | bool add(SDValue, unsigned); |
3976 | | SDValue getNode(SelectionDAG &, const SDLoc &); |
3977 | | |
3978 | | // The operands of the shuffle. |
3979 | | SmallVector<SDValue, SystemZ::VectorBytes> Ops; |
3980 | | |
3981 | | // Index I is -1 if byte I of the result is undefined. Otherwise the |
3982 | | // result comes from byte Bytes[I] % SystemZ::VectorBytes of operand |
3983 | | // Bytes[I] / SystemZ::VectorBytes. |
3984 | | SmallVector<int, SystemZ::VectorBytes> Bytes; |
3985 | | |
3986 | | // The type of the shuffle result. |
3987 | | EVT VT; |
3988 | | }; |
3989 | | } |
3990 | | |
3991 | | // Add an extra undefined element to the shuffle. |
3992 | 3.74k | void GeneralShuffle::addUndef() { |
3993 | 3.74k | unsigned BytesPerElement = VT.getVectorElementType().getStoreSize(); |
3994 | 13.1k | for (unsigned I = 0; I < BytesPerElement; ++I)
3995 | 9.41k | Bytes.push_back(-1); |
3996 | 3.74k | } |
3997 | | |
3998 | | // Add an extra element to the shuffle, taking it from element Elem of Op. |
3999 | | // A null Op indicates a vector input whose value will be calculated later; |
4000 | | // there is at most one such input per shuffle and it always has the same |
4001 | | // type as the result. Aborts and returns false if the source vector elements |
4002 | | // of an EXTRACT_VECTOR_ELT are smaller than the destination elements. Per |
4003 | | // LLVM they become implicitly extended, but this is rare and not optimized. |
4004 | 4.76k | bool GeneralShuffle::add(SDValue Op, unsigned Elem) { |
4005 | 4.76k | unsigned BytesPerElement = VT.getVectorElementType().getStoreSize(); |
4006 | 4.76k | |
4007 | 4.76k | // The source vector can have wider elements than the result, |
4008 | 4.76k | // either through an explicit TRUNCATE or because of type legalization. |
4009 | 4.76k | // We want the least significant part. |
4010 | 4.76k | EVT FromVT = Op.getNode() ? Op.getValueType() : VT;
4011 | 4.76k | unsigned FromBytesPerElement = FromVT.getVectorElementType().getStoreSize(); |
4012 | 4.76k | |
4013 | 4.76k | // Return false if the source elements are smaller than their destination |
4014 | 4.76k | // elements. |
4015 | 4.76k | if (FromBytesPerElement < BytesPerElement) |
4016 | 5 | return false; |
4017 | 4.76k | |
4018 | 4.76k | unsigned Byte = ((Elem * FromBytesPerElement) % SystemZ::VectorBytes + |
4019 | 4.76k | (FromBytesPerElement - BytesPerElement)); |
4020 | 4.76k | |
4021 | 4.76k | // Look through things like shuffles and bitcasts. |
4022 | 5.41k | while (Op.getNode()) {
4023 | 4.76k | if (Op.getOpcode() == ISD::BITCAST) |
4024 | 614 | Op = Op.getOperand(0); |
4025 | 4.14k | else if (Op.getOpcode() == ISD::VECTOR_SHUFFLE && Op.hasOneUse()) {
4026 | 41 | // See whether the bytes we need come from a contiguous part of one |
4027 | 41 | // operand. |
4028 | 41 | SmallVector<int, SystemZ::VectorBytes> OpBytes; |
4029 | 41 | getVPermMask(cast<ShuffleVectorSDNode>(Op), OpBytes); |
4030 | 41 | int NewByte; |
4031 | 41 | if (!getShuffleInput(OpBytes, Byte, BytesPerElement, NewByte)) |
4032 | 0 | break; |
4033 | 41 | if (NewByte < 0) {
4034 | 0 | addUndef(); |
4035 | 0 | return true; |
4036 | 0 | } |
4037 | 41 | Op = Op.getOperand(unsigned(NewByte) / SystemZ::VectorBytes); |
4038 | 41 | Byte = unsigned(NewByte) % SystemZ::VectorBytes; |
4039 | 4.10k | } else if (Op.isUndef()) {
4040 | 0 | addUndef(); |
4041 | 0 | return true; |
4042 | 0 | } else |
4043 | 4.10k | break; |
4044 | 4.76k | } |
4045 | 4.76k | |
4046 | 4.76k | // Make sure that the source of the extraction is in Ops. |
4047 | 4.76k | unsigned OpNo = 0; |
4048 | 5.92k | for (; OpNo < Ops.size(); ++OpNo)
4049 | 4.23k | if (Ops[OpNo] == Op)
4050 | 3.07k | break; |
4051 | 4.76k | if (OpNo == Ops.size()) |
4052 | 1.68k | Ops.push_back(Op); |
4053 | 4.76k | |
4054 | 4.76k | // Add the element to Bytes. |
4055 | 4.76k | unsigned Base = OpNo * SystemZ::VectorBytes + Byte; |
4056 | 17.7k | for (unsigned I = 0; I < BytesPerElement; ++I)
4057 | 12.9k | Bytes.push_back(Base + I); |
4058 | 4.76k | |
4059 | 4.76k | return true; |
4060 | 4.76k | } |
4061 | | |
4062 | | // Return SDNodes for the completed shuffle. |
4063 | 1.22k | SDValue GeneralShuffle::getNode(SelectionDAG &DAG, const SDLoc &DL) { |
4064 | 1.22k | assert(Bytes.size() == SystemZ::VectorBytes && "Incomplete vector"); |
4065 | 1.22k | |
4066 | 1.22k | if (Ops.size() == 0) |
4067 | 0 | return DAG.getUNDEF(VT); |
4068 | 1.22k | |
4069 | 1.22k | // Make sure that there are at least two shuffle operands. |
4070 | 1.22k | if (Ops.size() == 1)
4071 | 1.00k | Ops.push_back(DAG.getUNDEF(MVT::v16i8)); |
4072 | 1.22k | |
4073 | 1.22k | // Create a tree of shuffles, deferring root node until after the loop. |
4074 | 1.22k | // Try to redistribute the undefined elements of non-root nodes so that |
4075 | 1.22k | // the non-root shuffles match something like a pack or merge, then adjust |
4076 | 1.22k | // the parent node's permute vector to compensate for the new order. |
4077 | 1.22k | // Among other things, this copes with vectors like <2 x i16> that were |
4078 | 1.22k | // padded with undefined elements during type legalization. |
4079 | 1.22k | // |
4080 | 1.22k | // In the best case this redistribution will lead to the whole tree |
4081 | 1.22k | // using packs and merges. It should rarely be a loss in other cases. |
4082 | 1.22k | unsigned Stride = 1; |
4083 | 1.25k | for (; Stride * 2 < Ops.size(); Stride *= 2) {
4084 | 93 | for (unsigned I = 0; I < Ops.size() - Stride; I += Stride * 2) {
4085 | 66 | SDValue SubOps[] = { Ops[I], Ops[I + Stride] }; |
4086 | 66 | |
4087 | 66 | // Create a mask for just these two operands. |
4088 | 66 | SmallVector<int, SystemZ::VectorBytes> NewBytes(SystemZ::VectorBytes); |
4089 | 1.12k | for (unsigned J = 0; J < SystemZ::VectorBytes; ++J) {
4090 | 1.05k | unsigned OpNo = unsigned(Bytes[J]) / SystemZ::VectorBytes; |
4091 | 1.05k | unsigned Byte = unsigned(Bytes[J]) % SystemZ::VectorBytes; |
4092 | 1.05k | if (OpNo == I) |
4093 | 208 | NewBytes[J] = Byte; |
4094 | 848 | else if (OpNo == I + Stride)
4095 | 208 | NewBytes[J] = SystemZ::VectorBytes + Byte; |
4096 | 848 | else |
4097 | 640 | NewBytes[J] = -1; |
4098 | 1.05k | } |
4099 | 66 | // See if it would be better to reorganize NewBytes to avoid using VPERM.
4100 | 66 | SmallVector<int, SystemZ::VectorBytes> NewBytesMap(SystemZ::VectorBytes); |
4101 | 66 | if (const Permute *P = matchDoublePermute(NewBytes, NewBytesMap)) {
4102 | 66 | Ops[I] = getPermuteNode(DAG, DL, *P, SubOps[0], SubOps[1]); |
4103 | 66 | // Applying NewBytesMap to Ops[I] gets back to NewBytes. |
4104 | 1.12k | for (unsigned J = 0; J < SystemZ::VectorBytes; ++J) {
4105 | 1.05k | if (NewBytes[J] >= 0) {
4106 | 416 | assert(unsigned(NewBytesMap[J]) < SystemZ::VectorBytes && |
4107 | 416 | "Invalid double permute"); |
4108 | 416 | Bytes[J] = I * SystemZ::VectorBytes + NewBytesMap[J]; |
4109 | 416 | } else |
4110 | 1.05k | assert(NewBytesMap[J] < 0 && "Invalid double permute"); |
4111 | 1.05k | } |
4112 | 0 | } else { |
4113 | 0 | // Just use NewBytes on the operands. |
4114 | 0 | Ops[I] = getGeneralPermuteNode(DAG, DL, SubOps, NewBytes); |
4115 | 0 | for (unsigned J = 0; J < SystemZ::VectorBytes; ++J)
4116 | 0 | if (NewBytes[J] >= 0)
4117 | 0 | Bytes[J] = I * SystemZ::VectorBytes + J; |
4118 | 0 | } |
4119 | 66 | } |
4120 | 27 | } |
4121 | 1.22k | |
4122 | 1.22k | // Now we just have 2 inputs. Put the second operand in Ops[1]. |
4123 | 1.22k | if (Stride > 1) {
4124 | 21 | Ops[1] = Ops[Stride]; |
4125 | 357 | for (unsigned I = 0; I < SystemZ::VectorBytes; ++I)
4126 | 336 | if (Bytes[I] >= int(SystemZ::VectorBytes))
4127 | 160 | Bytes[I] -= (Stride - 1) * SystemZ::VectorBytes; |
4128 | 21 | } |
4129 | 1.22k | |
4130 | 1.22k | // Look for an instruction that can do the permute without resorting |
4131 | 1.22k | // to VPERM. |
4132 | 1.22k | unsigned OpNo0, OpNo1; |
4133 | 1.22k | SDValue Op; |
4134 | 1.22k | if (const Permute *P = matchPermute(Bytes, OpNo0, OpNo1)) |
4135 | 1.11k | Op = getPermuteNode(DAG, DL, *P, Ops[OpNo0], Ops[OpNo1]); |
4136 | 1.22k | else |
4137 | 113 | Op = getGeneralPermuteNode(DAG, DL, &Ops[0], Bytes); |
4138 | 1.22k | return DAG.getNode(ISD::BITCAST, DL, VT, Op); |
4139 | 1.22k | } |
4140 | | |
4141 | | // Return true if the given BUILD_VECTOR is a scalar-to-vector conversion. |
4142 | 42 | static bool isScalarToVector(SDValue Op) { |
4143 | 52 | for (unsigned I = 1, E = Op.getNumOperands(); I != E; ++I)
4144 | 46 | if (!Op.getOperand(I).isUndef())
4145 | 36 | return false; |
4146 | 6 | return true; |
4147 | 42 | } |
4148 | | |
4149 | | // Return a vector of type VT that contains Value in the first element. |
4150 | | // The other elements don't matter. |
4151 | | static SDValue buildScalarToVector(SelectionDAG &DAG, const SDLoc &DL, EVT VT, |
4152 | 64 | SDValue Value) { |
4153 | 64 | // If we have a constant, replicate it to all elements and let the |
4154 | 64 | // BUILD_VECTOR lowering take care of it. |
4155 | 64 | if (Value.getOpcode() == ISD::Constant || |
4156 | 64 | Value.getOpcode() == ISD::ConstantFP) {
4157 | 11 | SmallVector<SDValue, 16> Ops(VT.getVectorNumElements(), Value); |
4158 | 11 | return DAG.getBuildVector(VT, DL, Ops); |
4159 | 11 | } |
4160 | 53 | if (Value.isUndef())
4161 | 0 | return DAG.getUNDEF(VT); |
4162 | 53 | return DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VT, Value); |
4163 | 53 | } |
4164 | | |
4165 | | // Return a vector of type VT in which Op0 is in element 0 and Op1 is in |
4166 | | // element 1. Used for cases in which replication is cheap. |
4167 | | static SDValue buildMergeScalars(SelectionDAG &DAG, const SDLoc &DL, EVT VT, |
4168 | 35 | SDValue Op0, SDValue Op1) { |
4169 | 35 | if (Op0.isUndef()) {
4170 | 5 | if (Op1.isUndef()) |
4171 | 3 | return DAG.getUNDEF(VT); |
4172 | 2 | return DAG.getNode(SystemZISD::REPLICATE, DL, VT, Op1); |
4173 | 2 | } |
4174 | 30 | if (Op1.isUndef())
4175 | 1 | return DAG.getNode(SystemZISD::REPLICATE, DL, VT, Op0); |
4176 | 29 | return DAG.getNode(SystemZISD::MERGE_HIGH, DL, VT, |
4177 | 29 | buildScalarToVector(DAG, DL, VT, Op0), |
4178 | 29 | buildScalarToVector(DAG, DL, VT, Op1)); |
4179 | 29 | } |
4180 | | |
4181 | | // Extend GPR scalars Op0 and Op1 to doublewords and return a v2i64 |
4182 | | // vector for them. |
4183 | | static SDValue joinDwords(SelectionDAG &DAG, const SDLoc &DL, SDValue Op0, |
4184 | 59 | SDValue Op1) { |
4185 | 59 | if (Op0.isUndef() && Op1.isUndef())
4186 | 0 | return DAG.getUNDEF(MVT::v2i64); |
4187 | 59 | // If one of the two inputs is undefined then replicate the other one, |
4188 | 59 | // in order to avoid using another register unnecessarily. |
4189 | 59 | if (Op0.isUndef())
4190 | 2 | Op0 = Op1 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op1); |
4191 | 57 | else if (Op1.isUndef())
4192 | 5 | Op0 = Op1 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op0); |
4193 | 52 | else { |
4194 | 52 | Op0 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op0); |
4195 | 52 | Op1 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op1); |
4196 | 52 | } |
4197 | 59 | return DAG.getNode(SystemZISD::JOIN_DWORDS, DL, MVT::v2i64, Op0, Op1); |
4198 | 59 | } |
4199 | | |
4200 | | // Try to represent constant BUILD_VECTOR node BVN using a |
4201 | | // SystemZISD::BYTE_MASK-style mask. Store the mask value in Mask |
4202 | | // on success. |
4203 | 514 | static bool tryBuildVectorByteMask(BuildVectorSDNode *BVN, uint64_t &Mask) { |
4204 | 514 | EVT ElemVT = BVN->getValueType(0).getVectorElementType(); |
4205 | 514 | unsigned BytesPerElement = ElemVT.getStoreSize(); |
4206 | 1.99k | for (unsigned I = 0, E = BVN->getNumOperands(); I != E; ++I) {
4207 | 1.82k | SDValue Op = BVN->getOperand(I); |
4208 | 1.82k | if (!Op.isUndef()) {
4209 | 1.59k | uint64_t Value; |
4210 | 1.59k | if (Op.getOpcode() == ISD::Constant) |
4211 | 1.45k | Value = dyn_cast<ConstantSDNode>(Op)->getZExtValue(); |
4212 | 134 | else if (134 Op.getOpcode() == ISD::ConstantFP134 ) |
4213 | 134 | else if (Op.getOpcode() == ISD::ConstantFP)
4214 | 134 | .getZExtValue()); |
4215 | 134 | else |
4216 | 0 | return false; |
4217 | 4.54k | for (unsigned J = 0; J < BytesPerElement; ++J) {
4218 | 3.29k | uint64_t Byte = (Value >> (J * 8)) & 0xff; |
4219 | 3.29k | if (Byte == 0xff) |
4220 | 1.11k | Mask |= 1ULL << ((E - I - 1) * BytesPerElement + J); |
4221 | 2.18k | else if (Byte != 0)
4222 | 345 | return false; |
4223 | 3.29k | } |
4224 | 1.59k | } |
4225 | 1.82k | } |
4226 | 169 | return true; |
4227 | 514 | } |
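 | | // Worked example: the v4i32 constant <0, -1, 0, -1> passes this check (every
 | | // byte is 0x00 or 0xff) and yields Mask == 0x0f0f, i.e. bytes 4-7 and 12-15
 | | // of the result set to 0xff under the numbering used here, where element 0
 | | // maps to the most significant mask bits.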
4228 | | |
4229 | | // Try to load a vector constant in which BitsPerElement-bit value Value |
4230 | | // is replicated to fill the vector. VT is the type of the resulting |
4231 | | // constant, which may have elements of a different size from BitsPerElement. |
4232 | | // Return the SDValue of the constant on success, otherwise return |
4233 | | // an empty value. |
4234 | | static SDValue tryBuildVectorReplicate(SelectionDAG &DAG, |
4235 | | const SystemZInstrInfo *TII, |
4236 | | const SDLoc &DL, EVT VT, uint64_t Value, |
4237 | 363 | unsigned BitsPerElement) { |
4238 | 363 | // Signed 16-bit values can be replicated using VREPI. |
4239 | 363 | int64_t SignedValue = SignExtend64(Value, BitsPerElement); |
4240 | 363 | if (isInt<16>(SignedValue)) {
4241 | 159 | MVT VecVT = MVT::getVectorVT(MVT::getIntegerVT(BitsPerElement), |
4242 | 159 | SystemZ::VectorBits / BitsPerElement); |
4243 | 159 | SDValue Op = DAG.getNode(SystemZISD::REPLICATE, DL, VecVT, |
4244 | 159 | DAG.getConstant(SignedValue, DL, MVT::i32)); |
4245 | 159 | return DAG.getNode(ISD::BITCAST, DL, VT, Op); |
4246 | 159 | } |
4247 | 204 | // See whether rotating the constant left some N places gives a value that |
4248 | 204 | // is one less than a power of 2 (i.e. all zeros followed by all ones). |
4249 | 204 | // If so we can use VGM. |
4250 | 204 | unsigned Start, End; |
4251 | 204 | if (TII->isRxSBGMask(Value, BitsPerElement, Start, End)) {
4252 | 93 | // isRxSBGMask returns the bit numbers for a full 64-bit value, |
4253 | 93 | // with 0 denoting 1 << 63 and 63 denoting 1. Convert them to |
4254 | 93 | // bit numbers for a BitsPerElement value, so that 0 denotes
4255 | 93 | // 1 << (BitsPerElement-1). |
4256 | 93 | Start -= 64 - BitsPerElement; |
4257 | 93 | End -= 64 - BitsPerElement; |
4258 | 93 | MVT VecVT = MVT::getVectorVT(MVT::getIntegerVT(BitsPerElement), |
4259 | 93 | SystemZ::VectorBits / BitsPerElement); |
4260 | 93 | SDValue Op = DAG.getNode(SystemZISD::ROTATE_MASK, DL, VecVT, |
4261 | 93 | DAG.getConstant(Start, DL, MVT::i32), |
4262 | 93 | DAG.getConstant(End, DL, MVT::i32)); |
4263 | 93 | return DAG.getNode(ISD::BITCAST, DL, VT, Op); |
4264 | 93 | } |
4265 | 111 | return SDValue(); |
4266 | 111 | } |
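 | | // Two sketches of the paths above (values chosen for illustration): a 16-bit
 | | // element value of 0xfffe sign-extends to -2 and is emitted as a VREPI; a
 | | // 32-bit element value of 0x00ffff00 is not a signed 16-bit immediate but is
 | | // a contiguous run of ones, so isRxSBGMask accepts it and it becomes a
 | | // ROTATE_MASK (VGM) with Start == 8 and End == 23.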
4267 | | |
4268 | | // If a BUILD_VECTOR contains some EXTRACT_VECTOR_ELTs, it's usually |
4269 | | // better to use VECTOR_SHUFFLEs on them, only using BUILD_VECTOR for |
4270 | | // the non-EXTRACT_VECTOR_ELT elements. See if the given BUILD_VECTOR |
4271 | | // would benefit from this representation and return it if so. |
4272 | | static SDValue tryBuildVectorShuffle(SelectionDAG &DAG, |
4273 | 316 | BuildVectorSDNode *BVN) { |
4274 | 316 | EVT VT = BVN->getValueType(0); |
4275 | 316 | unsigned NumElements = VT.getVectorNumElements(); |
4276 | 316 | |
4277 | 316 | // Represent the BUILD_VECTOR as an N-operand VECTOR_SHUFFLE-like operation |
4278 | 316 | // on byte vectors. If there are non-EXTRACT_VECTOR_ELT elements that still |
4279 | 316 | // need a BUILD_VECTOR, add an additional placeholder operand for that |
4280 | 316 | // BUILD_VECTOR and store its operands in ResidueOps. |
4281 | 316 | GeneralShuffle GS(VT); |
4282 | 316 | SmallVector<SDValue, SystemZ::VectorBytes> ResidueOps; |
4283 | 316 | bool FoundOne = false; |
4284 | 2.81k | for (unsigned I = 0; I < NumElements; ++I) {
4285 | 2.50k | SDValue Op = BVN->getOperand(I); |
4286 | 2.50k | if (Op.getOpcode() == ISD::TRUNCATE) |
4287 | 90 | Op = Op.getOperand(0); |
4288 | 2.50k | if (Op.getOpcode() == ISD::EXTRACT_VECTOR_ELT && |
4289 | 2.50k | Op.getOperand(1).getOpcode() == ISD::Constant) {
4290 | 933 | unsigned Elem = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue(); |
4291 | 933 | if (!GS.add(Op.getOperand(0), Elem)) |
4292 | 5 | return SDValue(); |
4293 | 928 | FoundOne = true; |
4294 | 1.56k | } else if (Op.isUndef()) {
4295 | 915 | GS.addUndef(); |
4296 | 1.56k | } else { |
4297 | 653 | if (!GS.add(SDValue(), ResidueOps.size())) |
4298 | 0 | return SDValue(); |
4299 | 653 | ResidueOps.push_back(BVN->getOperand(I)); |
4300 | 653 | } |
4301 | 2.50k | } |
4302 | 316 | |
4303 | 316 | // Nothing to do if there are no EXTRACT_VECTOR_ELTs. |
4304 | 311 | if (!FoundOne)
4305 | 173 | return SDValue(); |
4306 | 138 | |
4307 | 138 | // Create the BUILD_VECTOR for the remaining elements, if any. |
4308 | 138 | if (!ResidueOps.empty()) {
4309 | 36 | while (ResidueOps.size() < NumElements) |
4310 | 30 | ResidueOps.push_back(DAG.getUNDEF(ResidueOps[0].getValueType())); |
4311 | 10 | for (auto &Op : GS.Ops) { |
4312 | 10 | if (!Op.getNode()) {
4313 | 6 | Op = DAG.getBuildVector(VT, SDLoc(BVN), ResidueOps); |
4314 | 6 | break; |
4315 | 6 | } |
4316 | 138 | } |
4317 | 6 | } |
4318 | 138 | return GS.getNode(DAG, SDLoc(BVN)); |
4319 | 316 | } |
4320 | | |
4321 | | // Combine GPR scalar values Elems into a vector of type VT. |
4322 | | static SDValue buildVector(SelectionDAG &DAG, const SDLoc &DL, EVT VT, |
4323 | 172 | SmallVectorImpl<SDValue> &Elems) { |
4324 | 172 | // See whether there is a single replicated value. |
4325 | 172 | SDValue Single; |
4326 | 172 | unsigned int NumElements = Elems.size(); |
4327 | 172 | unsigned int Count = 0; |
4328 | 757 | for (auto Elem : Elems) { |
4329 | 757 | if (!Elem.isUndef()) {
4330 | 496 | if (!Single.getNode()) |
4331 | 172 | Single = Elem; |
4332 | 324 | else if (Elem != Single) {
4333 | 78 | Single = SDValue(); |
4334 | 78 | break; |
4335 | 78 | } |
4336 | 418 | Count += 1; |
4337 | 418 | } |
4338 | 757 | } |
4339 | 172 | // There are three cases here: |
4340 | 172 | // |
4341 | 172 | // - if the only defined element is a loaded one, the best sequence |
4342 | 172 | // is a replicating load. |
4343 | 172 | // |
4344 | 172 | // - otherwise, if the only defined element is an i64 value, we will |
4345 | 172 | // end up with the same VLVGP sequence regardless of whether we short-cut |
4346 | 172 | // for replication or fall through to the later code. |
4347 | 172 | // |
4348 | 172 | // - otherwise, if the only defined element is an i32 or smaller value, |
4349 | 172 | // we would need 2 instructions to replicate it: VLVGP followed by VREPx. |
4350 | 172 | // This is only a win if the single defined element is used more than once. |
4351 | 172 | // In other cases we're better off using a single VLVGx. |
4352 | 172 | if (Single.getNode() && (Count > 1 || Single.getOpcode() == ISD::LOAD))
4353 | 62 | return DAG.getNode(SystemZISD::REPLICATE, DL, VT, Single); |
4354 | 110 | |
4355 | 110 | // If all elements are loads, use VLREP/VLEs (below). |
4356 | 110 | bool AllLoads = true; |
4357 | 110 | for (auto Elem : Elems) |
4358 | 114 | if (Elem.getOpcode() != ISD::LOAD || cast<LoadSDNode>(Elem)->isIndexed()) {
4359 | 110 | AllLoads = false; |
4360 | 110 | break; |
4361 | 110 | } |
4362 | 110 | |
4363 | 110 | // The best way of building a v2i64 from two i64s is to use VLVGP. |
4364 | 110 | if (VT == MVT::v2i64 && !AllLoads)
4365 | 35 | return joinDwords(DAG, DL, Elems[0], Elems[1]); |
4366 | 75 | |
4367 | 75 | // Use a 64-bit merge high to combine two doubles. |
4368 | 75 | if (VT == MVT::v2f64 && !AllLoads)
4369 | 11 | return buildMergeScalars(DAG, DL, VT, Elems[0], Elems[1]); |
4370 | 64 | |
4371 | 64 | // Build v4f32 values directly from the FPRs: |
4372 | 64 | // |
4373 | 64 | // <Axxx> <Bxxx> <Cxxx> <Dxxx>
4374 | 64 | // V V VMRHF |
4375 | 64 | // <ABxx> <CDxx> |
4376 | 64 | // V VMRHG |
4377 | 64 | // <ABCD> |
4378 | 64 | if (VT == MVT::v4f32 && !AllLoads) {
4379 | 12 | SDValue Op01 = buildMergeScalars(DAG, DL, VT, Elems[0], Elems[1]); |
4380 | 12 | SDValue Op23 = buildMergeScalars(DAG, DL, VT, Elems[2], Elems[3]); |
4381 | 12 | // Avoid unnecessary undefs by reusing the other operand. |
4382 | 12 | if (Op01.isUndef()) |
4383 | 1 | Op01 = Op23; |
4384 | 11 | else if (11 Op23.isUndef()11 ) |
4385 | 2 | Op23 = Op01; |
4386 | 12 | // Merging identical replications is a no-op. |
4387 | 12 | if (Op01.getOpcode() == SystemZISD::REPLICATE && Op01 == Op23)
4388 | 1 | return Op01; |
4389 | 11 | Op01 = DAG.getNode(ISD::BITCAST, DL, MVT::v2i64, Op01); |
4390 | 11 | Op23 = DAG.getNode(ISD::BITCAST, DL, MVT::v2i64, Op23); |
4391 | 11 | SDValue Op = DAG.getNode(SystemZISD::MERGE_HIGH, |
4392 | 11 | DL, MVT::v2i64, Op01, Op23); |
4393 | 11 | return DAG.getNode(ISD::BITCAST, DL, VT, Op); |
4394 | 11 | } |
4395 | 52 | |
4396 | 52 | // Collect the constant terms. |
4397 | 52 | SmallVector<SDValue, SystemZ::VectorBytes> Constants(NumElements, SDValue()); |
4398 | 52 | SmallVector<bool, SystemZ::VectorBytes> Done(NumElements, false); |
4399 | 52 | |
4400 | 52 | unsigned NumConstants = 0; |
4401 | 492 | for (unsigned I = 0; I < NumElements; ++I) {
4402 | 440 | SDValue Elem = Elems[I]; |
4403 | 440 | if (Elem.getOpcode() == ISD::Constant || |
4404 | 440 | Elem.getOpcode() == ISD::ConstantFP) {
4405 | 99 | NumConstants += 1; |
4406 | 99 | Constants[I] = Elem; |
4407 | 99 | Done[I] = true; |
4408 | 99 | } |
4409 | 440 | } |
4410 | 52 | // If there was at least one constant, fill in the other elements of |
4411 | 52 | // Constants with undefs to get a full vector constant and use that |
4412 | 52 | // as the starting point. |
4413 | 52 | SDValue Result; |
4414 | 52 | if (NumConstants > 0) {
4415 | 127 | for (unsigned I = 0; I < NumElements; ++I)
4416 | 116 | if (!Constants[I].getNode())
4417 | 17 | Constants[I] = DAG.getUNDEF(Elems[I].getValueType()); |
4418 | 11 | Result = DAG.getBuildVector(VT, DL, Constants); |
4419 | 52 | } else { |
4420 | 41 | // Otherwise try to use VLREP or VLVGP to start the sequence in order to |
4421 | 41 | // avoid a false dependency on any previous contents of the vector |
4422 | 41 | // register. |
4423 | 41 | |
4424 | 41 | // Use a VLREP if at least one element is a load. |
4425 | 41 | unsigned LoadElIdx = UINT_MAX; |
4426 | 365 | for (unsigned I = 0; I < NumElements; ++I)
4427 | 324 | if (Elems[I].getOpcode() == ISD::LOAD &&
4428 | 324 | cast<LoadSDNode>(Elems[I])->isUnindexed()) {
4429 | 0 | LoadElIdx = I; |
4430 | 0 | break; |
4431 | 0 | } |
4432 | 41 | if (LoadElIdx != UINT_MAX) {
4433 | 0 | Result = DAG.getNode(SystemZISD::REPLICATE, DL, VT, Elems[LoadElIdx]); |
4434 | 0 | Done[LoadElIdx] = true; |
4435 | 41 | } else { |
4436 | 41 | // Try to use VLVGP. |
4437 | 41 | unsigned I1 = NumElements / 2 - 1; |
4438 | 41 | unsigned I2 = NumElements - 1; |
4439 | 41 | bool Def1 = !Elems[I1].isUndef(); |
4440 | 41 | bool Def2 = !Elems[I2].isUndef(); |
4441 | 41 | if (Def1 || Def2) {
4442 | 24 | SDValue Elem1 = Elems[Def1 ? I1 : I2];
4443 | 24 | SDValue Elem2 = Elems[Def2 ? I2 : I1];
4444 | 24 | Result = DAG.getNode(ISD::BITCAST, DL, VT, |
4445 | 24 | joinDwords(DAG, DL, Elem1, Elem2)); |
4446 | 24 | Done[I1] = true; |
4447 | 24 | Done[I2] = true; |
4448 | 24 | } else |
4449 | 17 | Result = DAG.getUNDEF(VT); |
4450 | 41 | } |
4451 | 41 | } |
4452 | 52 | |
4453 | 52 | // Use VLVGx to insert the other elements. |
4454 | 492 | for (unsigned I = 0; I < NumElements; ++I)
4455 | 440 | if (!Done[I] && !Elems[I].isUndef())
4456 | 123 | Result = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, VT, Result, Elems[I], |
4457 | 123 | DAG.getConstant(I, DL, MVT::i32)); |
4458 | 172 | return Result; |
4459 | 172 | } |
4460 | | |
4461 | | SDValue SystemZTargetLowering::lowerBUILD_VECTOR(SDValue Op, |
4462 | 830 | SelectionDAG &DAG) const { |
4463 | 830 | const SystemZInstrInfo *TII = |
4464 | 830 | static_cast<const SystemZInstrInfo *>(Subtarget.getInstrInfo()); |
4465 | 830 | auto *BVN = cast<BuildVectorSDNode>(Op.getNode()); |
4466 | 830 | SDLoc DL(Op); |
4467 | 830 | EVT VT = Op.getValueType(); |
4468 | 830 | |
4469 | 830 | if (BVN->isConstant()) {
4470 | 514 | // Try using VECTOR GENERATE BYTE MASK. This is the architecturally- |
4471 | 514 | // preferred way of creating all-zero and all-one vectors so give it |
4472 | 514 | // priority over other methods below. |
4473 | 514 | uint64_t Mask = 0; |
4474 | 514 | if (tryBuildVectorByteMask(BVN, Mask)) {
4475 | 169 | SDValue Op = DAG.getNode(SystemZISD::BYTE_MASK, DL, MVT::v16i8, |
4476 | 169 | DAG.getConstant(Mask, DL, MVT::i32)); |
4477 | 169 | return DAG.getNode(ISD::BITCAST, DL, VT, Op); |
4478 | 169 | } |
4479 | 345 | |
4480 | 345 | // Try using some form of replication. |
4481 | 345 | APInt SplatBits, SplatUndef; |
4482 | 345 | unsigned SplatBitSize; |
4483 | 345 | bool HasAnyUndefs; |
4484 | 345 | if (BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs, |
4485 | 345 | 8, true) && |
4486 | 345 | SplatBitSize <= 64) {
4487 | 306 | // First try assuming that any undefined bits above the highest set bit |
4488 | 306 | // and below the lowest set bit are 1s. This increases the likelihood of |
4489 | 306 | // being able to use a sign-extended element value in VECTOR REPLICATE |
4490 | 306 | // IMMEDIATE or a wraparound mask in VECTOR GENERATE MASK. |
4491 | 306 | uint64_t SplatBitsZ = SplatBits.getZExtValue(); |
4492 | 306 | uint64_t SplatUndefZ = SplatUndef.getZExtValue(); |
4493 | 306 | uint64_t Lower = (SplatUndefZ |
4494 | 306 | & ((uint64_t(1) << findFirstSet(SplatBitsZ)) - 1)); |
4495 | 306 | uint64_t Upper = (SplatUndefZ |
4496 | 306 | & ~((uint64_t(1) << findLastSet(SplatBitsZ)) - 1)); |
4497 | 306 | uint64_t Value = SplatBitsZ | Upper | Lower; |
4498 | 306 | SDValue Op = tryBuildVectorReplicate(DAG, TII, DL, VT, Value, |
4499 | 306 | SplatBitSize); |
4500 | 306 | if (Op.getNode()) |
4501 | 249 | return Op; |
4502 | 57 | |
4503 | 57 | // Now try assuming that any undefined bits between the first and |
4504 | 57 | // last defined set bits are set. This increases the chances of |
4505 | 57 | // using a non-wraparound mask. |
4506 | 57 | uint64_t Middle = SplatUndefZ & ~Upper & ~Lower; |
4507 | 57 | Value = SplatBitsZ | Middle; |
4508 | 57 | Op = tryBuildVectorReplicate(DAG, TII, DL, VT, Value, SplatBitSize); |
4509 | 57 | if (Op.getNode()) |
4510 | 3 | return Op; |
4511 | 93 | } |
4512 | 93 | |
4513 | 93 | // Fall back to loading it from memory. |
4514 | 93 | return SDValue(); |
4515 | 93 | } |
4516 | 316 | |
4517 | 316 | // See if we should use shuffles to construct the vector from other vectors. |
4518 | 316 | if (SDValue Res = tryBuildVectorShuffle(DAG, BVN))
4519 | 138 | return Res; |
4520 | 178 | |
4521 | 178 | // Detect SCALAR_TO_VECTOR conversions. |
4522 | 178 | if (isOperationLegal(ISD::SCALAR_TO_VECTOR, VT) && isScalarToVector(Op))
4523 | 6 | return buildScalarToVector(DAG, DL, VT, Op.getOperand(0)); |
4524 | 172 | |
4525 | 172 | // Otherwise use buildVector to build the vector up from GPRs. |
4526 | 172 | unsigned NumElements = Op.getNumOperands(); |
4527 | 172 | SmallVector<SDValue, SystemZ::VectorBytes> Ops(NumElements); |
4528 | 1.11k | for (unsigned I = 0; I < NumElements; ++I)
4529 | 946 | Ops[I] = Op.getOperand(I); |
4530 | 830 | return buildVector(DAG, DL, VT, Ops); |
4531 | 830 | } |
4532 | | |
4533 | | SDValue SystemZTargetLowering::lowerVECTOR_SHUFFLE(SDValue Op, |
4534 | 1.10k | SelectionDAG &DAG) const { |
4535 | 1.10k | auto *VSN = cast<ShuffleVectorSDNode>(Op.getNode()); |
4536 | 1.10k | SDLoc DL(Op); |
4537 | 1.10k | EVT VT = Op.getValueType(); |
4538 | 1.10k | unsigned NumElements = VT.getVectorNumElements(); |
4539 | 1.10k | |
4540 | 1.10k | if (VSN->isSplat()) {
4541 | 17 | SDValue Op0 = Op.getOperand(0); |
4542 | 17 | unsigned Index = VSN->getSplatIndex(); |
4543 | 17 | assert(Index < VT.getVectorNumElements() && |
4544 | 17 | "Splat index should be defined and in first operand"); |
4545 | 17 | // See whether the value we're splatting is directly available as a scalar. |
4546 | 17 | if ((Index == 0 && Op0.getOpcode() == ISD::SCALAR_TO_VECTOR) ||
4547 | 17 | Op0.getOpcode() == ISD::BUILD_VECTOR) |
4548 | 0 | return DAG.getNode(SystemZISD::REPLICATE, DL, VT, Op0.getOperand(Index)); |
4549 | 17 | // Otherwise keep it as a vector-to-vector operation. |
4550 | 17 | return DAG.getNode(SystemZISD::SPLAT, DL, VT, Op.getOperand(0), |
4551 | 17 | DAG.getConstant(Index, DL, MVT::i32)); |
4552 | 17 | } |
4553 | 1.08k | |
4554 | 1.08k | GeneralShuffle GS(VT); |
4555 | 7.09k | for (unsigned I = 0; I < NumElements; ++I) {
4556 | 6.00k | int Elt = VSN->getMaskElt(I); |
4557 | 6.00k | if (Elt < 0) |
4558 | 2.82k | GS.addUndef(); |
4559 | 3.18k | else if (!GS.add(Op.getOperand(unsigned(Elt) / NumElements),
4560 | 3.18k | unsigned(Elt) % NumElements)) |
4561 | 0 | return SDValue(); |
4562 | 6.00k | } |
4563 | 1.08k | return GS.getNode(DAG, SDLoc(VSN)); |
4564 | 1.10k | } |
4565 | | |
4566 | | SDValue SystemZTargetLowering::lowerSCALAR_TO_VECTOR(SDValue Op, |
4567 | 25 | SelectionDAG &DAG) const { |
4568 | 25 | SDLoc DL(Op); |
4569 | 25 | // Just insert the scalar into element 0 of an undefined vector. |
4570 | 25 | return DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, |
4571 | 25 | Op.getValueType(), DAG.getUNDEF(Op.getValueType()), |
4572 | 25 | Op.getOperand(0), DAG.getConstant(0, DL, MVT::i32)); |
4573 | 25 | } |
4574 | | |
4575 | | SDValue SystemZTargetLowering::lowerINSERT_VECTOR_ELT(SDValue Op, |
4576 | 34 | SelectionDAG &DAG) const { |
4577 | 34 | // Handle insertions of floating-point values. |
4578 | 34 | SDLoc DL(Op); |
4579 | 34 | SDValue Op0 = Op.getOperand(0); |
4580 | 34 | SDValue Op1 = Op.getOperand(1); |
4581 | 34 | SDValue Op2 = Op.getOperand(2); |
4582 | 34 | EVT VT = Op.getValueType(); |
4583 | 34 | |
4584 | 34 | // Insertions into constant indices of a v2f64 can be done using VPDI. |
4585 | 34 | // However, if the inserted value is a bitcast or a constant then it's |
4586 | 34 | // better to use GPRs, as below. |
4587 | 34 | if (VT == MVT::v2f64 && |
4588 | 21 | Op1.getOpcode() != ISD::BITCAST && |
4589 | 21 | Op1.getOpcode() != ISD::ConstantFP && |
4590 | 34 | Op2.getOpcode() == ISD::Constant) {
4591 | 16 | uint64_t Index = dyn_cast<ConstantSDNode>(Op2)->getZExtValue(); |
4592 | 16 | unsigned Mask = VT.getVectorNumElements() - 1; |
4593 | 16 | if (Index <= Mask) |
4594 | 16 | return Op; |
4595 | 18 | } |
4596 | 18 | |
4597 | 18 | // Otherwise bitcast to the equivalent integer form and insert via a GPR. |
4598 | 18 | MVT IntVT = MVT::getIntegerVT(VT.getScalarSizeInBits()); |
4599 | 18 | MVT IntVecVT = MVT::getVectorVT(IntVT, VT.getVectorNumElements()); |
4600 | 18 | SDValue Res = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, IntVecVT, |
4601 | 18 | DAG.getNode(ISD::BITCAST, DL, IntVecVT, Op0), |
4602 | 18 | DAG.getNode(ISD::BITCAST, DL, IntVT, Op1), Op2); |
4603 | 18 | return DAG.getNode(ISD::BITCAST, DL, VT, Res); |
4604 | 18 | } |
4605 | | |
4606 | | SDValue |
4607 | | SystemZTargetLowering::lowerEXTRACT_VECTOR_ELT(SDValue Op, |
4608 | 180 | SelectionDAG &DAG) const { |
4609 | 180 | // Handle extractions of floating-point values. |
4610 | 180 | SDLoc DL(Op); |
4611 | 180 | SDValue Op0 = Op.getOperand(0); |
4612 | 180 | SDValue Op1 = Op.getOperand(1); |
4613 | 180 | EVT VT = Op.getValueType(); |
4614 | 180 | EVT VecVT = Op0.getValueType(); |
4615 | 180 | |
4616 | 180 | // Extractions of constant indices can be done directly. |
4617 | 180 | if (auto *CIndexN = dyn_cast<ConstantSDNode>(Op1)) {
4618 | 176 | uint64_t Index = CIndexN->getZExtValue(); |
4619 | 176 | unsigned Mask = VecVT.getVectorNumElements() - 1; |
4620 | 176 | if (Index <= Mask) |
4621 | 176 | return Op; |
4622 | 4 | } |
4623 | 4 | |
4624 | 4 | // Otherwise bitcast to the equivalent integer form and extract via a GPR. |
4625 | 4 | MVT IntVT = MVT::getIntegerVT(VT.getSizeInBits()); |
4626 | 4 | MVT IntVecVT = MVT::getVectorVT(IntVT, VecVT.getVectorNumElements()); |
4627 | 4 | SDValue Res = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, IntVT, |
4628 | 4 | DAG.getNode(ISD::BITCAST, DL, IntVecVT, Op0), Op1); |
4629 | 4 | return DAG.getNode(ISD::BITCAST, DL, VT, Res); |
4630 | 4 | } |
4631 | | |
4632 | | SDValue |
4633 | | SystemZTargetLowering::lowerExtendVectorInreg(SDValue Op, SelectionDAG &DAG, |
4634 | 483 | unsigned UnpackHigh) const { |
4635 | 483 | SDValue PackedOp = Op.getOperand(0); |
4636 | 483 | EVT OutVT = Op.getValueType(); |
4637 | 483 | EVT InVT = PackedOp.getValueType(); |
4638 | 483 | unsigned ToBits = OutVT.getScalarSizeInBits(); |
4639 | 483 | unsigned FromBits = InVT.getScalarSizeInBits(); |
4640 | 706 | do { |
4641 | 706 | FromBits *= 2; |
4642 | 706 | EVT OutVT = MVT::getVectorVT(MVT::getIntegerVT(FromBits), |
4643 | 706 | SystemZ::VectorBits / FromBits); |
4644 | 706 | PackedOp = DAG.getNode(UnpackHigh, SDLoc(PackedOp), OutVT, PackedOp); |
4645 | 706 | } while (FromBits != ToBits); |
4646 | 483 | return PackedOp; |
4647 | 483 | } |
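
The widening loop above doubles the element width with one unpack per step until it reaches the result type's element width. A minimal standalone sketch of that step count, using the hypothetical helper name numUnpackSteps (illustrative only, not part of this file):

#include <cassert>

// Mirrors the do/while loop above: doubles FromBits once per unpack until it
// reaches ToBits, e.g. i8 -> i32 takes two steps (8 -> 16 -> 32).
static unsigned numUnpackSteps(unsigned FromBits, unsigned ToBits) {
  unsigned Steps = 0;
  do {
    FromBits *= 2;
    ++Steps;
  } while (FromBits != ToBits);
  return Steps;
}

int main() {
  assert(numUnpackSteps(8, 32) == 2);   // v16i8 -> v4i32
  assert(numUnpackSteps(16, 64) == 2);  // v8i16 -> v2i64
  assert(numUnpackSteps(32, 64) == 1);  // v4i32 -> v2i64
  return 0;
}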
4648 | | |
4649 | | SDValue SystemZTargetLowering::lowerShift(SDValue Op, SelectionDAG &DAG, |
4650 | 136 | unsigned ByScalar) const { |
4651 | 136 | // Look for cases where a vector shift can use the *_BY_SCALAR form. |
4652 | 136 | SDValue Op0 = Op.getOperand(0); |
4653 | 136 | SDValue Op1 = Op.getOperand(1); |
4654 | 136 | SDLoc DL(Op); |
4655 | 136 | EVT VT = Op.getValueType(); |
4656 | 136 | unsigned ElemBitSize = VT.getScalarSizeInBits(); |
4657 | 136 | |
4658 | 136 | // See whether the shift vector is a splat represented as BUILD_VECTOR. |
4659 | 136 | if (auto *BVN = dyn_cast<BuildVectorSDNode>(Op1)) {
4660 | 100 | APInt SplatBits, SplatUndef; |
4661 | 100 | unsigned SplatBitSize; |
4662 | 100 | bool HasAnyUndefs; |
4663 | 100 | // Check for constant splats. Use ElemBitSize as the minimum element |
4664 | 100 | // width and reject splats that need wider elements. |
4665 | 100 | if (BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs, |
4666 | 100 | ElemBitSize, true) && |
4667 | 100 | SplatBitSize == ElemBitSize) {
4668 | 88 | SDValue Shift = DAG.getConstant(SplatBits.getZExtValue() & 0xfff, |
4669 | 88 | DL, MVT::i32); |
4670 | 88 | return DAG.getNode(ByScalar, DL, VT, Op0, Shift); |
4671 | 88 | } |
4672 | 12 | // Check for variable splats. |
4673 | 12 | BitVector UndefElements; |
4674 | 12 | SDValue Splat = BVN->getSplatValue(&UndefElements); |
4675 | 12 | if (Splat) {
4676 | 12 | // Since i32 is the smallest legal type, we either need a no-op |
4677 | 12 | // or a truncation. |
4678 | 12 | SDValue Shift = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Splat); |
4679 | 12 | return DAG.getNode(ByScalar, DL, VT, Op0, Shift); |
4680 | 12 | } |
4681 | 36 | } |
4682 | 36 | |
4683 | 36 | // See whether the shift vector is a splat represented as SHUFFLE_VECTOR, |
4684 | 36 | // and the shift amount is directly available in a GPR. |
4685 | 36 | if (auto *VSN = dyn_cast<ShuffleVectorSDNode>(Op1)) {
4686 | 0 | if (VSN->isSplat()) {
4687 | 0 | SDValue VSNOp0 = VSN->getOperand(0); |
4688 | 0 | unsigned Index = VSN->getSplatIndex(); |
4689 | 0 | assert(Index < VT.getVectorNumElements() && |
4690 | 0 | "Splat index should be defined and in first operand"); |
4691 | 0 | if ((Index == 0 && VSNOp0.getOpcode() == ISD::SCALAR_TO_VECTOR) ||
4692 | 0 | VSNOp0.getOpcode() == ISD::BUILD_VECTOR) {
4693 | 0 | // Since i32 is the smallest legal type, we either need a no-op |
4694 | 0 | // or a truncation. |
4695 | 0 | SDValue Shift = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, |
4696 | 0 | VSNOp0.getOperand(Index)); |
4697 | 0 | return DAG.getNode(ByScalar, DL, VT, Op0, Shift); |
4698 | 0 | } |
4699 | 36 | } |
4700 | 0 | } |
4701 | 36 | |
4702 | 36 | // Otherwise just treat the current form as legal. |
4703 | 36 | return Op; |
4704 | 36 | } |
4705 | | |
4706 | | SDValue SystemZTargetLowering::LowerOperation(SDValue Op, |
4707 | 7.95k | SelectionDAG &DAG) const { |
4708 | 7.95k | switch (Op.getOpcode()) { |
4709 | 2 | case ISD::FRAMEADDR: |
4710 | 2 | return lowerFRAMEADDR(Op, DAG); |
4711 | 1 | case ISD::RETURNADDR: |
4712 | 1 | return lowerRETURNADDR(Op, DAG); |
4713 | 553 | case ISD::BR_CC: |
4714 | 553 | return lowerBR_CC(Op, DAG); |
4715 | 840 | case ISD::SELECT_CC: |
4716 | 840 | return lowerSELECT_CC(Op, DAG); |
4717 | 1.32k | case ISD::SETCC: |
4718 | 1.32k | return lowerSETCC(Op, DAG); |
4719 | 350 | case ISD::GlobalAddress: |
4720 | 350 | return lowerGlobalAddress(cast<GlobalAddressSDNode>(Op), DAG); |
4721 | 14 | case ISD::GlobalTLSAddress: |
4722 | 14 | return lowerGlobalTLSAddress(cast<GlobalAddressSDNode>(Op), DAG); |
4723 | 1 | case ISD::BlockAddress: |
4724 | 1 | return lowerBlockAddress(cast<BlockAddressSDNode>(Op), DAG); |
4725 | 3 | case ISD::JumpTable: |
4726 | 3 | return lowerJumpTable(cast<JumpTableSDNode>(Op), DAG); |
4727 | 306 | case ISD::ConstantPool: |
4728 | 306 | return lowerConstantPool(cast<ConstantPoolSDNode>(Op), DAG); |
4729 | 13 | case ISD::BITCAST: |
4730 | 13 | return lowerBITCAST(Op, DAG); |
4731 | 0 | case ISD::VASTART: |
4732 | 0 | return lowerVASTART(Op, DAG); |
4733 | 0 | case ISD::VACOPY: |
4734 | 0 | return lowerVACOPY(Op, DAG); |
4735 | 26 | case ISD::DYNAMIC_STACKALLOC: |
4736 | 26 | return lowerDYNAMIC_STACKALLOC(Op, DAG); |
4737 | 3 | case ISD::GET_DYNAMIC_AREA_OFFSET: |
4738 | 3 | return lowerGET_DYNAMIC_AREA_OFFSET(Op, DAG); |
4739 | 12 | case ISD::SMUL_LOHI: |
4740 | 12 | return lowerSMUL_LOHI(Op, DAG); |
4741 | 22 | case ISD::UMUL_LOHI: |
4742 | 22 | return lowerUMUL_LOHI(Op, DAG); |
4743 | 105 | case ISD::SDIVREM: |
4744 | 105 | return lowerSDIVREM(Op, DAG); |
4745 | 76 | case ISD::UDIVREM: |
4746 | 76 | return lowerUDIVREM(Op, DAG); |
4747 | 432 | case ISD::OR: |
4748 | 432 | return lowerOR(Op, DAG); |
4749 | 11 | case ISD::CTPOP: |
4750 | 11 | return lowerCTPOP(Op, DAG); |
4751 | 7 | case ISD::ATOMIC_FENCE: |
4752 | 7 | return lowerATOMIC_FENCE(Op, DAG); |
4753 | 28 | case ISD::ATOMIC_SWAP: |
4754 | 28 | return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_SWAPW); |
4755 | 21 | case ISD::ATOMIC_STORE: |
4756 | 21 | return lowerATOMIC_STORE(Op, DAG); |
4757 | 17 | case ISD::ATOMIC_LOAD: |
4758 | 17 | return lowerATOMIC_LOAD(Op, DAG); |
4759 | 94 | case ISD::ATOMIC_LOAD_ADD: |
4760 | 94 | return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_ADD); |
4761 | 72 | case ISD::ATOMIC_LOAD_SUB: |
4762 | 72 | return lowerATOMIC_LOAD_SUB(Op, DAG); |
4763 | 64 | case ISD::ATOMIC_LOAD_AND: |
4764 | 64 | return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_AND); |
4765 | 64 | case ISD::ATOMIC_LOAD_OR: |
4766 | 64 | return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_OR); |
4767 | 56 | case ISD::ATOMIC_LOAD_XOR: |
4768 | 56 | return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_XOR); |
4769 | 52 | case ISD::ATOMIC_LOAD_NAND: |
4770 | 52 | return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_NAND); |
4771 | 32 | case ISD::ATOMIC_LOAD_MIN: |
4772 | 32 | return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_MIN); |
4773 | 14 | case ISD::ATOMIC_LOAD_MAX: |
4774 | 14 | return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_MAX); |
4775 | 14 | case ISD::ATOMIC_LOAD_UMIN: |
4776 | 14 | return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_UMIN); |
4777 | 14 | case ISD::ATOMIC_LOAD_UMAX: |
4778 | 14 | return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_UMAX); |
4779 | 39 | case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS: |
4780 | 39 | return lowerATOMIC_CMP_SWAP(Op, DAG); |
4781 | 3 | case ISD::STACKSAVE: |
4782 | 3 | return lowerSTACKSAVE(Op, DAG); |
4783 | 2 | case ISD::STACKRESTORE: |
4784 | 2 | return lowerSTACKRESTORE(Op, DAG); |
4785 | 8 | case ISD::PREFETCH: |
4786 | 8 | return lowerPREFETCH(Op, DAG); |
4787 | 50 | case ISD::INTRINSIC_W_CHAIN: |
4788 | 50 | return lowerINTRINSIC_W_CHAIN(Op, DAG); |
4789 | 420 | case ISD::INTRINSIC_WO_CHAIN: |
4790 | 420 | return lowerINTRINSIC_WO_CHAIN(Op, DAG); |
4791 | 830 | case ISD::BUILD_VECTOR: |
4792 | 830 | return lowerBUILD_VECTOR(Op, DAG); |
4793 | 1.10k | case ISD::VECTOR_SHUFFLE: |
4794 | 1.10k | return lowerVECTOR_SHUFFLE(Op, DAG); |
4795 | 25 | case ISD::SCALAR_TO_VECTOR: |
4796 | 25 | return lowerSCALAR_TO_VECTOR(Op, DAG); |
4797 | 34 | case ISD::INSERT_VECTOR_ELT: |
4798 | 34 | return lowerINSERT_VECTOR_ELT(Op, DAG); |
4799 | 180 | case ISD::EXTRACT_VECTOR_ELT: |
4800 | 180 | return lowerEXTRACT_VECTOR_ELT(Op, DAG); |
4801 | 465 | case ISD::SIGN_EXTEND_VECTOR_INREG: |
4802 | 465 | return lowerExtendVectorInreg(Op, DAG, SystemZISD::UNPACK_HIGH); |
4803 | 18 | case ISD::ZERO_EXTEND_VECTOR_INREG: |
4804 | 18 | return lowerExtendVectorInreg(Op, DAG, SystemZISD::UNPACKL_HIGH); |
4805 | 42 | case ISD::SHL: |
4806 | 42 | return lowerShift(Op, DAG, SystemZISD::VSHL_BY_SCALAR); |
4807 | 24 | case ISD::SRL: |
4808 | 24 | return lowerShift(Op, DAG, SystemZISD::VSRL_BY_SCALAR); |
4809 | 70 | case ISD::SRA: |
4810 | 70 | return lowerShift(Op, DAG, SystemZISD::VSRA_BY_SCALAR); |
4811 | 0 | default: |
4812 | 0 | llvm_unreachable("Unexpected node to lower"); |
4813 | 0 | } |
4814 | 0 | } |
4815 | | |
4816 | | // Lower operations with invalid operand or result types (currently used |
4817 | | // only for 128-bit integer types). |
4818 | | |
4819 | 22 | static SDValue lowerI128ToGR128(SelectionDAG &DAG, SDValue In) { |
4820 | 22 | SDLoc DL(In); |
4821 | 22 | SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i64, In, |
4822 | 22 | DAG.getIntPtrConstant(0, DL)); |
4823 | 22 | SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i64, In, |
4824 | 22 | DAG.getIntPtrConstant(1, DL)); |
4825 | 22 | SDNode *Pair = DAG.getMachineNode(SystemZ::PAIR128, DL, |
4826 | 22 | MVT::Untyped, Hi, Lo); |
4827 | 22 | return SDValue(Pair, 0); |
4828 | 22 | } |
4829 | | |
4830 | 11 | static SDValue lowerGR128ToI128(SelectionDAG &DAG, SDValue In) { |
4831 | 11 | SDLoc DL(In); |
4832 | 11 | SDValue Hi = DAG.getTargetExtractSubreg(SystemZ::subreg_h64, |
4833 | 11 | DL, MVT::i64, In); |
4834 | 11 | SDValue Lo = DAG.getTargetExtractSubreg(SystemZ::subreg_l64, |
4835 | 11 | DL, MVT::i64, In); |
4836 | 11 | return DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i128, Lo, Hi); |
4837 | 11 | } |
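
The two helpers above move an i128 value into and out of an untyped 64-bit register pair: EXTRACT_ELEMENT index 0 is the low half, index 1 the high half, and BUILD_PAIR recombines them. A minimal sketch of the same Lo/Hi arithmetic, assuming the GCC/Clang unsigned __int128 extension (illustrative only):

#include <cassert>
#include <cstdint>

int main() {
  unsigned __int128 In =
      ((unsigned __int128)0x0123456789abcdefULL << 64) | 0xfedcba9876543210ULL;
  uint64_t Lo = (uint64_t)In;           // EXTRACT_ELEMENT ..., 0
  uint64_t Hi = (uint64_t)(In >> 64);   // EXTRACT_ELEMENT ..., 1
  unsigned __int128 Out = ((unsigned __int128)Hi << 64) | Lo;  // BUILD_PAIR Lo, Hi
  assert(Hi == 0x0123456789abcdefULL && Lo == 0xfedcba9876543210ULL);
  assert(Out == In);
  return 0;
}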
4838 | | |
4839 | | void |
4840 | | SystemZTargetLowering::LowerOperationWrapper(SDNode *N, |
4841 | | SmallVectorImpl<SDValue> &Results, |
4842 | 13 | SelectionDAG &DAG) const { |
4843 | 13 | switch (N->getOpcode()) { |
4844 | 1 | case ISD::ATOMIC_LOAD: { |
4845 | 1 | SDLoc DL(N); |
4846 | 1 | SDVTList Tys = DAG.getVTList(MVT::Untyped, MVT::Other); |
4847 | 1 | SDValue Ops[] = { N->getOperand(0), N->getOperand(1) }; |
4848 | 1 | MachineMemOperand *MMO = cast<AtomicSDNode>(N)->getMemOperand(); |
4849 | 1 | SDValue Res = DAG.getMemIntrinsicNode(SystemZISD::ATOMIC_LOAD_128, |
4850 | 1 | DL, Tys, Ops, MVT::i128, MMO); |
4851 | 1 | Results.push_back(lowerGR128ToI128(DAG, Res)); |
4852 | 1 | Results.push_back(Res.getValue(1)); |
4853 | 1 | break; |
4854 | 13 | } |
4855 | 2 | case ISD::ATOMIC_STORE: { |
4856 | 2 | SDLoc DL(N); |
4857 | 2 | SDVTList Tys = DAG.getVTList(MVT::Other); |
4858 | 2 | SDValue Ops[] = { N->getOperand(0), |
4859 | 2 | lowerI128ToGR128(DAG, N->getOperand(2)), |
4860 | 2 | N->getOperand(1) }; |
4861 | 2 | MachineMemOperand *MMO = cast<AtomicSDNode>(N)->getMemOperand(); |
4862 | 2 | SDValue Res = DAG.getMemIntrinsicNode(SystemZISD::ATOMIC_STORE_128, |
4863 | 2 | DL, Tys, Ops, MVT::i128, MMO); |
4864 | 2 | // We have to enforce sequential consistency by performing a |
4865 | 2 | // serialization operation after the store. |
4866 | 2 | if (cast<AtomicSDNode>(N)->getOrdering() == |
4867 | 2 | AtomicOrdering::SequentiallyConsistent) |
4868 | 1 | Res = SDValue(DAG.getMachineNode(SystemZ::Serialize, DL, |
4869 | 1 | MVT::Other, Res), 0); |
4870 | 2 | Results.push_back(Res); |
4871 | 2 | break; |
4872 | 13 | } |
4873 | 10 | case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS: { |
4874 | 10 | SDLoc DL(N); |
4875 | 10 | SDVTList Tys = DAG.getVTList(MVT::Untyped, MVT::Other, MVT::Glue); |
4876 | 10 | SDValue Ops[] = { N->getOperand(0), N->getOperand(1), |
4877 | 10 | lowerI128ToGR128(DAG, N->getOperand(2)), |
4878 | 10 | lowerI128ToGR128(DAG, N->getOperand(3)) }; |
4879 | 10 | MachineMemOperand *MMO = cast<AtomicSDNode>(N)->getMemOperand(); |
4880 | 10 | SDValue Res = DAG.getMemIntrinsicNode(SystemZISD::ATOMIC_CMP_SWAP_128, |
4881 | 10 | DL, Tys, Ops, MVT::i128, MMO); |
4882 | 10 | SDValue Success = emitSETCC(DAG, DL, Res.getValue(2), |
4883 | 10 | SystemZ::CCMASK_CS, SystemZ::CCMASK_CS_EQ); |
4884 | 10 | Success = DAG.getZExtOrTrunc(Success, DL, N->getValueType(1)); |
4885 | 10 | Results.push_back(lowerGR128ToI128(DAG, Res)); |
4886 | 10 | Results.push_back(Success); |
4887 | 10 | Results.push_back(Res.getValue(1)); |
4888 | 10 | break; |
4889 | 13 | } |
4890 | 0 | default: |
4891 | 0 | llvm_unreachable("Unexpected node to lower"); |
4892 | 13 | } |
4893 | 13 | } |
4894 | | |
4895 | | void |
4896 | | SystemZTargetLowering::ReplaceNodeResults(SDNode *N, |
4897 | | SmallVectorImpl<SDValue> &Results, |
4898 | 11 | SelectionDAG &DAG) const { |
4899 | 11 | return LowerOperationWrapper(N, Results, DAG); |
4900 | 11 | } |
4901 | | |
4902 | 0 | const char *SystemZTargetLowering::getTargetNodeName(unsigned Opcode) const { |
4903 | 0 | #define OPCODE(NAME) case SystemZISD::NAME: return "SystemZISD::" #NAME |
4904 | 0 | switch ((SystemZISD::NodeType)Opcode) { |
4905 | 0 | case SystemZISD::FIRST_NUMBER: break; |
4906 | 0 | OPCODE(RET_FLAG);
4907 | 0 | OPCODE(CALL);
4908 | 0 | OPCODE(SIBCALL);
4909 | 0 | OPCODE(TLS_GDCALL);
4910 | 0 | OPCODE(TLS_LDCALL);
4911 | 0 | OPCODE(PCREL_WRAPPER);
4912 | 0 | OPCODE(PCREL_OFFSET);
4913 | 0 | OPCODE(IABS);
4914 | 0 | OPCODE(ICMP);
4915 | 0 | OPCODE(FCMP);
4916 | 0 | OPCODE(TM);
4917 | 0 | OPCODE(BR_CCMASK);
4918 | 0 | OPCODE(SELECT_CCMASK);
4919 | 0 | OPCODE(ADJDYNALLOC);
4920 | 0 | OPCODE(POPCNT);
4921 | 0 | OPCODE(SMUL_LOHI);
4922 | 0 | OPCODE(UMUL_LOHI);
4923 | 0 | OPCODE(SDIVREM);
4924 | 0 | OPCODE(UDIVREM);
4925 | 0 | OPCODE(MVC);
4926 | 0 | OPCODE(MVC_LOOP);
4927 | 0 | OPCODE(NC);
4928 | 0 | OPCODE(NC_LOOP);
4929 | 0 | OPCODE(OC);
4930 | 0 | OPCODE(OC_LOOP);
4931 | 0 | OPCODE(XC);
4932 | 0 | OPCODE(XC_LOOP);
4933 | 0 | OPCODE(CLC);
4934 | 0 | OPCODE(CLC_LOOP);
4935 | 0 | OPCODE(STPCPY);
4936 | 0 | OPCODE(STRCMP);
4937 | 0 | OPCODE(SEARCH_STRING);
4938 | 0 | OPCODE(IPM);
4939 | 0 | OPCODE(MEMBARRIER);
4940 | 0 | OPCODE(TBEGIN);
4941 | 0 | OPCODE(TBEGIN_NOFLOAT);
4942 | 0 | OPCODE(TEND);
4943 | 0 | OPCODE(BYTE_MASK);
4944 | 0 | OPCODE(ROTATE_MASK);
4945 | 0 | OPCODE(REPLICATE);
4946 | 0 | OPCODE(JOIN_DWORDS);
4947 | 0 | OPCODE(SPLAT);
4948 | 0 | OPCODE(MERGE_HIGH);
4949 | 0 | OPCODE(MERGE_LOW);
4950 | 0 | OPCODE(SHL_DOUBLE);
4951 | 0 | OPCODE(PERMUTE_DWORDS);
4952 | 0 | OPCODE(PERMUTE);
4953 | 0 | OPCODE(PACK);
4954 | 0 | OPCODE(PACKS_CC);
4955 | 0 | OPCODE(PACKLS_CC);
4956 | 0 | OPCODE(UNPACK_HIGH);
4957 | 0 | OPCODE(UNPACKL_HIGH);
4958 | 0 | OPCODE(UNPACK_LOW);
4959 | 0 | OPCODE(UNPACKL_LOW);
4960 | 0 | OPCODE(VSHL_BY_SCALAR);
4961 | 0 | OPCODE(VSRL_BY_SCALAR);
4962 | 0 | OPCODE(VSRA_BY_SCALAR);
4963 | 0 | OPCODE(VSUM);
4964 | 0 | OPCODE(VICMPE);
4965 | 0 | OPCODE(VICMPH);
4966 | 0 | OPCODE(VICMPHL);
4967 | 0 | OPCODE(VICMPES);
4968 | 0 | OPCODE(VICMPHS);
4969 | 0 | OPCODE(VICMPHLS);
4970 | 0 | OPCODE(VFCMPE);
4971 | 0 | OPCODE(VFCMPH);
4972 | 0 | OPCODE(VFCMPHE);
4973 | 0 | OPCODE(VFCMPES);
4974 | 0 | OPCODE(VFCMPHS);
4975 | 0 | OPCODE(VFCMPHES);
4976 | 0 | OPCODE(VFTCI);
4977 | 0 | OPCODE(VEXTEND);
4978 | 0 | OPCODE(VROUND);
4979 | 0 | OPCODE(VTM);
4980 | 0 | OPCODE(VFAE_CC);
4981 | 0 | OPCODE(VFAEZ_CC);
4982 | 0 | OPCODE(VFEE_CC);
4983 | 0 | OPCODE(VFEEZ_CC);
4984 | 0 | OPCODE(VFENE_CC);
4985 | 0 | OPCODE(VFENEZ_CC);
4986 | 0 | OPCODE(VISTR_CC);
4987 | 0 | OPCODE(VSTRC_CC);
4988 | 0 | OPCODE(VSTRCZ_CC);
4989 | 0 | OPCODE(TDC);
4990 | 0 | OPCODE(ATOMIC_SWAPW);
4991 | 0 | OPCODE(ATOMIC_LOADW_ADD);
4992 | 0 | OPCODE(ATOMIC_LOADW_SUB);
4993 | 0 | OPCODE(ATOMIC_LOADW_AND);
4994 | 0 | OPCODE(ATOMIC_LOADW_OR);
4995 | 0 | OPCODE(ATOMIC_LOADW_XOR);
4996 | 0 | OPCODE(ATOMIC_LOADW_NAND);
4997 | 0 | OPCODE(ATOMIC_LOADW_MIN);
4998 | 0 | OPCODE(ATOMIC_LOADW_MAX);
4999 | 0 | OPCODE(ATOMIC_LOADW_UMIN);
5000 | 0 | OPCODE(ATOMIC_LOADW_UMAX);
5001 | 0 | OPCODE(ATOMIC_CMP_SWAPW);
5002 | 0 | OPCODE(ATOMIC_CMP_SWAP);
5003 | 0 | OPCODE(ATOMIC_LOAD_128);
5004 | 0 | OPCODE(ATOMIC_STORE_128);
5005 | 0 | OPCODE(ATOMIC_CMP_SWAP_128);
5006 | 0 | OPCODE(LRV);
5007 | 0 | OPCODE(STRV);
5008 | 0 | OPCODE(PREFETCH); |
5009 | 0 | } |
5010 | 0 | return nullptr; |
5011 | 0 | #undef OPCODE |
5012 | 0 | } |
5013 | | |
5014 | | // Return true if VT is a vector whose elements are a whole number of bytes |
5015 | | // in width. Also check for presence of vector support. |
5016 | 331 | bool SystemZTargetLowering::canTreatAsByteVector(EVT VT) const { |
5017 | 331 | if (!Subtarget.hasVector()) |
5018 | 3 | return false; |
5019 | 328 | |
5020 | 328 | return VT.isVector() && VT.getScalarSizeInBits() % 8 == 0 && VT.isSimple();
5021 | 331 | } |
5022 | | |
5023 | | // Try to simplify an EXTRACT_VECTOR_ELT from a vector of type VecVT |
5024 | | // producing a result of type ResVT. Op is a possibly bitcast version |
5025 | | // of the input vector and Index is the index (based on type VecVT) that |
5026 | | // should be extracted. Return the new extraction if a simplification |
5027 | | // was possible or if Force is true. |
5028 | | SDValue SystemZTargetLowering::combineExtract(const SDLoc &DL, EVT ResVT, |
5029 | | EVT VecVT, SDValue Op, |
5030 | | unsigned Index, |
5031 | | DAGCombinerInfo &DCI, |
5032 | 4.10k | bool Force) const { |
5033 | 4.10k | SelectionDAG &DAG = DCI.DAG; |
5034 | 4.10k | |
5035 | 4.10k | // The number of bytes being extracted. |
5036 | 4.10k | unsigned BytesPerElement = VecVT.getVectorElementType().getStoreSize(); |
5037 | 4.10k | |
5038 | 5.32k | for (;;) { |
5039 | 5.32k | unsigned Opcode = Op.getOpcode(); |
5040 | 5.32k | if (Opcode == ISD::BITCAST) |
5041 | 5.32k | // Look through bitcasts. |
5042 | 1.13k | Op = Op.getOperand(0); |
5043 | 4.18k | else if (Opcode == ISD::VECTOR_SHUFFLE &&
5044 | 4.18k | canTreatAsByteVector(Op.getValueType())) {
5045 | 75 | // Get a VPERM-like permute mask and see whether the bytes covered |
5046 | 75 | // by the extracted element are a contiguous sequence from one |
5047 | 75 | // source operand. |
5048 | 75 | SmallVector<int, SystemZ::VectorBytes> Bytes; |
5049 | 75 | getVPermMask(cast<ShuffleVectorSDNode>(Op), Bytes); |
5050 | 75 | int First; |
5051 | 75 | if (!getShuffleInput(Bytes, Index * BytesPerElement, |
5052 | 75 | BytesPerElement, First)) |
5053 | 0 | break; |
5054 | 75 | if (First < 0)
5055 | 0 | return DAG.getUNDEF(ResVT); |
5056 | 75 | // Make sure the contiguous sequence starts at a multiple of the |
5057 | 75 | // original element size. |
5058 | 75 | unsigned Byte = unsigned(First) % Bytes.size(); |
5059 | 75 | if (Byte % BytesPerElement != 0) |
5060 | 0 | break; |
5061 | 75 | // We can get the extracted value directly from an input. |
5062 | 75 | Index = Byte / BytesPerElement; |
5063 | 75 | Op = Op.getOperand(unsigned(First) / Bytes.size()); |
5064 | 75 | Force = true; |
5065 | 4.18k | } else if (Opcode == ISD::BUILD_VECTOR &&
5066 | 4.11k | canTreatAsByteVector(Op.getValueType())) {
5067 | 65 | // We can only optimize this case if the BUILD_VECTOR elements are |
5068 | 65 | // at least as wide as the extracted value. |
5069 | 65 | EVT OpVT = Op.getValueType(); |
5070 | 65 | unsigned OpBytesPerElement = OpVT.getVectorElementType().getStoreSize(); |
5071 | 65 | if (OpBytesPerElement < BytesPerElement) |
5072 | 24 | break; |
5073 | 41 | // Make sure that the least-significant bit of the extracted value |
5074 | 41 | // is the least significant bit of an input. |
5075 | 41 | unsigned End = (Index + 1) * BytesPerElement; |
5076 | 41 | if (End % OpBytesPerElement != 0) |
5077 | 0 | break; |
5078 | 41 | // We're extracting the low part of one operand of the BUILD_VECTOR. |
5079 | 41 | Op = Op.getOperand(End / OpBytesPerElement - 1); |
5080 | 41 | if (!Op.getValueType().isInteger()) {
5081 | 0 | EVT VT = MVT::getIntegerVT(Op.getValueSizeInBits()); |
5082 | 0 | Op = DAG.getNode(ISD::BITCAST, DL, VT, Op); |
5083 | 0 | DCI.AddToWorklist(Op.getNode()); |
5084 | 0 | } |
5085 | 41 | EVT VT = MVT::getIntegerVT(ResVT.getSizeInBits()); |
5086 | 41 | Op = DAG.getNode(ISD::TRUNCATE, DL, VT, Op); |
5087 | 41 | if (VT != ResVT) {
5088 | 0 | DCI.AddToWorklist(Op.getNode()); |
5089 | 0 | Op = DAG.getNode(ISD::BITCAST, DL, ResVT, Op); |
5090 | 0 | } |
5091 | 65 | return Op; |
5092 | 4.04k | } else if ((Opcode == ISD::SIGN_EXTEND_VECTOR_INREG ||
5093 | 4.03k | Opcode == ISD::ZERO_EXTEND_VECTOR_INREG || |
5094 | 4.03k | Opcode == ISD::ANY_EXTEND_VECTOR_INREG) && |
5095 | 18 | canTreatAsByteVector(Op.getValueType()) && |
5096 | 4.04k | canTreatAsByteVector(Op.getOperand(0).getValueType())) {
5097 | 18 | // Make sure that only the unextended bits are significant. |
5098 | 18 | EVT ExtVT = Op.getValueType(); |
5099 | 18 | EVT OpVT = Op.getOperand(0).getValueType(); |
5100 | 18 | unsigned ExtBytesPerElement = ExtVT.getVectorElementType().getStoreSize(); |
5101 | 18 | unsigned OpBytesPerElement = OpVT.getVectorElementType().getStoreSize(); |
5102 | 18 | unsigned Byte = Index * BytesPerElement; |
5103 | 18 | unsigned SubByte = Byte % ExtBytesPerElement; |
5104 | 18 | unsigned MinSubByte = ExtBytesPerElement - OpBytesPerElement; |
5105 | 18 | if (SubByte < MinSubByte || |
5106 | 6 | SubByte + BytesPerElement > ExtBytesPerElement) |
5107 | 12 | break; |
5108 | 6 | // Get the byte offset of the unextended element |
5109 | 6 | Byte = Byte / ExtBytesPerElement * OpBytesPerElement; |
5110 | 6 | // ...then add the byte offset relative to that element. |
5111 | 6 | Byte += SubByte - MinSubByte; |
5112 | 6 | if (Byte % BytesPerElement != 0) |
5113 | 0 | break; |
5114 | 6 | Op = Op.getOperand(0); |
5115 | 6 | Index = Byte / BytesPerElement; |
5116 | 6 | Force = true; |
5117 | 6 | } else |
5118 | 4.03k | break; |
5119 | 4.06k | } |
5120 | 4.06k | if (Force) {
5121 | 153 | if (Op.getValueType() != VecVT) {
5122 | 99 | Op = DAG.getNode(ISD::BITCAST, DL, VecVT, Op); |
5123 | 99 | DCI.AddToWorklist(Op.getNode()); |
5124 | 99 | } |
5125 | 153 | return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, ResVT, Op, |
5126 | 153 | DAG.getConstant(Index, DL, MVT::i32)); |
5127 | 153 | } |
5128 | 3.91k | return SDValue(); |
5129 | 3.91k | } |
5130 | | |
5131 | | // Optimize vector operations in scalar value Op on the basis that Op |
5132 | | // is truncated to TruncVT. |
5133 | | SDValue SystemZTargetLowering::combineTruncateExtract( |
5134 | 7.50k | const SDLoc &DL, EVT TruncVT, SDValue Op, DAGCombinerInfo &DCI) const { |
5135 | 7.50k | // If we have (trunc (extract_vector_elt X, Y)), try to turn it into |
5136 | 7.50k | // (extract_vector_elt (bitcast X), Y'), where (bitcast X) has elements |
5137 | 7.50k | // of type TruncVT. |
5138 | 7.50k | if (Op.getOpcode() == ISD::EXTRACT_VECTOR_ELT && |
5139 | 7.50k | TruncVT.getSizeInBits() % 8 == 0) {
5140 | 153 | SDValue Vec = Op.getOperand(0); |
5141 | 153 | EVT VecVT = Vec.getValueType(); |
5142 | 153 | if (canTreatAsByteVector(VecVT)) {
5143 | 149 | if (auto *IndexN = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
5144 | 137 | unsigned BytesPerElement = VecVT.getVectorElementType().getStoreSize(); |
5145 | 137 | unsigned TruncBytes = TruncVT.getStoreSize(); |
5146 | 137 | if (BytesPerElement % TruncBytes == 0) {
5147 | 137 | // Calculate the value of Y' in the above description. We are |
5148 | 137 | // splitting the original elements into Scale equal-sized pieces |
5149 | 137 | // and for truncation purposes want the last (least-significant) |
5150 | 137 | // of these pieces for IndexN. This is easiest to do by calculating |
5151 | 137 | // the start index of the following element and then subtracting 1. |
5152 | 137 | unsigned Scale = BytesPerElement / TruncBytes; |
5153 | 137 | unsigned NewIndex = (IndexN->getZExtValue() + 1) * Scale - 1; |
5154 | 137 | |
5155 | 137 | // Defer the creation of the bitcast from X to combineExtract, |
5156 | 137 | // which might be able to optimize the extraction. |
5157 | 137 | VecVT = MVT::getVectorVT(MVT::getIntegerVT(TruncBytes * 8), |
5158 | 137 | VecVT.getStoreSize() / TruncBytes); |
5159 | 137 | EVT ResVT = (TruncBytes < 4 ? MVT::i32 : TruncVT);
5160 | 137 | return combineExtract(DL, ResVT, VecVT, Vec, NewIndex, DCI, true); |
5161 | 137 | } |
5162 | 7.36k | } |
5163 | 149 | } |
5164 | 153 | } |
5165 | 7.36k | return SDValue(); |
5166 | 7.36k | } |
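
A worked instance of the NewIndex computation above (illustrative only): truncating element 1 of a v2i64 to i8 reinterprets the vector as sixteen i8 elements, and on this big-endian target the least-significant byte of that element is the last of its eight pieces.

#include <cassert>

int main() {
  unsigned BytesPerElement = 8;                  // i64 source elements
  unsigned TruncBytes = 1;                       // truncating to i8
  unsigned Scale = BytesPerElement / TruncBytes; // 8 pieces per element
  unsigned Index = 1;                            // original element index
  unsigned NewIndex = (Index + 1) * Scale - 1;   // last (least-significant) piece
  assert(NewIndex == 15);
  return 0;
}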
5167 | | |
5168 | | SDValue SystemZTargetLowering::combineSIGN_EXTEND( |
5169 | 262 | SDNode *N, DAGCombinerInfo &DCI) const { |
5170 | 262 | // Convert (sext (ashr (shl X, C1), C2)) to |
5171 | 262 | // (ashr (shl (anyext X), C1'), C2')), since wider shifts are as |
5172 | 262 | // cheap as narrower ones. |
5173 | 262 | SelectionDAG &DAG = DCI.DAG; |
5174 | 262 | SDValue N0 = N->getOperand(0); |
5175 | 262 | EVT VT = N->getValueType(0); |
5176 | 262 | if (N0.hasOneUse() && N0.getOpcode() == ISD::SRA) {
5177 | 1 | auto *SraAmt = dyn_cast<ConstantSDNode>(N0.getOperand(1)); |
5178 | 1 | SDValue Inner = N0.getOperand(0); |
5179 | 1 | if (SraAmt && Inner.hasOneUse() && Inner.getOpcode() == ISD::SHL) {
5180 | 1 | if (auto *ShlAmt = dyn_cast<ConstantSDNode>(Inner.getOperand(1))) {
5181 | 1 | unsigned Extra = (VT.getSizeInBits() - N0.getValueSizeInBits()); |
5182 | 1 | unsigned NewShlAmt = ShlAmt->getZExtValue() + Extra; |
5183 | 1 | unsigned NewSraAmt = SraAmt->getZExtValue() + Extra; |
5184 | 1 | EVT ShiftVT = N0.getOperand(1).getValueType(); |
5185 | 1 | SDValue Ext = DAG.getNode(ISD::ANY_EXTEND, SDLoc(Inner), VT, |
5186 | 1 | Inner.getOperand(0)); |
5187 | 1 | SDValue Shl = DAG.getNode(ISD::SHL, SDLoc(Inner), VT, Ext, |
5188 | 1 | DAG.getConstant(NewShlAmt, SDLoc(Inner), |
5189 | 1 | ShiftVT)); |
5190 | 1 | return DAG.getNode(ISD::SRA, SDLoc(N0), VT, Shl, |
5191 | 1 | DAG.getConstant(NewSraAmt, SDLoc(N0), ShiftVT)); |
5192 | 1 | } |
5193 | 261 | } |
5194 | 1 | } |
5195 | 261 | return SDValue(); |
5196 | 261 | } |
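
A worked instance of the shift-amount adjustment above (illustrative only; it assumes the usual arithmetic right shift on signed types): widening an i32 shl/ashr pair that sign-extends an 8-bit field to i64 adds Extra = 64 - 32 = 32 to both amounts, so 24/24 becomes 56/56.

#include <cassert>
#include <cstdint>

int main() {
  int32_t X = 0x80;  // 8-bit field with its sign bit set
  // Narrow form: (ashr (shl X, 24), 24) on i32 sign-extends the low byte.
  int32_t Narrow = (int32_t)((uint32_t)X << 24) >> 24;
  // Wide form: any-extend X to i64 and use 24 + 32 = 56 for both shifts.
  int64_t Wide = (int64_t)((uint64_t)(uint32_t)X << 56) >> 56;
  assert(Narrow == -128 && Wide == -128);
  return 0;
}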
5197 | | |
5198 | | SDValue SystemZTargetLowering::combineMERGE( |
5199 | 939 | SDNode *N, DAGCombinerInfo &DCI) const { |
5200 | 939 | SelectionDAG &DAG = DCI.DAG; |
5201 | 939 | unsigned Opcode = N->getOpcode(); |
5202 | 939 | SDValue Op0 = N->getOperand(0); |
5203 | 939 | SDValue Op1 = N->getOperand(1); |
5204 | 939 | if (Op0.getOpcode() == ISD::BITCAST) |
5205 | 851 | Op0 = Op0.getOperand(0); |
5206 | 939 | if (Op0.getOpcode() == SystemZISD::BYTE_MASK && |
5207 | 939 | cast<ConstantSDNode>(Op0.getOperand(0))->getZExtValue() == 0) {
5208 | 7 | // (z_merge_* 0, 0) -> 0. This is mostly useful for using VLLEZF |
5209 | 7 | // for v4f32. |
5210 | 7 | if (Op1 == N->getOperand(0)) |
5211 | 3 | return Op1; |
5212 | 4 | // (z_merge_? 0, X) -> (z_unpackl_? 0, X). |
5213 | 4 | EVT VT = Op1.getValueType(); |
5214 | 4 | unsigned ElemBytes = VT.getVectorElementType().getStoreSize(); |
5215 | 4 | if (ElemBytes <= 4) {
5216 | 2 | Opcode = (Opcode == SystemZISD::MERGE_HIGH ? |
5217 | 2 | SystemZISD::UNPACKL_HIGH : SystemZISD::UNPACKL_LOW);
5218 | 2 | EVT InVT = VT.changeVectorElementTypeToInteger(); |
5219 | 2 | EVT OutVT = MVT::getVectorVT(MVT::getIntegerVT(ElemBytes * 16), |
5220 | 2 | SystemZ::VectorBytes / ElemBytes / 2); |
5221 | 2 | if (VT != InVT) {
5222 | 2 | Op1 = DAG.getNode(ISD::BITCAST, SDLoc(N), InVT, Op1); |
5223 | 2 | DCI.AddToWorklist(Op1.getNode()); |
5224 | 2 | } |
5225 | 2 | SDValue Op = DAG.getNode(Opcode, SDLoc(N), OutVT, Op1); |
5226 | 2 | DCI.AddToWorklist(Op.getNode()); |
5227 | 2 | return DAG.getNode(ISD::BITCAST, SDLoc(N), VT, Op); |
5228 | 2 | } |
5229 | 934 | } |
5230 | 934 | return SDValue(); |
5231 | 934 | } |
5232 | | |
5233 | | SDValue SystemZTargetLowering::combineSTORE( |
5234 | 9.82k | SDNode *N, DAGCombinerInfo &DCI) const { |
5235 | 9.82k | SelectionDAG &DAG = DCI.DAG; |
5236 | 9.82k | auto *SN = cast<StoreSDNode>(N); |
5237 | 9.82k | auto &Op1 = N->getOperand(1); |
5238 | 9.82k | EVT MemVT = SN->getMemoryVT(); |
5239 | 9.82k | // If we have (truncstoreiN (extract_vector_elt X, Y), Z) then it is better |
5240 | 9.82k | // for the extraction to be done on a vMiN value, so that we can use VSTE. |
5241 | 9.82k | // If X has wider elements then convert it to: |
5242 | 9.82k | // (truncstoreiN (extract_vector_elt (bitcast X), Y2), Z). |
5243 | 9.82k | if (MemVT.isInteger()) {
5244 | 7.50k | if (SDValue Value = |
5245 | 137 | combineTruncateExtract(SDLoc(N), MemVT, SN->getValue(), DCI)) { |
5246 | 137 | DCI.AddToWorklist(Value.getNode()); |
5247 | 137 | |
5248 | 137 | // Rewrite the store with the new form of stored value. |
5249 | 137 | return DAG.getTruncStore(SN->getChain(), SDLoc(SN), Value, |
5250 | 137 | SN->getBasePtr(), SN->getMemoryVT(), |
5251 | 137 | SN->getMemOperand()); |
5252 | 137 | } |
5253 | 9.68k | } |
5254 | 9.68k | // Combine STORE (BSWAP) into STRVH/STRV/STRVG |
5255 | 9.68k | // See comment in combineBSWAP about volatile accesses. |
5256 | 9.68k | if (!SN->isTruncatingStore() &&
5257 | 8.53k | !SN->isVolatile() && |
5258 | 4.38k | Op1.getOpcode() == ISD::BSWAP && |
5259 | 21 | Op1.getNode()->hasOneUse() && |
5260 | 21 | (Op1.getValueType() == MVT::i16 || |
5261 | 14 | Op1.getValueType() == MVT::i32 || |
5262 | 9.68k | Op1.getValueType() == MVT::i64)) {
5263 | 21 | |
5264 | 21 | SDValue BSwapOp = Op1.getOperand(0); |
5265 | 21 | |
5266 | 21 | if (BSwapOp.getValueType() == MVT::i16) |
5267 | 7 | BSwapOp = DAG.getNode(ISD::ANY_EXTEND, SDLoc(N), MVT::i32, BSwapOp); |
5268 | 21 | |
5269 | 21 | SDValue Ops[] = { |
5270 | 21 | N->getOperand(0), BSwapOp, N->getOperand(2), |
5271 | 21 | DAG.getValueType(Op1.getValueType()) |
5272 | 21 | }; |
5273 | 21 | |
5274 | 21 | return |
5275 | 21 | DAG.getMemIntrinsicNode(SystemZISD::STRV, SDLoc(N), DAG.getVTList(MVT::Other), |
5276 | 21 | Ops, MemVT, SN->getMemOperand()); |
5277 | 21 | } |
5278 | 9.66k | return SDValue(); |
5279 | 9.66k | } |
5280 | | |
5281 | | SDValue SystemZTargetLowering::combineEXTRACT_VECTOR_ELT( |
5282 | 4.01k | SDNode *N, DAGCombinerInfo &DCI) const { |
5283 | 4.01k | |
5284 | 4.01k | if (!Subtarget.hasVector()) |
5285 | 8 | return SDValue(); |
5286 | 4.00k | |
5287 | 4.00k | // Try to simplify a vector extraction. |
5288 | 4.00k | if (auto *IndexN = dyn_cast<ConstantSDNode>(N->getOperand(1))) {
5289 | 3.97k | SDValue Op0 = N->getOperand(0); |
5290 | 3.97k | EVT VecVT = Op0.getValueType(); |
5291 | 3.97k | return combineExtract(SDLoc(N), N->getValueType(0), VecVT, Op0, |
5292 | 3.97k | IndexN->getZExtValue(), DCI, false); |
5293 | 3.97k | } |
5294 | 33 | return SDValue(); |
5295 | 33 | } |
5296 | | |
5297 | | SDValue SystemZTargetLowering::combineJOIN_DWORDS( |
5298 | 63 | SDNode *N, DAGCombinerInfo &DCI) const { |
5299 | 63 | SelectionDAG &DAG = DCI.DAG; |
5300 | 63 | // (join_dwords X, X) == (replicate X) |
5301 | 63 | if (N->getOperand(0) == N->getOperand(1)) |
5302 | 15 | return DAG.getNode(SystemZISD::REPLICATE, SDLoc(N), N->getValueType(0), |
5303 | 15 | N->getOperand(0)); |
5304 | 48 | return SDValue(); |
5305 | 48 | } |
5306 | | |
5307 | | SDValue SystemZTargetLowering::combineFP_ROUND( |
5308 | 99 | SDNode *N, DAGCombinerInfo &DCI) const { |
5309 | 99 | // (fpround (extract_vector_elt X 0)) |
5310 | 99 | // (fpround (extract_vector_elt X 1)) -> |
5311 | 99 | // (extract_vector_elt (VROUND X) 0) |
5312 | 99 | // (extract_vector_elt (VROUND X) 1) |
5313 | 99 | // |
5314 | 99 | // This is a special case since the target doesn't really support v2f32s. |
5315 | 99 | SelectionDAG &DAG = DCI.DAG; |
5316 | 99 | SDValue Op0 = N->getOperand(0); |
5317 | 99 | if (N->getValueType(0) == MVT::f32 && |
5318 | 42 | Op0.hasOneUse() && |
5319 | 42 | Op0.getOpcode() == ISD::EXTRACT_VECTOR_ELT && |
5320 | 4 | Op0.getOperand(0).getValueType() == MVT::v2f64 && |
5321 | 4 | Op0.getOperand(1).getOpcode() == ISD::Constant && |
5322 | 99 | cast<ConstantSDNode>(Op0.getOperand(1))->getZExtValue() == 0) {
5323 | 3 | SDValue Vec = Op0.getOperand(0); |
5324 | 3 | for (auto *U : Vec->uses()) { |
5325 | 3 | if (U != Op0.getNode() && |
5326 | 1 | U->hasOneUse() && |
5327 | 1 | U->getOpcode() == ISD::EXTRACT_VECTOR_ELT && |
5328 | 1 | U->getOperand(0) == Vec && |
5329 | 1 | U->getOperand(1).getOpcode() == ISD::Constant && |
5330 | 3 | cast<ConstantSDNode>(U->getOperand(1))->getZExtValue() == 1) {
5331 | 1 | SDValue OtherRound = SDValue(*U->use_begin(), 0); |
5332 | 1 | if (OtherRound.getOpcode() == ISD::FP_ROUND && |
5333 | 1 | OtherRound.getOperand(0) == SDValue(U, 0) && |
5334 | 1 | OtherRound.getValueType() == MVT::f32) {
5335 | 1 | SDValue VRound = DAG.getNode(SystemZISD::VROUND, SDLoc(N), |
5336 | 1 | MVT::v4f32, Vec); |
5337 | 1 | DCI.AddToWorklist(VRound.getNode()); |
5338 | 1 | SDValue Extract1 = |
5339 | 1 | DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SDLoc(U), MVT::f32, |
5340 | 1 | VRound, DAG.getConstant(2, SDLoc(U), MVT::i32)); |
5341 | 1 | DCI.AddToWorklist(Extract1.getNode()); |
5342 | 1 | DAG.ReplaceAllUsesOfValueWith(OtherRound, Extract1); |
5343 | 1 | SDValue Extract0 = |
5344 | 1 | DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SDLoc(Op0), MVT::f32, |
5345 | 1 | VRound, DAG.getConstant(0, SDLoc(Op0), MVT::i32)); |
5346 | 1 | return Extract0; |
5347 | 1 | } |
5348 | 98 | } |
5349 | 3 | } |
5350 | 3 | } |
5351 | 98 | return SDValue(); |
5352 | 98 | } |
5353 | | |
5354 | | SDValue SystemZTargetLowering::combineBSWAP( |
5355 | 104 | SDNode *N, DAGCombinerInfo &DCI) const { |
5356 | 104 | SelectionDAG &DAG = DCI.DAG; |
5357 | 104 | // Combine BSWAP (LOAD) into LRVH/LRV/LRVG |
5358 | 104 | // These loads are allowed to access memory multiple times, and so we must check |
5359 | 104 | // that the loads are not volatile before performing the combine. |
5360 | 104 | if (ISD::isNON_EXTLoad(N->getOperand(0).getNode()) && |
5361 | 91 | N->getOperand(0).hasOneUse() && |
5362 | 27 | (N->getValueType(0) == MVT::i16 || N->getValueType(0) == MVT::i32 ||
5363 | 27 | N->getValueType(0) == MVT::i64) && |
5364 | 104 | !cast<LoadSDNode>(N->getOperand(0))->isVolatile()) {
5365 | 22 | SDValue Load = N->getOperand(0); |
5366 | 22 | LoadSDNode *LD = cast<LoadSDNode>(Load); |
5367 | 22 | |
5368 | 22 | // Create the byte-swapping load. |
5369 | 22 | SDValue Ops[] = { |
5370 | 22 | LD->getChain(), // Chain |
5371 | 22 | LD->getBasePtr(), // Ptr |
5372 | 22 | DAG.getValueType(N->getValueType(0)) // VT |
5373 | 22 | }; |
5374 | 22 | SDValue BSLoad = |
5375 | 22 | DAG.getMemIntrinsicNode(SystemZISD::LRV, SDLoc(N), |
5376 | 22 | DAG.getVTList(N->getValueType(0) == MVT::i64 ? |
5377 | 22 | MVT::i64 : MVT::i32, MVT::Other),
5378 | 22 | Ops, LD->getMemoryVT(), LD->getMemOperand()); |
5379 | 22 | |
5380 | 22 | // If this is an i16 load, insert the truncate. |
5381 | 22 | SDValue ResVal = BSLoad; |
5382 | 22 | if (N->getValueType(0) == MVT::i16) |
5383 | 7 | ResVal = DAG.getNode(ISD::TRUNCATE, SDLoc(N), MVT::i16, BSLoad); |
5384 | 22 | |
5385 | 22 | // First, combine the bswap away. This makes the value produced by the |
5386 | 22 | // load dead. |
5387 | 22 | DCI.CombineTo(N, ResVal); |
5388 | 22 | |
5389 | 22 | // Next, combine the load away, we give it a bogus result value but a real |
5390 | 22 | // chain result. The result value is dead because the bswap is dead. |
5391 | 22 | DCI.CombineTo(Load.getNode(), ResVal, BSLoad.getValue(1)); |
5392 | 22 | |
5393 | 22 | // Return N so it doesn't get rechecked! |
5394 | 22 | return SDValue(N, 0); |
5395 | 22 | } |
5396 | 82 | return SDValue(); |
5397 | 82 | } |
5398 | | |
5399 | | SDValue SystemZTargetLowering::combineSHIFTROT( |
5400 | 2.26k | SDNode *N, DAGCombinerInfo &DCI) const { |
5401 | 2.26k | |
5402 | 2.26k | SelectionDAG &DAG = DCI.DAG; |
5403 | 2.26k | |
5404 | 2.26k | // Shift/rotate instructions only use the last 6 bits of the second operand |
5405 | 2.26k | // register. If the second operand is the result of an AND with an immediate |
5406 | 2.26k | // value that has its last 6 bits set, we can safely remove the AND operation. |
5407 | 2.26k | // |
5408 | 2.26k | // If the AND operation doesn't have the last 6 bits set, we can't remove it |
5409 | 2.26k | // entirely, but we can still truncate it to a 16-bit value. This prevents |
5410 | 2.26k | // us from ending up with a NILL with a signed operand, which will cause the |
5411 | 2.26k | // instruction printer to abort. |
5412 | 2.26k | SDValue N1 = N->getOperand(1); |
5413 | 2.26k | if (N1.getOpcode() == ISD::AND) {
5414 | 42 | SDValue AndMaskOp = N1->getOperand(1); |
5415 | 42 | auto *AndMask = dyn_cast<ConstantSDNode>(AndMaskOp); |
5416 | 42 | |
5417 | 42 | // The AND mask is constant |
5418 | 42 | if (AndMask) {
5419 | 40 | auto AmtVal = AndMask->getZExtValue(); |
5420 | 40 | |
5421 | 40 | // Bottom 6 bits are set |
5422 | 40 | if ((AmtVal & 0x3f) == 0x3f) {
5423 | 13 | SDValue AndOp = N1->getOperand(0); |
5424 | 13 | |
5425 | 13 | // This is the only use, so remove the node |
5426 | 13 | if (N1.hasOneUse()) {
5427 | 11 | // Combine the AND away |
5428 | 11 | DCI.CombineTo(N1.getNode(), AndOp); |
5429 | 11 | |
5430 | 11 | // Return N so it isn't rechecked |
5431 | 11 | return SDValue(N, 0); |
5432 | 11 | |
5433 | 11 | // The node will be reused, so create a new node for this one use |
5434 | 0 | } else { |
5435 | 2 | SDValue Replace = DAG.getNode(N->getOpcode(), SDLoc(N), |
5436 | 2 | N->getValueType(0), N->getOperand(0), |
5437 | 2 | AndOp); |
5438 | 2 | DCI.AddToWorklist(Replace.getNode()); |
5439 | 2 | |
5440 | 2 | return Replace; |
5441 | 2 | } |
5442 | 40 | |
5443 | 40 | // We can't remove the AND, but we can use NILL here (normally we would |
5444 | 40 | // use NILF). Only keep the last 16 bits of the mask. The actual |
5445 | 40 | // transformation will be handled by .td definitions. |
5446 | 27 | } else if (AmtVal >> 16 != 0) {
5447 | 2 | SDValue AndOp = N1->getOperand(0); |
5448 | 2 | |
5449 | 2 | auto NewMask = DAG.getConstant(AndMask->getZExtValue() & 0x0000ffff, |
5450 | 2 | SDLoc(AndMaskOp), |
5451 | 2 | AndMaskOp.getValueType()); |
5452 | 2 | |
5453 | 2 | auto NewAnd = DAG.getNode(N1.getOpcode(), SDLoc(N1), N1.getValueType(), |
5454 | 2 | AndOp, NewMask); |
5455 | 2 | |
5456 | 2 | SDValue Replace = DAG.getNode(N->getOpcode(), SDLoc(N), |
5457 | 2 | N->getValueType(0), N->getOperand(0), |
5458 | 2 | NewAnd); |
5459 | 2 | DCI.AddToWorklist(Replace.getNode()); |
5460 | 2 | |
5461 | 2 | return Replace; |
5462 | 2 | } |
5463 | 2.25k | } |
5464 | 42 | } |
5465 | 2.25k | |
5466 | 2.25k | return SDValue(); |
5467 | 2.25k | } |
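
A worked instance of the two mask cases above (illustrative only): a mask whose low six bits are all ones cannot change the effective shift amount, so the AND is redundant; any other mask only needs its low 16 bits, which keeps the immediate within NILL range.

#include <cassert>
#include <cstdint>

int main() {
  uint64_t Mask1 = 0x3f;            // low 6 bits set -> AND can be removed
  assert((Mask1 & 0x3f) == 0x3f);

  uint64_t Mask2 = 0x12345678;      // high bits set -> keep only the low 16 bits
  assert((Mask2 >> 16) != 0);
  assert((Mask2 & 0x0000ffff) == 0x5678);
  return 0;
}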
5468 | | |
5469 | | SDValue SystemZTargetLowering::PerformDAGCombine(SDNode *N, |
5470 | 47.9k | DAGCombinerInfo &DCI) const { |
5471 | 47.9k | switch(N->getOpcode()) { |
5472 | 30.3k | default: break; |
5473 | 262 | case ISD::SIGN_EXTEND: return combineSIGN_EXTEND(N, DCI); |
5474 | 939 | case SystemZISD::MERGE_HIGH: |
5475 | 939 | case SystemZISD::MERGE_LOW: return combineMERGE(N, DCI); |
5476 | 9.82k | case ISD::STORE: return combineSTORE(N, DCI); |
5477 | 4.01k | case ISD::EXTRACT_VECTOR_ELT: return combineEXTRACT_VECTOR_ELT(N, DCI); |
5478 | 63 | case SystemZISD::JOIN_DWORDS: return combineJOIN_DWORDS(N, DCI); |
5479 | 99 | case ISD::FP_ROUND: return combineFP_ROUND(N, DCI); |
5480 | 104 | case ISD::BSWAP: return combineBSWAP(N, DCI); |
5481 | 2.26k | case ISD::SHL: |
5482 | 2.26k | case ISD::SRA: |
5483 | 2.26k | case ISD::SRL: |
5484 | 2.26k | case ISD::ROTL: return combineSHIFTROT(N, DCI); |
5485 | 30.3k | } |
5486 | 30.3k | |
5487 | 30.3k | return SDValue(); |
5488 | 30.3k | } |
5489 | | |
5490 | | //===----------------------------------------------------------------------===// |
5491 | | // Custom insertion |
5492 | | //===----------------------------------------------------------------------===// |
5493 | | |
5494 | | // Create a new basic block after MBB. |
5495 | 2.40k | static MachineBasicBlock *emitBlockAfter(MachineBasicBlock *MBB) { |
5496 | 2.40k | MachineFunction &MF = *MBB->getParent(); |
5497 | 2.40k | MachineBasicBlock *NewMBB = MF.CreateMachineBasicBlock(MBB->getBasicBlock()); |
5498 | 2.40k | MF.insert(std::next(MachineFunction::iterator(MBB)), NewMBB); |
5499 | 2.40k | return NewMBB; |
5500 | 2.40k | } |
5501 | | |
5502 | | // Split MBB after MI and return the new block (the one that contains |
5503 | | // instructions after MI). |
5504 | | static MachineBasicBlock *splitBlockAfter(MachineBasicBlock::iterator MI, |
5505 | 6 | MachineBasicBlock *MBB) { |
5506 | 6 | MachineBasicBlock *NewMBB = emitBlockAfter(MBB); |
5507 | 6 | NewMBB->splice(NewMBB->begin(), MBB, |
5508 | 6 | std::next(MachineBasicBlock::iterator(MI)), MBB->end()); |
5509 | 6 | NewMBB->transferSuccessorsAndUpdatePHIs(MBB); |
5510 | 6 | return NewMBB; |
5511 | 6 | } |
5512 | | |
5513 | | // Split MBB before MI and return the new block (the one that contains MI). |
5514 | | static MachineBasicBlock *splitBlockBefore(MachineBasicBlock::iterator MI, |
5515 | 1.12k | MachineBasicBlock *MBB) { |
5516 | 1.12k | MachineBasicBlock *NewMBB = emitBlockAfter(MBB); |
5517 | 1.12k | NewMBB->splice(NewMBB->begin(), MBB, MI, MBB->end()); |
5518 | 1.12k | NewMBB->transferSuccessorsAndUpdatePHIs(MBB); |
5519 | 1.12k | return NewMBB; |
5520 | 1.12k | } |
5521 | | |
5522 | | // Force base value Base into a register before MI. Return the register. |
5523 | | static unsigned forceReg(MachineInstr &MI, MachineOperand &Base, |
5524 | 6 | const SystemZInstrInfo *TII) { |
5525 | 6 | if (Base.isReg()) |
5526 | 5 | return Base.getReg(); |
5527 | 1 | |
5528 | 1 | MachineBasicBlock *MBB = MI.getParent(); |
5529 | 1 | MachineFunction &MF = *MBB->getParent(); |
5530 | 1 | MachineRegisterInfo &MRI = MF.getRegInfo(); |
5531 | 1 | |
5532 | 1 | unsigned Reg = MRI.createVirtualRegister(&SystemZ::ADDR64BitRegClass); |
5533 | 1 | BuildMI(*MBB, MI, MI.getDebugLoc(), TII->get(SystemZ::LA), Reg) |
5534 | 1 | .add(Base) |
5535 | 1 | .addImm(0) |
5536 | 1 | .addReg(0); |
5537 | 1 | return Reg; |
5538 | 1 | } |
5539 | | |
5540 | | // Implement EmitInstrWithCustomInserter for pseudo Select* instruction MI. |
5541 | | MachineBasicBlock * |
5542 | | SystemZTargetLowering::emitSelect(MachineInstr &MI, |
5543 | | MachineBasicBlock *MBB, |
5544 | 610 | unsigned LOCROpcode) const { |
5545 | 610 | const SystemZInstrInfo *TII = |
5546 | 610 | static_cast<const SystemZInstrInfo *>(Subtarget.getInstrInfo()); |
5547 | 610 | |
5548 | 610 | unsigned DestReg = MI.getOperand(0).getReg(); |
5549 | 610 | unsigned TrueReg = MI.getOperand(1).getReg(); |
5550 | 610 | unsigned FalseReg = MI.getOperand(2).getReg(); |
5551 | 610 | unsigned CCValid = MI.getOperand(3).getImm(); |
5552 | 610 | unsigned CCMask = MI.getOperand(4).getImm(); |
5553 | 610 | DebugLoc DL = MI.getDebugLoc(); |
5554 | 610 | |
5555 | 610 | // Use LOCROpcode if possible. |
5556 | 610 | if (LOCROpcode && Subtarget.hasLoadStoreOnCond()) {
5557 | 47 | BuildMI(*MBB, MI, DL, TII->get(LOCROpcode), DestReg) |
5558 | 47 | .addReg(FalseReg).addReg(TrueReg) |
5559 | 47 | .addImm(CCValid).addImm(CCMask); |
5560 | 47 | MI.eraseFromParent(); |
5561 | 47 | return MBB; |
5562 | 47 | } |
5563 | 563 | |
5564 | 563 | MachineBasicBlock *StartMBB = MBB; |
5565 | 563 | MachineBasicBlock *JoinMBB = splitBlockBefore(MI, MBB); |
5566 | 563 | MachineBasicBlock *FalseMBB = emitBlockAfter(StartMBB); |
5567 | 563 | |
5568 | 563 | // StartMBB: |
5569 | 563 | // BRC CCMask, JoinMBB |
5570 | 563 | // # fallthrough to FalseMBB |
5571 | 563 | MBB = StartMBB; |
5572 | 563 | BuildMI(MBB, DL, TII->get(SystemZ::BRC)) |
5573 | 563 | .addImm(CCValid).addImm(CCMask).addMBB(JoinMBB); |
5574 | 563 | MBB->addSuccessor(JoinMBB); |
5575 | 563 | MBB->addSuccessor(FalseMBB); |
5576 | 563 | |
5577 | 563 | // FalseMBB: |
5578 | 563 | // # fallthrough to JoinMBB |
5579 | 563 | MBB = FalseMBB; |
5580 | 563 | MBB->addSuccessor(JoinMBB); |
5581 | 563 | |
5582 | 563 | // JoinMBB: |
5583 | 563 | // %Result = phi [ %FalseReg, FalseMBB ], [ %TrueReg, StartMBB ] |
5584 | 563 | // ... |
5585 | 563 | MBB = JoinMBB; |
5586 | 563 | BuildMI(*MBB, MI, DL, TII->get(SystemZ::PHI), DestReg) |
5587 | 563 | .addReg(TrueReg).addMBB(StartMBB) |
5588 | 563 | .addReg(FalseReg).addMBB(FalseMBB); |
5589 | 563 | |
5590 | 563 | MI.eraseFromParent(); |
5591 | 563 | return JoinMBB; |
5592 | 563 | } |
5593 | | |
5594 | | // Implement EmitInstrWithCustomInserter for pseudo CondStore* instruction MI. |
5595 | | // StoreOpcode is the store to use and Invert says whether the store should |
5596 | | // happen when the condition is false rather than true. If a STORE ON |
5597 | | // CONDITION is available, STOCOpcode is its opcode, otherwise it is 0. |
5598 | | MachineBasicBlock *SystemZTargetLowering::emitCondStore(MachineInstr &MI, |
5599 | | MachineBasicBlock *MBB, |
5600 | | unsigned StoreOpcode, |
5601 | | unsigned STOCOpcode, |
5602 | 121 | bool Invert) const { |
5603 | 121 | const SystemZInstrInfo *TII = |
5604 | 121 | static_cast<const SystemZInstrInfo *>(Subtarget.getInstrInfo()); |
5605 | 121 | |
5606 | 121 | unsigned SrcReg = MI.getOperand(0).getReg(); |
5607 | 121 | MachineOperand Base = MI.getOperand(1); |
5608 | 121 | int64_t Disp = MI.getOperand(2).getImm(); |
5609 | 121 | unsigned IndexReg = MI.getOperand(3).getReg(); |
5610 | 121 | unsigned CCValid = MI.getOperand(4).getImm(); |
5611 | 121 | unsigned CCMask = MI.getOperand(5).getImm(); |
5612 | 121 | DebugLoc DL = MI.getDebugLoc(); |
5613 | 121 | |
5614 | 121 | StoreOpcode = TII->getOpcodeForOffset(StoreOpcode, Disp); |
5615 | 121 | |
5616 | 121 | // Use STOCOpcode if possible. We could use different store patterns in |
5617 | 121 | // order to avoid matching the index register, but the performance trade-offs |
5618 | 121 | // might be more complicated in that case. |
5619 | 121 | if (STOCOpcode && !IndexReg && Subtarget.hasLoadStoreOnCond()) {
5620 | 39 | if (Invert) |
5621 | 30 | CCMask ^= CCValid; |
5622 | 39 | |
5623 | 39 | // ISel pattern matching also adds a load memory operand of the same |
5624 | 39 | // address, so take special care to find the storing memory operand. |
5625 | 39 | MachineMemOperand *MMO = nullptr; |
5626 | 39 | for (auto *I : MI.memoperands()) |
5627 | 39 | if (39 I->isStore()39 ) { |
5628 | 39 | MMO = I; |
5629 | 39 | break; |
5630 | 39 | } |
5631 | 39 | |
5632 | 39 | BuildMI(*MBB, MI, DL, TII->get(STOCOpcode)) |
5633 | 82 | .addReg(SrcReg) |
5634 | 82 | .add(Base) |
5635 | 82 | .addImm(Disp) |
5636 | 82 | .addImm(CCValid) |
5637 | 82 | .addImm(CCMask) |
5638 | 82 | .addMemOperand(MMO); |
5639 | 82 | |
5640 | 82 | MI.eraseFromParent(); |
5641 | 82 | return MBB; |
5642 | 82 | } |
5643 | 82 | |
5644 | 82 | // Get the condition needed to branch around the store. |
5645 | 82 | if (!Invert)
5646 | 20 | CCMask ^= CCValid; |
5647 | 121 | |
5648 | 121 | MachineBasicBlock *StartMBB = MBB; |
5649 | 121 | MachineBasicBlock *JoinMBB = splitBlockBefore(MI, MBB); |
5650 | 121 | MachineBasicBlock *FalseMBB = emitBlockAfter(StartMBB); |
5651 | 121 | |
5652 | 121 | // StartMBB: |
5653 | 121 | // BRC CCMask, JoinMBB |
5654 | 121 | // # fallthrough to FalseMBB |
5655 | 121 | MBB = StartMBB; |
5656 | 121 | BuildMI(MBB, DL, TII->get(SystemZ::BRC)) |
5657 | 121 | .addImm(CCValid).addImm(CCMask).addMBB(JoinMBB); |
5658 | 121 | MBB->addSuccessor(JoinMBB); |
5659 | 121 | MBB->addSuccessor(FalseMBB); |
5660 | 121 | |
5661 | 121 | // FalseMBB: |
5662 | 121 | // store %SrcReg, %Disp(%Index,%Base) |
5663 | 121 | // # fallthrough to JoinMBB |
5664 | 121 | MBB = FalseMBB; |
5665 | 121 | BuildMI(MBB, DL, TII->get(StoreOpcode)) |
5666 | 121 | .addReg(SrcReg) |
5667 | 121 | .add(Base) |
5668 | 121 | .addImm(Disp) |
5669 | 121 | .addReg(IndexReg); |
5670 | 121 | MBB->addSuccessor(JoinMBB); |
5671 | 121 | |
5672 | 121 | MI.eraseFromParent(); |
5673 | 121 | return JoinMBB; |
5674 | 121 | } |
5675 | | |
5676 | | // Implement EmitInstrWithCustomInserter for pseudo ATOMIC_LOAD{,W}_* |
5677 | | // or ATOMIC_SWAP{,W} instruction MI. BinOpcode is the instruction that |
5678 | | // performs the binary operation elided by "*", or 0 for ATOMIC_SWAP{,W}. |
5679 | | // BitSize is the width of the field in bits, or 0 if this is a partword |
5680 | | // ATOMIC_LOADW_* or ATOMIC_SWAPW instruction, in which case the bitsize |
5681 | | // is one of the operands. Invert says whether the field should be |
5682 | | // inverted after performing BinOpcode (e.g. for NAND). |
5683 | | MachineBasicBlock *SystemZTargetLowering::emitAtomicLoadBinary( |
5684 | | MachineInstr &MI, MachineBasicBlock *MBB, unsigned BinOpcode, |
5685 | 366 | unsigned BitSize, bool Invert) const { |
5686 | 366 | MachineFunction &MF = *MBB->getParent(); |
5687 | 366 | const SystemZInstrInfo *TII = |
5688 | 366 | static_cast<const SystemZInstrInfo *>(Subtarget.getInstrInfo()); |
5689 | 366 | MachineRegisterInfo &MRI = MF.getRegInfo(); |
5690 | 366 | bool IsSubWord = (BitSize < 32); |
5691 | 366 | |
5692 | 366 | // Extract the operands. Base can be a register or a frame index. |
5693 | 366 | // Src2 can be a register or immediate. |
5694 | 366 | unsigned Dest = MI.getOperand(0).getReg(); |
5695 | 366 | MachineOperand Base = earlyUseOperand(MI.getOperand(1)); |
5696 | 366 | int64_t Disp = MI.getOperand(2).getImm(); |
5697 | 366 | MachineOperand Src2 = earlyUseOperand(MI.getOperand(3)); |
5698 | 366 | unsigned BitShift = (IsSubWord ? MI.getOperand(4).getReg()224 : 0142 ); |
5699 | 366 | unsigned NegBitShift = (IsSubWord ? MI.getOperand(5).getReg()224 : 0142 ); |
5700 | 366 | DebugLoc DL = MI.getDebugLoc(); |
5701 | 366 | if (IsSubWord) |
5702 | 224 | BitSize = MI.getOperand(6).getImm(); |
5703 | 366 | |
5704 | 366 | // Subword operations use 32-bit registers. |
5705 | 366 | const TargetRegisterClass *RC = (BitSize <= 32 ? |
5706 | 280 | &SystemZ::GR32BitRegClass : |
5707 | 86 | &SystemZ::GR64BitRegClass); |
5708 | 366 | unsigned LOpcode = BitSize <= 32 ? SystemZ::L280 : SystemZ::LG86 ; |
5709 | 366 | unsigned CSOpcode = BitSize <= 32 ? SystemZ::CS280 : SystemZ::CSG86 ; |
5710 | 366 | |
5711 | 366 | // Get the right opcodes for the displacement. |
5712 | 366 | LOpcode = TII->getOpcodeForOffset(LOpcode, Disp); |
5713 | 366 | CSOpcode = TII->getOpcodeForOffset(CSOpcode, Disp); |
5714 | 366 | assert(LOpcode && CSOpcode && "Displacement out of range"); |
5715 | 366 | |
5716 | 366 | // Create virtual registers for temporary results. |
5717 | 366 | unsigned OrigVal = MRI.createVirtualRegister(RC); |
5718 | 366 | unsigned OldVal = MRI.createVirtualRegister(RC); |
5719 | 25 | unsigned NewVal = (BinOpcode || IsSubWord ? |
5720 | 366 | MRI.createVirtualRegister(RC)349 : Src2.getReg()17 ); |
5721 | 366 | unsigned RotatedOldVal = (IsSubWord ? MRI.createVirtualRegister(RC)224 : OldVal142 ); |
5722 | 366 | unsigned RotatedNewVal = (IsSubWord ? MRI.createVirtualRegister(RC)224 : NewVal142 ); |
5723 | 366 | |
5724 | 366 | // Insert a basic block for the main loop. |
5725 | 366 | MachineBasicBlock *StartMBB = MBB; |
5726 | 366 | MachineBasicBlock *DoneMBB = splitBlockBefore(MI, MBB); |
5727 | 366 | MachineBasicBlock *LoopMBB = emitBlockAfter(StartMBB); |
5728 | 366 | |
5729 | 366 | // StartMBB: |
5730 | 366 | // ... |
5731 | 366 | // %OrigVal = L Disp(%Base) |
5732 | 366 | // # fall through to LoopMMB |
5733 | 366 | MBB = StartMBB; |
5734 | 366 | BuildMI(MBB, DL, TII->get(LOpcode), OrigVal).add(Base).addImm(Disp).addReg(0); |
5735 | 366 | MBB->addSuccessor(LoopMBB); |
5736 | 366 | |
5737 | 366 | // LoopMBB: |
5738 | 366 | // %OldVal = phi [ %OrigVal, StartMBB ], [ %Dest, LoopMBB ] |
5739 | 366 | // %RotatedOldVal = RLL %OldVal, 0(%BitShift) |
5740 | 366 | // %RotatedNewVal = OP %RotatedOldVal, %Src2 |
5741 | 366 | // %NewVal = RLL %RotatedNewVal, 0(%NegBitShift) |
5742 | 366 | // %Dest = CS %OldVal, %NewVal, Disp(%Base) |
5743 | 366 | // JNE LoopMBB |
5744 | 366 | // # fall through to DoneMMB |
5745 | 366 | MBB = LoopMBB; |
5746 | 366 | BuildMI(MBB, DL, TII->get(SystemZ::PHI), OldVal) |
5747 | 366 | .addReg(OrigVal).addMBB(StartMBB) |
5748 | 366 | .addReg(Dest).addMBB(LoopMBB); |
5749 | 366 | if (IsSubWord) |
5750 | 224 | BuildMI(MBB, DL, TII->get(SystemZ::RLL), RotatedOldVal) |
5751 | 224 | .addReg(OldVal).addReg(BitShift).addImm(0); |
5752 | 366 | if (Invert366 ) { |
5753 | 61 | // Perform the operation normally and then invert every bit of the field. |
5754 | 61 | unsigned Tmp = MRI.createVirtualRegister(RC); |
5755 | 61 | BuildMI(MBB, DL, TII->get(BinOpcode), Tmp).addReg(RotatedOldVal).add(Src2); |
5756 | 61 | if (BitSize <= 32) |
5757 | 61 | // XILF with the upper BitSize bits set. |
5758 | 44 | BuildMI(MBB, DL, TII->get(SystemZ::XILF), RotatedNewVal) |
5759 | 44 | .addReg(Tmp).addImm(-1U << (32 - BitSize)); |
5760 | 17 | else { |
5761 | 17 | // Use LCGR and add -1 to the result, which is more compact than |
5762 | 17 | // an XILF, XILH pair. |
5763 | 17 | unsigned Tmp2 = MRI.createVirtualRegister(RC); |
5764 | 17 | BuildMI(MBB, DL, TII->get(SystemZ::LCGR), Tmp2).addReg(Tmp); |
5765 | 17 | BuildMI(MBB, DL, TII->get(SystemZ::AGHI), RotatedNewVal) |
5766 | 17 | .addReg(Tmp2).addImm(-1); |
5767 | 17 | } |
5768 | 366 | } else if (305 BinOpcode305 ) |
5769 | 305 | // A simply binary operation. |
5770 | 280 | BuildMI(MBB, DL, TII->get(BinOpcode), RotatedNewVal) |
5771 | 280 | .addReg(RotatedOldVal) |
5772 | 280 | .add(Src2); |
5773 | 25 | else if (25 IsSubWord25 ) |
5774 | 25 | // Use RISBG to rotate Src2 into position and use it to replace the |
5775 | 25 | // field in RotatedOldVal. |
5776 | 8 | BuildMI(MBB, DL, TII->get(SystemZ::RISBG32), RotatedNewVal) |
5777 | 8 | .addReg(RotatedOldVal).addReg(Src2.getReg()) |
5778 | 8 | .addImm(32).addImm(31 + BitSize).addImm(32 - BitSize); |
5779 | 366 | if (IsSubWord) |
5780 | 224 | BuildMI(MBB, DL, TII->get(SystemZ::RLL), NewVal) |
5781 | 224 | .addReg(RotatedNewVal).addReg(NegBitShift).addImm(0); |
5782 | 366 | BuildMI(MBB, DL, TII->get(CSOpcode), Dest) |
5783 | 366 | .addReg(OldVal) |
5784 | 366 | .addReg(NewVal) |
5785 | 366 | .add(Base) |
5786 | 366 | .addImm(Disp); |
5787 | 366 | BuildMI(MBB, DL, TII->get(SystemZ::BRC)) |
5788 | 366 | .addImm(SystemZ::CCMASK_CS).addImm(SystemZ::CCMASK_CS_NE).addMBB(LoopMBB); |
5789 | 366 | MBB->addSuccessor(LoopMBB); |
5790 | 366 | MBB->addSuccessor(DoneMBB); |
5791 | 366 | |
5792 | 366 | MI.eraseFromParent(); |
5793 | 366 | return DoneMBB; |
5794 | 366 | } |
5795 | | |
5796 | | // Implement EmitInstrWithCustomInserter for pseudo |
5797 | | // ATOMIC_LOAD{,W}_{,U}{MIN,MAX} instruction MI. CompareOpcode is the |
5798 | | // instruction that should be used to compare the current field with the |
5799 | | // minimum or maximum value. KeepOldMask is the BRC condition-code mask |
5800 | | // for when the current field should be kept. BitSize is the width of |
5801 | | // the field in bits, or 0 if this is a partword ATOMIC_LOADW_* instruction. |
5802 | | MachineBasicBlock *SystemZTargetLowering::emitAtomicLoadMinMax( |
5803 | | MachineInstr &MI, MachineBasicBlock *MBB, unsigned CompareOpcode, |
5804 | 71 | unsigned KeepOldMask, unsigned BitSize) const { |
5805 | 71 | MachineFunction &MF = *MBB->getParent(); |
5806 | 71 | const SystemZInstrInfo *TII = |
5807 | 71 | static_cast<const SystemZInstrInfo *>(Subtarget.getInstrInfo()); |
5808 | 71 | MachineRegisterInfo &MRI = MF.getRegInfo(); |
5809 | 71 | bool IsSubWord = (BitSize < 32); |
5810 | 71 | |
5811 | 71 | // Extract the operands. Base can be a register or a frame index. |
5812 | 71 | unsigned Dest = MI.getOperand(0).getReg(); |
5813 | 71 | MachineOperand Base = earlyUseOperand(MI.getOperand(1)); |
5814 | 71 | int64_t Disp = MI.getOperand(2).getImm(); |
5815 | 71 | unsigned Src2 = MI.getOperand(3).getReg(); |
5816 | 71 | unsigned BitShift = (IsSubWord ? MI.getOperand(4).getReg()48 : 023 ); |
5817 | 71 | unsigned NegBitShift = (IsSubWord ? MI.getOperand(5).getReg()48 : 023 ); |
5818 | 71 | DebugLoc DL = MI.getDebugLoc(); |
5819 | 71 | if (IsSubWord) |
5820 | 48 | BitSize = MI.getOperand(6).getImm(); |
5821 | 71 | |
5822 | 71 | // Subword operations use 32-bit registers. |
5823 | 71 | const TargetRegisterClass *RC = (BitSize <= 32 ? |
5824 | 61 | &SystemZ::GR32BitRegClass : |
5825 | 10 | &SystemZ::GR64BitRegClass); |
5826 | 71 | unsigned LOpcode = BitSize <= 32 ? SystemZ::L : SystemZ::LG;
5827 | 71 | unsigned CSOpcode = BitSize <= 32 ? SystemZ::CS : SystemZ::CSG;
5828 | 71 | |
5829 | 71 | // Get the right opcodes for the displacement. |
5830 | 71 | LOpcode = TII->getOpcodeForOffset(LOpcode, Disp); |
5831 | 71 | CSOpcode = TII->getOpcodeForOffset(CSOpcode, Disp); |
5832 | 71 | assert(LOpcode && CSOpcode && "Displacement out of range"); |
5833 | 71 | |
5834 | 71 | // Create virtual registers for temporary results. |
5835 | 71 | unsigned OrigVal = MRI.createVirtualRegister(RC); |
5836 | 71 | unsigned OldVal = MRI.createVirtualRegister(RC); |
5837 | 71 | unsigned NewVal = MRI.createVirtualRegister(RC); |
5838 | 71 | unsigned RotatedOldVal = (IsSubWord ? MRI.createVirtualRegister(RC) : OldVal);
5839 | 71 | unsigned RotatedAltVal = (IsSubWord ? MRI.createVirtualRegister(RC) : Src2);
5840 | 71 | unsigned RotatedNewVal = (IsSubWord ? MRI.createVirtualRegister(RC) : NewVal);
5841 | 71 | |
5842 | 71 | // Insert 3 basic blocks for the loop. |
5843 | 71 | MachineBasicBlock *StartMBB = MBB; |
5844 | 71 | MachineBasicBlock *DoneMBB = splitBlockBefore(MI, MBB); |
5845 | 71 | MachineBasicBlock *LoopMBB = emitBlockAfter(StartMBB); |
5846 | 71 | MachineBasicBlock *UseAltMBB = emitBlockAfter(LoopMBB); |
5847 | 71 | MachineBasicBlock *UpdateMBB = emitBlockAfter(UseAltMBB); |
5848 | 71 | |
5849 | 71 | // StartMBB: |
5850 | 71 | // ... |
5851 | 71 | // %OrigVal = L Disp(%Base) |
5852 | 71 | // # fall through to LoopMBB
5853 | 71 | MBB = StartMBB; |
5854 | 71 | BuildMI(MBB, DL, TII->get(LOpcode), OrigVal).add(Base).addImm(Disp).addReg(0); |
5855 | 71 | MBB->addSuccessor(LoopMBB); |
5856 | 71 | |
5857 | 71 | // LoopMBB: |
5858 | 71 | // %OldVal = phi [ %OrigVal, StartMBB ], [ %Dest, UpdateMBB ] |
5859 | 71 | // %RotatedOldVal = RLL %OldVal, 0(%BitShift) |
5860 | 71 | // CompareOpcode %RotatedOldVal, %Src2 |
5861 | 71 | // BRC KeepOldMask, UpdateMBB |
5862 | 71 | MBB = LoopMBB; |
5863 | 71 | BuildMI(MBB, DL, TII->get(SystemZ::PHI), OldVal) |
5864 | 71 | .addReg(OrigVal).addMBB(StartMBB) |
5865 | 71 | .addReg(Dest).addMBB(UpdateMBB); |
5866 | 71 | if (IsSubWord) |
5867 | 48 | BuildMI(MBB, DL, TII->get(SystemZ::RLL), RotatedOldVal) |
5868 | 48 | .addReg(OldVal).addReg(BitShift).addImm(0); |
5869 | 71 | BuildMI(MBB, DL, TII->get(CompareOpcode)) |
5870 | 71 | .addReg(RotatedOldVal).addReg(Src2); |
5871 | 71 | BuildMI(MBB, DL, TII->get(SystemZ::BRC)) |
5872 | 71 | .addImm(SystemZ::CCMASK_ICMP).addImm(KeepOldMask).addMBB(UpdateMBB); |
5873 | 71 | MBB->addSuccessor(UpdateMBB); |
5874 | 71 | MBB->addSuccessor(UseAltMBB); |
5875 | 71 | |
5876 | 71 | // UseAltMBB: |
5877 | 71 | // %RotatedAltVal = RISBG %RotatedOldVal, %Src2, 32, 31 + BitSize, 0 |
5878 | 71 | // # fall through to UpdateMBB
5879 | 71 | MBB = UseAltMBB; |
5880 | 71 | if (IsSubWord) |
5881 | 48 | BuildMI(MBB, DL, TII->get(SystemZ::RISBG32), RotatedAltVal) |
5882 | 48 | .addReg(RotatedOldVal).addReg(Src2) |
5883 | 48 | .addImm(32).addImm(31 + BitSize).addImm(0); |
5884 | 71 | MBB->addSuccessor(UpdateMBB); |
5885 | 71 | |
5886 | 71 | // UpdateMBB: |
5887 | 71 | // %RotatedNewVal = PHI [ %RotatedOldVal, LoopMBB ], |
5888 | 71 | // [ %RotatedAltVal, UseAltMBB ] |
5889 | 71 | // %NewVal = RLL %RotatedNewVal, 0(%NegBitShift) |
5890 | 71 | // %Dest = CS %OldVal, %NewVal, Disp(%Base) |
5891 | 71 | // JNE LoopMBB |
5892 | 71 | // # fall through to DoneMBB
5893 | 71 | MBB = UpdateMBB; |
5894 | 71 | BuildMI(MBB, DL, TII->get(SystemZ::PHI), RotatedNewVal) |
5895 | 71 | .addReg(RotatedOldVal).addMBB(LoopMBB) |
5896 | 71 | .addReg(RotatedAltVal).addMBB(UseAltMBB); |
5897 | 71 | if (IsSubWord) |
5898 | 48 | BuildMI(MBB, DL, TII->get(SystemZ::RLL), NewVal) |
5899 | 48 | .addReg(RotatedNewVal).addReg(NegBitShift).addImm(0); |
5900 | 71 | BuildMI(MBB, DL, TII->get(CSOpcode), Dest) |
5901 | 71 | .addReg(OldVal) |
5902 | 71 | .addReg(NewVal) |
5903 | 71 | .add(Base) |
5904 | 71 | .addImm(Disp); |
5905 | 71 | BuildMI(MBB, DL, TII->get(SystemZ::BRC)) |
5906 | 71 | .addImm(SystemZ::CCMASK_CS).addImm(SystemZ::CCMASK_CS_NE).addMBB(LoopMBB); |
5907 | 71 | MBB->addSuccessor(LoopMBB); |
5908 | 71 | MBB->addSuccessor(DoneMBB); |
5909 | 71 | |
5910 | 71 | MI.eraseFromParent(); |
5911 | 71 | return DoneMBB; |
5912 | 71 | } |
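// A minimal C++ sketch of the min/max loop above (assuming the GCC/Clang
// __atomic builtins; this models the full-word unsigned case, i.e. the CLR
// comparison with KeepOldMask = CCMASK_CMP_LE). The MIR keeps the old field
// when the comparison says so, otherwise splices in Src2, and retries with CS.
#include <cstdint>

uint32_t atomicUMin32(uint32_t *Word, uint32_t Src2) {
  uint32_t Old = __atomic_load_n(Word, __ATOMIC_SEQ_CST);
  for (;;) {
    uint32_t New = (Old <= Src2) ? Old : Src2;   // keep old iff Old <= Src2
    if (__atomic_compare_exchange_n(Word, &Old, New, false,
                                    __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST))
      return Old;                                // the loaded value, like %Dest
    // On failure Old now holds the current contents; loop and recompare.
  }
}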
5913 | | |
5914 | | // Implement EmitInstrWithCustomInserter for pseudo ATOMIC_CMP_SWAPW |
5915 | | // instruction MI. |
5916 | | MachineBasicBlock * |
5917 | | SystemZTargetLowering::emitAtomicCmpSwapW(MachineInstr &MI, |
5918 | 16 | MachineBasicBlock *MBB) const { |
5919 | 16 | |
5920 | 16 | MachineFunction &MF = *MBB->getParent(); |
5921 | 16 | const SystemZInstrInfo *TII = |
5922 | 16 | static_cast<const SystemZInstrInfo *>(Subtarget.getInstrInfo()); |
5923 | 16 | MachineRegisterInfo &MRI = MF.getRegInfo(); |
5924 | 16 | |
5925 | 16 | // Extract the operands. Base can be a register or a frame index. |
5926 | 16 | unsigned Dest = MI.getOperand(0).getReg(); |
5927 | 16 | MachineOperand Base = earlyUseOperand(MI.getOperand(1)); |
5928 | 16 | int64_t Disp = MI.getOperand(2).getImm(); |
5929 | 16 | unsigned OrigCmpVal = MI.getOperand(3).getReg(); |
5930 | 16 | unsigned OrigSwapVal = MI.getOperand(4).getReg(); |
5931 | 16 | unsigned BitShift = MI.getOperand(5).getReg(); |
5932 | 16 | unsigned NegBitShift = MI.getOperand(6).getReg(); |
5933 | 16 | int64_t BitSize = MI.getOperand(7).getImm(); |
5934 | 16 | DebugLoc DL = MI.getDebugLoc(); |
5935 | 16 | |
5936 | 16 | const TargetRegisterClass *RC = &SystemZ::GR32BitRegClass; |
5937 | 16 | |
5938 | 16 | // Get the right opcodes for the displacement. |
5939 | 16 | unsigned LOpcode = TII->getOpcodeForOffset(SystemZ::L, Disp); |
5940 | 16 | unsigned CSOpcode = TII->getOpcodeForOffset(SystemZ::CS, Disp); |
5941 | 16 | assert(LOpcode && CSOpcode && "Displacement out of range"); |
5942 | 16 | |
5943 | 16 | // Create virtual registers for temporary results. |
5944 | 16 | unsigned OrigOldVal = MRI.createVirtualRegister(RC); |
5945 | 16 | unsigned OldVal = MRI.createVirtualRegister(RC); |
5946 | 16 | unsigned CmpVal = MRI.createVirtualRegister(RC); |
5947 | 16 | unsigned SwapVal = MRI.createVirtualRegister(RC); |
5948 | 16 | unsigned StoreVal = MRI.createVirtualRegister(RC); |
5949 | 16 | unsigned RetryOldVal = MRI.createVirtualRegister(RC); |
5950 | 16 | unsigned RetryCmpVal = MRI.createVirtualRegister(RC); |
5951 | 16 | unsigned RetrySwapVal = MRI.createVirtualRegister(RC); |
5952 | 16 | |
5953 | 16 | // Insert 2 basic blocks for the loop. |
5954 | 16 | MachineBasicBlock *StartMBB = MBB; |
5955 | 16 | MachineBasicBlock *DoneMBB = splitBlockBefore(MI, MBB); |
5956 | 16 | MachineBasicBlock *LoopMBB = emitBlockAfter(StartMBB); |
5957 | 16 | MachineBasicBlock *SetMBB = emitBlockAfter(LoopMBB); |
5958 | 16 | |
5959 | 16 | // StartMBB: |
5960 | 16 | // ... |
5961 | 16 | // %OrigOldVal = L Disp(%Base) |
5962 | 16 | // # fall through to LoopMBB
5963 | 16 | MBB = StartMBB; |
5964 | 16 | BuildMI(MBB, DL, TII->get(LOpcode), OrigOldVal) |
5965 | 16 | .add(Base) |
5966 | 16 | .addImm(Disp) |
5967 | 16 | .addReg(0); |
5968 | 16 | MBB->addSuccessor(LoopMBB); |
5969 | 16 | |
5970 | 16 | // LoopMBB: |
5971 | 16 | // %OldVal = phi [ %OrigOldVal, EntryBB ], [ %RetryOldVal, SetMBB ] |
5972 | 16 | // %CmpVal = phi [ %OrigCmpVal, EntryBB ], [ %RetryCmpVal, SetMBB ] |
5973 | 16 | // %SwapVal = phi [ %OrigSwapVal, EntryBB ], [ %RetrySwapVal, SetMBB ] |
5974 | 16 | // %Dest = RLL %OldVal, BitSize(%BitShift) |
5975 | 16 | // ^^ The low BitSize bits contain the field |
5976 | 16 | // of interest. |
5977 | 16 | // %RetryCmpVal = RISBG32 %CmpVal, %Dest, 32, 63-BitSize, 0 |
5978 | 16 | // ^^ Replace the upper 32-BitSize bits of the |
5979 | 16 | // comparison value with those that we loaded, |
5980 | 16 | // so that we can use a full word comparison. |
5981 | 16 | // CR %Dest, %RetryCmpVal |
5982 | 16 | // JNE DoneMBB |
5983 | 16 | // # Fall through to SetMBB |
5984 | 16 | MBB = LoopMBB; |
5985 | 16 | BuildMI(MBB, DL, TII->get(SystemZ::PHI), OldVal) |
5986 | 16 | .addReg(OrigOldVal).addMBB(StartMBB) |
5987 | 16 | .addReg(RetryOldVal).addMBB(SetMBB); |
5988 | 16 | BuildMI(MBB, DL, TII->get(SystemZ::PHI), CmpVal) |
5989 | 16 | .addReg(OrigCmpVal).addMBB(StartMBB) |
5990 | 16 | .addReg(RetryCmpVal).addMBB(SetMBB); |
5991 | 16 | BuildMI(MBB, DL, TII->get(SystemZ::PHI), SwapVal) |
5992 | 16 | .addReg(OrigSwapVal).addMBB(StartMBB) |
5993 | 16 | .addReg(RetrySwapVal).addMBB(SetMBB); |
5994 | 16 | BuildMI(MBB, DL, TII->get(SystemZ::RLL), Dest) |
5995 | 16 | .addReg(OldVal).addReg(BitShift).addImm(BitSize); |
5996 | 16 | BuildMI(MBB, DL, TII->get(SystemZ::RISBG32), RetryCmpVal) |
5997 | 16 | .addReg(CmpVal).addReg(Dest).addImm(32).addImm(63 - BitSize).addImm(0); |
5998 | 16 | BuildMI(MBB, DL, TII->get(SystemZ::CR)) |
5999 | 16 | .addReg(Dest).addReg(RetryCmpVal); |
6000 | 16 | BuildMI(MBB, DL, TII->get(SystemZ::BRC)) |
6001 | 16 | .addImm(SystemZ::CCMASK_ICMP) |
6002 | 16 | .addImm(SystemZ::CCMASK_CMP_NE).addMBB(DoneMBB); |
6003 | 16 | MBB->addSuccessor(DoneMBB); |
6004 | 16 | MBB->addSuccessor(SetMBB); |
6005 | 16 | |
6006 | 16 | // SetMBB: |
6007 | 16 | // %RetrySwapVal = RISBG32 %SwapVal, %Dest, 32, 63-BitSize, 0 |
6008 | 16 | // ^^ Replace the upper 32-BitSize bits of the new |
6009 | 16 | // value with those that we loaded. |
6010 | 16 | // %StoreVal = RLL %RetrySwapVal, -BitSize(%NegBitShift) |
6011 | 16 | // ^^ Rotate the new field to its proper position. |
6012 | 16 | // %RetryOldVal = CS %Dest, %StoreVal, Disp(%Base) |
6013 | 16 | // JNE LoopMBB |
6014 | 16 | // # fall through to DoneMBB
6015 | 16 | MBB = SetMBB; |
6016 | 16 | BuildMI(MBB, DL, TII->get(SystemZ::RISBG32), RetrySwapVal) |
6017 | 16 | .addReg(SwapVal).addReg(Dest).addImm(32).addImm(63 - BitSize).addImm(0); |
6018 | 16 | BuildMI(MBB, DL, TII->get(SystemZ::RLL), StoreVal) |
6019 | 16 | .addReg(RetrySwapVal).addReg(NegBitShift).addImm(-BitSize); |
6020 | 16 | BuildMI(MBB, DL, TII->get(CSOpcode), RetryOldVal) |
6021 | 16 | .addReg(OldVal) |
6022 | 16 | .addReg(StoreVal) |
6023 | 16 | .add(Base) |
6024 | 16 | .addImm(Disp); |
6025 | 16 | BuildMI(MBB, DL, TII->get(SystemZ::BRC)) |
6026 | 16 | .addImm(SystemZ::CCMASK_CS).addImm(SystemZ::CCMASK_CS_NE).addMBB(LoopMBB); |
6027 | 16 | MBB->addSuccessor(LoopMBB); |
6028 | 16 | MBB->addSuccessor(DoneMBB); |
6029 | 16 | |
6030 | 16 | MI.eraseFromParent(); |
6031 | 16 | return DoneMBB; |
6032 | 16 | } |
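// A minimal C++ sketch of the partword compare-and-swap loop above
// (assumptions: the __atomic builtins and all helper names are illustrative).
// The MIR rotates the field into the low BitSize bits, splices the loaded
// neighbouring bits into both the compare and the swap values with RISBG32
// so a full-word CS can be used, and retries while other bytes of the word
// keep changing under it.
#include <cstdint>

static inline uint32_t rotl32(uint32_t X, unsigned S) {
  return (X << S) | (X >> ((32 - S) & 31));
}

// Returns the old field value; *Success reports whether the swap happened.
uint32_t partwordCmpSwap(uint32_t *Word, unsigned BitShift, unsigned BitSize,
                         uint32_t CmpVal, uint32_t SwapVal, bool *Success) {
  uint32_t FieldMask = (BitSize == 32) ? ~0u : ((1u << BitSize) - 1);
  unsigned S = (BitShift + BitSize) & 31;        // RLL %OldVal, BitSize(%BitShift)
  for (;;) {
    uint32_t OldWord = __atomic_load_n(Word, __ATOMIC_SEQ_CST);
    uint32_t Dest = rotl32(OldWord, S);          // low BitSize bits hold the field
    if ((Dest & FieldMask) != (CmpVal & FieldMask)) {
      *Success = false;                          // JNE DoneMBB
      return Dest & FieldMask;
    }
    // Replace only the field, keeping the loaded neighbouring bits (RISBG32).
    uint32_t Retry = (Dest & ~FieldMask) | (SwapVal & FieldMask);
    uint32_t NewWord = rotl32(Retry, (32 - S) & 31); // rotate back into place
    if (__atomic_compare_exchange_n(Word, &OldWord, NewWord, false,
                                    __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)) {
      *Success = true;
      return Dest & FieldMask;
    }
    // CS failed because another byte of the word changed; loop and retry.
  }
}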
6033 | | |
6034 | | // Emit a move from two GR64s to a GR128. |
6035 | | MachineBasicBlock * |
6036 | | SystemZTargetLowering::emitPair128(MachineInstr &MI, |
6037 | 22 | MachineBasicBlock *MBB) const { |
6038 | 22 | MachineFunction &MF = *MBB->getParent(); |
6039 | 22 | const SystemZInstrInfo *TII = |
6040 | 22 | static_cast<const SystemZInstrInfo *>(Subtarget.getInstrInfo()); |
6041 | 22 | MachineRegisterInfo &MRI = MF.getRegInfo(); |
6042 | 22 | DebugLoc DL = MI.getDebugLoc(); |
6043 | 22 | |
6044 | 22 | unsigned Dest = MI.getOperand(0).getReg(); |
6045 | 22 | unsigned Hi = MI.getOperand(1).getReg(); |
6046 | 22 | unsigned Lo = MI.getOperand(2).getReg(); |
6047 | 22 | unsigned Tmp1 = MRI.createVirtualRegister(&SystemZ::GR128BitRegClass); |
6048 | 22 | unsigned Tmp2 = MRI.createVirtualRegister(&SystemZ::GR128BitRegClass); |
6049 | 22 | |
6050 | 22 | BuildMI(*MBB, MI, DL, TII->get(TargetOpcode::IMPLICIT_DEF), Tmp1); |
6051 | 22 | BuildMI(*MBB, MI, DL, TII->get(TargetOpcode::INSERT_SUBREG), Tmp2) |
6052 | 22 | .addReg(Tmp1).addReg(Hi).addImm(SystemZ::subreg_h64); |
6053 | 22 | BuildMI(*MBB, MI, DL, TII->get(TargetOpcode::INSERT_SUBREG), Dest) |
6054 | 22 | .addReg(Tmp2).addReg(Lo).addImm(SystemZ::subreg_l64); |
6055 | 22 | |
6056 | 22 | MI.eraseFromParent(); |
6057 | 22 | return MBB; |
6058 | 22 | } |
6059 | | |
6060 | | // Emit an extension from a GR64 to a GR128. ClearEven is true |
6061 | | // if the high register of the GR128 value must be cleared or false if |
6062 | | // it's "don't care". |
6063 | | MachineBasicBlock *SystemZTargetLowering::emitExt128(MachineInstr &MI, |
6064 | | MachineBasicBlock *MBB, |
6065 | 212 | bool ClearEven) const { |
6066 | 212 | MachineFunction &MF = *MBB->getParent(); |
6067 | 212 | const SystemZInstrInfo *TII = |
6068 | 212 | static_cast<const SystemZInstrInfo *>(Subtarget.getInstrInfo()); |
6069 | 212 | MachineRegisterInfo &MRI = MF.getRegInfo(); |
6070 | 212 | DebugLoc DL = MI.getDebugLoc(); |
6071 | 212 | |
6072 | 212 | unsigned Dest = MI.getOperand(0).getReg(); |
6073 | 212 | unsigned Src = MI.getOperand(1).getReg(); |
6074 | 212 | unsigned In128 = MRI.createVirtualRegister(&SystemZ::GR128BitRegClass); |
6075 | 212 | |
6076 | 212 | BuildMI(*MBB, MI, DL, TII->get(TargetOpcode::IMPLICIT_DEF), In128); |
6077 | 212 | if (ClearEven) {
6078 | 76 | unsigned NewIn128 = MRI.createVirtualRegister(&SystemZ::GR128BitRegClass); |
6079 | 76 | unsigned Zero64 = MRI.createVirtualRegister(&SystemZ::GR64BitRegClass); |
6080 | 76 | |
6081 | 76 | BuildMI(*MBB, MI, DL, TII->get(SystemZ::LLILL), Zero64) |
6082 | 76 | .addImm(0); |
6083 | 76 | BuildMI(*MBB, MI, DL, TII->get(TargetOpcode::INSERT_SUBREG), NewIn128) |
6084 | 76 | .addReg(In128).addReg(Zero64).addImm(SystemZ::subreg_h64); |
6085 | 76 | In128 = NewIn128; |
6086 | 76 | } |
6087 | 212 | BuildMI(*MBB, MI, DL, TII->get(TargetOpcode::INSERT_SUBREG), Dest) |
6088 | 212 | .addReg(In128).addReg(Src).addImm(SystemZ::subreg_l64); |
6089 | 212 | |
6090 | 212 | MI.eraseFromParent(); |
6091 | 212 | return MBB; |
6092 | 212 | } |
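// A small value-level sketch of emitPair128 and emitExt128 above (assuming
// compiler support for unsigned __int128; names are illustrative). PAIR128
// glues two GR64s into the high and low halves of a GR128; ZEXT128 places
// the source in the low half and clears the high half, while AEXT128 leaves
// the high half undefined.
#include <cstdint>

unsigned __int128 pair128(uint64_t Hi, uint64_t Lo) {
  return ((unsigned __int128)Hi << 64) | Lo;     // subreg_h64 : subreg_l64
}

unsigned __int128 zext128(uint64_t Src) {
  return (unsigned __int128)Src;                 // high half zeroed (LLILL 0)
}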
6093 | | |
6094 | | MachineBasicBlock *SystemZTargetLowering::emitMemMemWrapper( |
6095 | 126 | MachineInstr &MI, MachineBasicBlock *MBB, unsigned Opcode) const { |
6096 | 126 | MachineFunction &MF = *MBB->getParent(); |
6097 | 126 | const SystemZInstrInfo *TII = |
6098 | 126 | static_cast<const SystemZInstrInfo *>(Subtarget.getInstrInfo()); |
6099 | 126 | MachineRegisterInfo &MRI = MF.getRegInfo(); |
6100 | 126 | DebugLoc DL = MI.getDebugLoc(); |
6101 | 126 | |
6102 | 126 | MachineOperand DestBase = earlyUseOperand(MI.getOperand(0)); |
6103 | 126 | uint64_t DestDisp = MI.getOperand(1).getImm(); |
6104 | 126 | MachineOperand SrcBase = earlyUseOperand(MI.getOperand(2)); |
6105 | 126 | uint64_t SrcDisp = MI.getOperand(3).getImm(); |
6106 | 126 | uint64_t Length = MI.getOperand(4).getImm(); |
6107 | 126 | |
6108 | 126 | // When generating more than one CLC, all but the last will need to |
6109 | 126 | // branch to the end when a difference is found. |
6110 | 26 | MachineBasicBlock *EndMBB = (Length > 256 && Opcode == SystemZ::CLC ? |
6111 | 126 | splitBlockAfter(MI, MBB) : nullptr);
6112 | 126 | |
6113 | 126 | // Check for the loop form, in which operand 5 is the trip count. |
6114 | 126 | if (MI.getNumExplicitOperands() > 5) {
6115 | 4 | bool HaveSingleBase = DestBase.isIdenticalTo(SrcBase); |
6116 | 4 | |
6117 | 4 | uint64_t StartCountReg = MI.getOperand(5).getReg(); |
6118 | 4 | uint64_t StartSrcReg = forceReg(MI, SrcBase, TII); |
6119 | 2 | uint64_t StartDestReg = (HaveSingleBase ? StartSrcReg : |
6120 | 2 | forceReg(MI, DestBase, TII)); |
6121 | 4 | |
6122 | 4 | const TargetRegisterClass *RC = &SystemZ::ADDR64BitRegClass; |
6123 | 4 | uint64_t ThisSrcReg = MRI.createVirtualRegister(RC); |
6124 | 2 | uint64_t ThisDestReg = (HaveSingleBase ? ThisSrcReg : |
6125 | 2 | MRI.createVirtualRegister(RC)); |
6126 | 4 | uint64_t NextSrcReg = MRI.createVirtualRegister(RC); |
6127 | 2 | uint64_t NextDestReg = (HaveSingleBase ? NextSrcReg : |
6128 | 2 | MRI.createVirtualRegister(RC)); |
6129 | 4 | |
6130 | 4 | RC = &SystemZ::GR64BitRegClass; |
6131 | 4 | uint64_t ThisCountReg = MRI.createVirtualRegister(RC); |
6132 | 4 | uint64_t NextCountReg = MRI.createVirtualRegister(RC); |
6133 | 4 | |
6134 | 4 | MachineBasicBlock *StartMBB = MBB; |
6135 | 4 | MachineBasicBlock *DoneMBB = splitBlockBefore(MI, MBB); |
6136 | 4 | MachineBasicBlock *LoopMBB = emitBlockAfter(StartMBB); |
6137 | 4 | MachineBasicBlock *NextMBB = (EndMBB ? emitBlockAfter(LoopMBB) : LoopMBB);
6138 | 4 | |
6139 | 4 | // StartMBB: |
6140 | 4 | // # fall through to LoopMBB
6141 | 4 | MBB->addSuccessor(LoopMBB); |
6142 | 4 | |
6143 | 4 | // LoopMBB: |
6144 | 4 | // %ThisDestReg = phi [ %StartDestReg, StartMBB ], |
6145 | 4 | // [ %NextDestReg, NextMBB ] |
6146 | 4 | // %ThisSrcReg = phi [ %StartSrcReg, StartMBB ], |
6147 | 4 | // [ %NextSrcReg, NextMBB ] |
6148 | 4 | // %ThisCountReg = phi [ %StartCountReg, StartMBB ], |
6149 | 4 | // [ %NextCountReg, NextMBB ] |
6150 | 4 | // ( PFD 2, 768+DestDisp(%ThisDestReg) ) |
6151 | 4 | // Opcode DestDisp(256,%ThisDestReg), SrcDisp(%ThisSrcReg) |
6152 | 4 | // ( JLH EndMBB ) |
6153 | 4 | // |
6154 | 4 | // The prefetch is used only for MVC. The JLH is used only for CLC. |
6155 | 4 | MBB = LoopMBB; |
6156 | 4 | |
6157 | 4 | BuildMI(MBB, DL, TII->get(SystemZ::PHI), ThisDestReg) |
6158 | 4 | .addReg(StartDestReg).addMBB(StartMBB) |
6159 | 4 | .addReg(NextDestReg).addMBB(NextMBB); |
6160 | 4 | if (!HaveSingleBase) |
6161 | 2 | BuildMI(MBB, DL, TII->get(SystemZ::PHI), ThisSrcReg) |
6162 | 2 | .addReg(StartSrcReg).addMBB(StartMBB) |
6163 | 2 | .addReg(NextSrcReg).addMBB(NextMBB); |
6164 | 4 | BuildMI(MBB, DL, TII->get(SystemZ::PHI), ThisCountReg) |
6165 | 4 | .addReg(StartCountReg).addMBB(StartMBB) |
6166 | 4 | .addReg(NextCountReg).addMBB(NextMBB); |
6167 | 4 | if (Opcode == SystemZ::MVC) |
6168 | 3 | BuildMI(MBB, DL, TII->get(SystemZ::PFD)) |
6169 | 3 | .addImm(SystemZ::PFD_WRITE) |
6170 | 3 | .addReg(ThisDestReg).addImm(DestDisp + 768).addReg(0); |
6171 | 4 | BuildMI(MBB, DL, TII->get(Opcode)) |
6172 | 4 | .addReg(ThisDestReg).addImm(DestDisp).addImm(256) |
6173 | 4 | .addReg(ThisSrcReg).addImm(SrcDisp); |
6174 | 4 | if (EndMBB) {
6175 | 1 | BuildMI(MBB, DL, TII->get(SystemZ::BRC)) |
6176 | 1 | .addImm(SystemZ::CCMASK_ICMP).addImm(SystemZ::CCMASK_CMP_NE) |
6177 | 1 | .addMBB(EndMBB); |
6178 | 1 | MBB->addSuccessor(EndMBB); |
6179 | 1 | MBB->addSuccessor(NextMBB); |
6180 | 1 | } |
6181 | 4 | |
6182 | 4 | // NextMBB: |
6183 | 4 | // %NextDestReg = LA 256(%ThisDestReg) |
6184 | 4 | // %NextSrcReg = LA 256(%ThisSrcReg) |
6185 | 4 | // %NextCountReg = AGHI %ThisCountReg, -1 |
6186 | 4 | // CGHI %NextCountReg, 0 |
6187 | 4 | // JLH LoopMBB |
6188 | 4 | // # fall through to DoneMBB
6189 | 4 | // |
6190 | 4 | // The AGHI, CGHI and JLH should be converted to BRCTG by later passes. |
6191 | 4 | MBB = NextMBB; |
6192 | 4 | |
6193 | 4 | BuildMI(MBB, DL, TII->get(SystemZ::LA), NextDestReg) |
6194 | 4 | .addReg(ThisDestReg).addImm(256).addReg(0); |
6195 | 4 | if (!HaveSingleBase) |
6196 | 2 | BuildMI(MBB, DL, TII->get(SystemZ::LA), NextSrcReg) |
6197 | 2 | .addReg(ThisSrcReg).addImm(256).addReg(0); |
6198 | 4 | BuildMI(MBB, DL, TII->get(SystemZ::AGHI), NextCountReg) |
6199 | 4 | .addReg(ThisCountReg).addImm(-1); |
6200 | 4 | BuildMI(MBB, DL, TII->get(SystemZ::CGHI)) |
6201 | 4 | .addReg(NextCountReg).addImm(0); |
6202 | 4 | BuildMI(MBB, DL, TII->get(SystemZ::BRC)) |
6203 | 4 | .addImm(SystemZ::CCMASK_ICMP).addImm(SystemZ::CCMASK_CMP_NE) |
6204 | 4 | .addMBB(LoopMBB); |
6205 | 4 | MBB->addSuccessor(LoopMBB); |
6206 | 4 | MBB->addSuccessor(DoneMBB); |
6207 | 4 | |
6208 | 4 | DestBase = MachineOperand::CreateReg(NextDestReg, false); |
6209 | 4 | SrcBase = MachineOperand::CreateReg(NextSrcReg, false); |
6210 | 4 | Length &= 255; |
6211 | 4 | MBB = DoneMBB; |
6212 | 4 | } |
6213 | 126 | // Handle any remaining bytes with straight-line code. |
6214 | 296 | while (Length > 0) {
6215 | 170 | uint64_t ThisLength = std::min(Length, uint64_t(256)); |
6216 | 170 | // The previous iteration might have created out-of-range displacements. |
6217 | 170 | // Apply them using LAY if so. |
6218 | 170 | if (!isUInt<12>(DestDisp)) {
6219 | 2 | unsigned Reg = MRI.createVirtualRegister(&SystemZ::ADDR64BitRegClass); |
6220 | 2 | BuildMI(*MBB, MI, MI.getDebugLoc(), TII->get(SystemZ::LAY), Reg) |
6221 | 2 | .add(DestBase) |
6222 | 2 | .addImm(DestDisp) |
6223 | 2 | .addReg(0); |
6224 | 2 | DestBase = MachineOperand::CreateReg(Reg, false); |
6225 | 2 | DestDisp = 0; |
6226 | 2 | } |
6227 | 170 | if (!isUInt<12>(SrcDisp)) {
6228 | 2 | unsigned Reg = MRI.createVirtualRegister(&SystemZ::ADDR64BitRegClass); |
6229 | 2 | BuildMI(*MBB, MI, MI.getDebugLoc(), TII->get(SystemZ::LAY), Reg) |
6230 | 2 | .add(SrcBase) |
6231 | 2 | .addImm(SrcDisp) |
6232 | 2 | .addReg(0); |
6233 | 2 | SrcBase = MachineOperand::CreateReg(Reg, false); |
6234 | 2 | SrcDisp = 0; |
6235 | 2 | } |
6236 | 170 | BuildMI(*MBB, MI, DL, TII->get(Opcode)) |
6237 | 170 | .add(DestBase) |
6238 | 170 | .addImm(DestDisp) |
6239 | 170 | .addImm(ThisLength) |
6240 | 170 | .add(SrcBase) |
6241 | 170 | .addImm(SrcDisp) |
6242 | 170 | ->setMemRefs(MI.memoperands_begin(), MI.memoperands_end()); |
6243 | 170 | DestDisp += ThisLength; |
6244 | 170 | SrcDisp += ThisLength; |
6245 | 170 | Length -= ThisLength; |
6246 | 170 | // If there's another CLC to go, branch to the end if a difference |
6247 | 170 | // was found. |
6248 | 170 | if (EndMBB && Length > 0) {
6249 | 7 | MachineBasicBlock *NextMBB = splitBlockBefore(MI, MBB); |
6250 | 7 | BuildMI(MBB, DL, TII->get(SystemZ::BRC)) |
6251 | 7 | .addImm(SystemZ::CCMASK_ICMP).addImm(SystemZ::CCMASK_CMP_NE) |
6252 | 7 | .addMBB(EndMBB); |
6253 | 7 | MBB->addSuccessor(EndMBB); |
6254 | 7 | MBB->addSuccessor(NextMBB); |
6255 | 7 | MBB = NextMBB; |
6256 | 7 | } |
6257 | 170 | } |
6258 | 126 | if (EndMBB) {
6259 | 6 | MBB->addSuccessor(EndMBB); |
6260 | 6 | MBB = EndMBB; |
6261 | 6 | MBB->addLiveIn(SystemZ::CC); |
6262 | 6 | } |
6263 | 126 | |
6264 | 126 | MI.eraseFromParent(); |
6265 | 126 | return MBB; |
6266 | 126 | } |
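// A minimal C++ sketch of the byte-level behaviour that the MVC expansion
// above implements (helper name is illustrative). Lengths above 256 go
// through the loop form, whose trip count arrives in operand 5; whatever is
// left afterwards is emitted as straight-line MVCs of at most 256 bytes each.
#include <cstddef>
#include <cstring>

void mvcStyleCopy(char *Dest, const char *Src, size_t Length) {
  while (Length > 0) {
    size_t ThisLength = Length < 256 ? Length : 256; // one MVC moves <= 256 bytes
    std::memcpy(Dest, Src, ThisLength);
    Dest += ThisLength;                              // DestDisp += ThisLength
    Src += ThisLength;                               // SrcDisp  += ThisLength
    Length -= ThisLength;
  }
}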
6267 | | |
6268 | | // Decompose string pseudo-instruction MI into a loop that continually performs |
6269 | | // Opcode until CC != 3. |
6270 | | MachineBasicBlock *SystemZTargetLowering::emitStringWrapper( |
6271 | 13 | MachineInstr &MI, MachineBasicBlock *MBB, unsigned Opcode) const { |
6272 | 13 | MachineFunction &MF = *MBB->getParent(); |
6273 | 13 | const SystemZInstrInfo *TII = |
6274 | 13 | static_cast<const SystemZInstrInfo *>(Subtarget.getInstrInfo()); |
6275 | 13 | MachineRegisterInfo &MRI = MF.getRegInfo(); |
6276 | 13 | DebugLoc DL = MI.getDebugLoc(); |
6277 | 13 | |
6278 | 13 | uint64_t End1Reg = MI.getOperand(0).getReg(); |
6279 | 13 | uint64_t Start1Reg = MI.getOperand(1).getReg(); |
6280 | 13 | uint64_t Start2Reg = MI.getOperand(2).getReg(); |
6281 | 13 | uint64_t CharReg = MI.getOperand(3).getReg(); |
6282 | 13 | |
6283 | 13 | const TargetRegisterClass *RC = &SystemZ::GR64BitRegClass; |
6284 | 13 | uint64_t This1Reg = MRI.createVirtualRegister(RC); |
6285 | 13 | uint64_t This2Reg = MRI.createVirtualRegister(RC); |
6286 | 13 | uint64_t End2Reg = MRI.createVirtualRegister(RC); |
6287 | 13 | |
6288 | 13 | MachineBasicBlock *StartMBB = MBB; |
6289 | 13 | MachineBasicBlock *DoneMBB = splitBlockBefore(MI, MBB); |
6290 | 13 | MachineBasicBlock *LoopMBB = emitBlockAfter(StartMBB); |
6291 | 13 | |
6292 | 13 | // StartMBB: |
6293 | 13 | // # fall through to LoopMBB
6294 | 13 | MBB->addSuccessor(LoopMBB); |
6295 | 13 | |
6296 | 13 | // LoopMBB: |
6297 | 13 | // %This1Reg = phi [ %Start1Reg, StartMBB ], [ %End1Reg, LoopMBB ] |
6298 | 13 | // %This2Reg = phi [ %Start2Reg, StartMBB ], [ %End2Reg, LoopMBB ] |
6299 | 13 | // R0L = %CharReg |
6300 | 13 | // %End1Reg, %End2Reg = CLST %This1Reg, %This2Reg -- uses R0L |
6301 | 13 | // JO LoopMBB |
6302 | 13 | // # fall through to DoneMBB
6303 | 13 | // |
6304 | 13 | // The load of R0L can be hoisted by post-RA LICM. |
6305 | 13 | MBB = LoopMBB; |
6306 | 13 | |
6307 | 13 | BuildMI(MBB, DL, TII->get(SystemZ::PHI), This1Reg) |
6308 | 13 | .addReg(Start1Reg).addMBB(StartMBB) |
6309 | 13 | .addReg(End1Reg).addMBB(LoopMBB); |
6310 | 13 | BuildMI(MBB, DL, TII->get(SystemZ::PHI), This2Reg) |
6311 | 13 | .addReg(Start2Reg).addMBB(StartMBB) |
6312 | 13 | .addReg(End2Reg).addMBB(LoopMBB); |
6313 | 13 | BuildMI(MBB, DL, TII->get(TargetOpcode::COPY), SystemZ::R0L).addReg(CharReg); |
6314 | 13 | BuildMI(MBB, DL, TII->get(Opcode)) |
6315 | 13 | .addReg(End1Reg, RegState::Define).addReg(End2Reg, RegState::Define) |
6316 | 13 | .addReg(This1Reg).addReg(This2Reg); |
6317 | 13 | BuildMI(MBB, DL, TII->get(SystemZ::BRC)) |
6318 | 13 | .addImm(SystemZ::CCMASK_ANY).addImm(SystemZ::CCMASK_3).addMBB(LoopMBB); |
6319 | 13 | MBB->addSuccessor(LoopMBB); |
6320 | 13 | MBB->addSuccessor(DoneMBB); |
6321 | 13 | |
6322 | 13 | DoneMBB->addLiveIn(SystemZ::CC); |
6323 | 13 | |
6324 | 13 | MI.eraseFromParent(); |
6325 | 13 | return DoneMBB; |
6326 | 13 | } |
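// A minimal C++ sketch of the control shape above (the helper type, names
// and std::function step are illustrative assumptions). CLST, MVST and SRST
// may stop early with CC == 3 after a CPU-determined number of bytes, so the
// pseudo expands into a loop that re-issues the operation from the updated
// addresses until CC != 3.
#include <functional>

struct StringStep { const char *End1; const char *End2; unsigned CC; };

unsigned runStringOp(const char *Start1, const char *Start2,
                     const std::function<StringStep(const char *, const char *)> &Step) {
  const char *This1 = Start1, *This2 = Start2;   // the PHIs in LoopMBB
  StringStep R;
  do {
    R = Step(This1, This2);                      // one CLST-style attempt
    This1 = R.End1;
    This2 = R.End2;
  } while (R.CC == 3);                           // BRC CCMASK_ANY, CCMASK_3, LoopMBB
  return R.CC;                                   // final CC consumed in DoneMBB
}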
6327 | | |
6328 | | // Update TBEGIN instruction with final opcode and register clobbers. |
6329 | | MachineBasicBlock *SystemZTargetLowering::emitTransactionBegin( |
6330 | | MachineInstr &MI, MachineBasicBlock *MBB, unsigned Opcode, |
6331 | 12 | bool NoFloat) const { |
6332 | 12 | MachineFunction &MF = *MBB->getParent(); |
6333 | 12 | const TargetFrameLowering *TFI = Subtarget.getFrameLowering(); |
6334 | 12 | const SystemZInstrInfo *TII = Subtarget.getInstrInfo(); |
6335 | 12 | |
6336 | 12 | // Update opcode. |
6337 | 12 | MI.setDesc(TII->get(Opcode)); |
6338 | 12 | |
6339 | 12 | // We cannot handle a TBEGIN that clobbers the stack or frame pointer. |
6340 | 12 | // Make sure to add the corresponding GRSM bits if they are missing. |
6341 | 12 | uint64_t Control = MI.getOperand(2).getImm(); |
6342 | 12 | static const unsigned GPRControlBit[16] = { |
6343 | 12 | 0x8000, 0x8000, 0x4000, 0x4000, 0x2000, 0x2000, 0x1000, 0x1000, |
6344 | 12 | 0x0800, 0x0800, 0x0400, 0x0400, 0x0200, 0x0200, 0x0100, 0x0100 |
6345 | 12 | }; |
6346 | 12 | Control |= GPRControlBit[15]; |
6347 | 12 | if (TFI->hasFP(MF)) |
6348 | 1 | Control |= GPRControlBit[11]; |
6349 | 12 | MI.getOperand(2).setImm(Control); |
6350 | 12 | |
6351 | 12 | // Add GPR clobbers. |
6352 | 204 | for (int I = 0; I < 16; I++) {
6353 | 192 | if ((Control & GPRControlBit[I]) == 0) {
6354 | 16 | unsigned Reg = SystemZMC::GR64Regs[I]; |
6355 | 16 | MI.addOperand(MachineOperand::CreateReg(Reg, true, true)); |
6356 | 16 | } |
6357 | 192 | } |
6358 | 12 | |
6359 | 12 | // Add FPR/VR clobbers. |
6360 | 12 | if (!NoFloat && (Control & 4) != 0) {
6361 | 1 | if (Subtarget.hasVector()) {
6362 | 0 | for (int I = 0; I < 32; I++) {
6363 | 0 | unsigned Reg = SystemZMC::VR128Regs[I]; |
6364 | 0 | MI.addOperand(MachineOperand::CreateReg(Reg, true, true)); |
6365 | 0 | } |
6366 | 1 | } else { |
6367 | 17 | for (int I = 0; I < 16; I++) {
6368 | 16 | unsigned Reg = SystemZMC::FP64Regs[I]; |
6369 | 16 | MI.addOperand(MachineOperand::CreateReg(Reg, true, true)); |
6370 | 16 | } |
6371 | 1 | } |
6372 | 1 | } |
6373 | 12 | |
6374 | 12 | return MBB; |
6375 | 12 | } |
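// A small sketch of how the general-register save mask in the TBEGIN control
// operand maps to clobbers, mirroring the table above (the helper name is
// illustrative; each GRSM bit covers an even/odd register pair, so any GPR
// whose bit is clear is not saved by the transaction and must be modelled as
// clobbered).
#include <vector>

std::vector<int> clobberedGPRs(unsigned Control) {
  static const unsigned GPRControlBit[16] = {
      0x8000, 0x8000, 0x4000, 0x4000, 0x2000, 0x2000, 0x1000, 0x1000,
      0x0800, 0x0800, 0x0400, 0x0400, 0x0200, 0x0200, 0x0100, 0x0100};
  std::vector<int> Clobbers;
  for (int I = 0; I < 16; ++I)
    if ((Control & GPRControlBit[I]) == 0)
      Clobbers.push_back(I);                     // r<I> is not saved
  return Clobbers;
}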
6376 | | |
6377 | | MachineBasicBlock *SystemZTargetLowering::emitLoadAndTestCmp0( |
6378 | 8 | MachineInstr &MI, MachineBasicBlock *MBB, unsigned Opcode) const { |
6379 | 8 | MachineFunction &MF = *MBB->getParent(); |
6380 | 8 | MachineRegisterInfo *MRI = &MF.getRegInfo(); |
6381 | 8 | const SystemZInstrInfo *TII = |
6382 | 8 | static_cast<const SystemZInstrInfo *>(Subtarget.getInstrInfo()); |
6383 | 8 | DebugLoc DL = MI.getDebugLoc(); |
6384 | 8 | |
6385 | 8 | unsigned SrcReg = MI.getOperand(0).getReg(); |
6386 | 8 | |
6387 | 8 | // Create new virtual register of the same class as source. |
6388 | 8 | const TargetRegisterClass *RC = MRI->getRegClass(SrcReg); |
6389 | 8 | unsigned DstReg = MRI->createVirtualRegister(RC); |
6390 | 8 | |
6391 | 8 | // Replace pseudo with a normal load-and-test that models the def as |
6392 | 8 | // well. |
6393 | 8 | BuildMI(*MBB, MI, DL, TII->get(Opcode), DstReg) |
6394 | 8 | .addReg(SrcReg); |
6395 | 8 | MI.eraseFromParent(); |
6396 | 8 | |
6397 | 8 | return MBB; |
6398 | 8 | } |
6399 | | |
6400 | | MachineBasicBlock *SystemZTargetLowering::EmitInstrWithCustomInserter( |
6401 | 1.57k | MachineInstr &MI, MachineBasicBlock *MBB) const { |
6402 | 1.57k | switch (MI.getOpcode()) { |
6403 | 21 | case SystemZ::Select32Mux: |
6404 | 21 | return emitSelect(MI, MBB, |
6405 | 21 | Subtarget.hasLoadStoreOnCond2()? SystemZ::LOCRMux : 0);
6406 | 26 | case SystemZ::Select32: |
6407 | 26 | return emitSelect(MI, MBB, SystemZ::LOCR); |
6408 | 107 | case SystemZ::Select64: |
6409 | 107 | return emitSelect(MI, MBB, SystemZ::LOCGR); |
6410 | 456 | case SystemZ::SelectF32: |
6411 | 456 | case SystemZ::SelectF64: |
6412 | 456 | case SystemZ::SelectF128: |
6413 | 456 | case SystemZ::SelectVR128: |
6414 | 456 | return emitSelect(MI, MBB, 0); |
6415 | 456 | |
6416 | 2 | case SystemZ::CondStore8Mux: |
6417 | 2 | return emitCondStore(MI, MBB, SystemZ::STCMux, 0, false); |
6418 | 0 | case SystemZ::CondStore8MuxInv: |
6419 | 0 | return emitCondStore(MI, MBB, SystemZ::STCMux, 0, true); |
6420 | 2 | case SystemZ::CondStore16Mux: |
6421 | 2 | return emitCondStore(MI, MBB, SystemZ::STHMux, 0, false); |
6422 | 0 | case SystemZ::CondStore16MuxInv: |
6423 | 0 | return emitCondStore(MI, MBB, SystemZ::STHMux, 0, true); |
6424 | 2 | case SystemZ::CondStore32Mux: |
6425 | 2 | return emitCondStore(MI, MBB, SystemZ::STMux, SystemZ::STOCMux, false); |
6426 | 12 | case SystemZ::CondStore32MuxInv: |
6427 | 12 | return emitCondStore(MI, MBB, SystemZ::STMux, SystemZ::STOCMux, true); |
6428 | 5 | case SystemZ::CondStore8: |
6429 | 5 | return emitCondStore(MI, MBB, SystemZ::STC, 0, false); |
6430 | 13 | case SystemZ::CondStore8Inv: |
6431 | 13 | return emitCondStore(MI, MBB, SystemZ::STC, 0, true); |
6432 | 5 | case SystemZ::CondStore16: |
6433 | 5 | return emitCondStore(MI, MBB, SystemZ::STH, 0, false); |
6434 | 13 | case SystemZ::CondStore16Inv: |
6435 | 13 | return emitCondStore(MI, MBB, SystemZ::STH, 0, true); |
6436 | 9 | case SystemZ::CondStore32: |
6437 | 9 | return emitCondStore(MI, MBB, SystemZ::ST, SystemZ::STOC, false); |
6438 | 23 | case SystemZ::CondStore32Inv: |
6439 | 23 | return emitCondStore(MI, MBB, SystemZ::ST, SystemZ::STOC, true); |
6440 | 2 | case SystemZ::CondStore64: |
6441 | 2 | return emitCondStore(MI, MBB, SystemZ::STG, SystemZ::STOCG, false); |
6442 | 13 | case SystemZ::CondStore64Inv: |
6443 | 13 | return emitCondStore(MI, MBB, SystemZ::STG, SystemZ::STOCG, true); |
6444 | 1 | case SystemZ::CondStoreF32: |
6445 | 1 | return emitCondStore(MI, MBB, SystemZ::STE, 0, false); |
6446 | 9 | case SystemZ::CondStoreF32Inv: |
6447 | 9 | return emitCondStore(MI, MBB, SystemZ::STE, 0, true); |
6448 | 1 | case SystemZ::CondStoreF64: |
6449 | 1 | return emitCondStore(MI, MBB, SystemZ::STD, 0, false); |
6450 | 9 | case SystemZ::CondStoreF64Inv: |
6451 | 9 | return emitCondStore(MI, MBB, SystemZ::STD, 0, true); |
6452 | 456 | |
6453 | 22 | case SystemZ::PAIR128: |
6454 | 22 | return emitPair128(MI, MBB); |
6455 | 136 | case SystemZ::AEXT128: |
6456 | 136 | return emitExt128(MI, MBB, false); |
6457 | 76 | case SystemZ::ZEXT128: |
6458 | 76 | return emitExt128(MI, MBB, true); |
6459 | 456 | |
6460 | 8 | case SystemZ::ATOMIC_SWAPW: |
6461 | 8 | return emitAtomicLoadBinary(MI, MBB, 0, 0); |
6462 | 10 | case SystemZ::ATOMIC_SWAP_32: |
6463 | 10 | return emitAtomicLoadBinary(MI, MBB, 0, 32); |
6464 | 7 | case SystemZ::ATOMIC_SWAP_64: |
6465 | 7 | return emitAtomicLoadBinary(MI, MBB, 0, 64); |
6466 | 456 | |
6467 | 6 | case SystemZ::ATOMIC_LOADW_AR: |
6468 | 6 | return emitAtomicLoadBinary(MI, MBB, SystemZ::AR, 0); |
6469 | 60 | case SystemZ::ATOMIC_LOADW_AFI: |
6470 | 60 | return emitAtomicLoadBinary(MI, MBB, SystemZ::AFI, 0); |
6471 | 1 | case SystemZ::ATOMIC_LOAD_AR: |
6472 | 1 | return emitAtomicLoadBinary(MI, MBB, SystemZ::AR, 32); |
6473 | 8 | case SystemZ::ATOMIC_LOAD_AHI: |
6474 | 8 | return emitAtomicLoadBinary(MI, MBB, SystemZ::AHI, 32); |
6475 | 8 | case SystemZ::ATOMIC_LOAD_AFI: |
6476 | 8 | return emitAtomicLoadBinary(MI, MBB, SystemZ::AFI, 32); |
6477 | 3 | case SystemZ::ATOMIC_LOAD_AGR: |
6478 | 3 | return emitAtomicLoadBinary(MI, MBB, SystemZ::AGR, 64); |
6479 | 8 | case SystemZ::ATOMIC_LOAD_AGHI: |
6480 | 8 | return emitAtomicLoadBinary(MI, MBB, SystemZ::AGHI, 64); |
6481 | 8 | case SystemZ::ATOMIC_LOAD_AGFI: |
6482 | 8 | return emitAtomicLoadBinary(MI, MBB, SystemZ::AGFI, 64); |
6483 | 456 | |
6484 | 6 | case SystemZ::ATOMIC_LOADW_SR: |
6485 | 6 | return emitAtomicLoadBinary(MI, MBB, SystemZ::SR, 0); |
6486 | 1 | case SystemZ::ATOMIC_LOAD_SR: |
6487 | 1 | return emitAtomicLoadBinary(MI, MBB, SystemZ::SR, 32); |
6488 | 3 | case SystemZ::ATOMIC_LOAD_SGR: |
6489 | 3 | return emitAtomicLoadBinary(MI, MBB, SystemZ::SGR, 64); |
6490 | 456 | |
6491 | 6 | case SystemZ::ATOMIC_LOADW_NR: |
6492 | 6 | return emitAtomicLoadBinary(MI, MBB, SystemZ::NR, 0); |
6493 | 30 | case SystemZ::ATOMIC_LOADW_NILH: |
6494 | 30 | return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH, 0); |
6495 | 1 | case SystemZ::ATOMIC_LOAD_NR: |
6496 | 1 | return emitAtomicLoadBinary(MI, MBB, SystemZ::NR, 32); |
6497 | 2 | case SystemZ::ATOMIC_LOAD_NILL: |
6498 | 2 | return emitAtomicLoadBinary(MI, MBB, SystemZ::NILL, 32); |
6499 | 2 | case SystemZ::ATOMIC_LOAD_NILH: |
6500 | 2 | return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH, 32); |
6501 | 3 | case SystemZ::ATOMIC_LOAD_NILF: |
6502 | 3 | return emitAtomicLoadBinary(MI, MBB, SystemZ::NILF, 32); |
6503 | 4 | case SystemZ::ATOMIC_LOAD_NGR: |
6504 | 4 | return emitAtomicLoadBinary(MI, MBB, SystemZ::NGR, 64); |
6505 | 2 | case SystemZ::ATOMIC_LOAD_NILL64: |
6506 | 2 | return emitAtomicLoadBinary(MI, MBB, SystemZ::NILL64, 64); |
6507 | 2 | case SystemZ::ATOMIC_LOAD_NILH64: |
6508 | 2 | return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH64, 64); |
6509 | 2 | case SystemZ::ATOMIC_LOAD_NIHL64: |
6510 | 2 | return emitAtomicLoadBinary(MI, MBB, SystemZ::NIHL64, 64); |
6511 | 2 | case SystemZ::ATOMIC_LOAD_NIHH64: |
6512 | 2 | return emitAtomicLoadBinary(MI, MBB, SystemZ::NIHH64, 64); |
6513 | 2 | case SystemZ::ATOMIC_LOAD_NILF64: |
6514 | 2 | return emitAtomicLoadBinary(MI, MBB, SystemZ::NILF64, 64); |
6515 | 3 | case SystemZ::ATOMIC_LOAD_NIHF64: |
6516 | 3 | return emitAtomicLoadBinary(MI, MBB, SystemZ::NIHF64, 64); |
6517 | 456 | |
6518 | 6 | case SystemZ::ATOMIC_LOADW_OR: |
6519 | 6 | return emitAtomicLoadBinary(MI, MBB, SystemZ::OR, 0); |
6520 | 30 | case SystemZ::ATOMIC_LOADW_OILH: |
6521 | 30 | return emitAtomicLoadBinary(MI, MBB, SystemZ::OILH, 0); |
6522 | 1 | case SystemZ::ATOMIC_LOAD_OR: |
6523 | 1 | return emitAtomicLoadBinary(MI, MBB, SystemZ::OR, 32); |
6524 | 2 | case SystemZ::ATOMIC_LOAD_OILL: |
6525 | 2 | return emitAtomicLoadBinary(MI, MBB, SystemZ::OILL, 32); |
6526 | 2 | case SystemZ::ATOMIC_LOAD_OILH: |
6527 | 2 | return emitAtomicLoadBinary(MI, MBB, SystemZ::OILH, 32); |
6528 | 3 | case SystemZ::ATOMIC_LOAD_OILF: |
6529 | 3 | return emitAtomicLoadBinary(MI, MBB, SystemZ::OILF, 32); |
6530 | 3 | case SystemZ::ATOMIC_LOAD_OGR: |
6531 | 3 | return emitAtomicLoadBinary(MI, MBB, SystemZ::OGR, 64); |
6532 | 2 | case SystemZ::ATOMIC_LOAD_OILL64: |
6533 | 2 | return emitAtomicLoadBinary(MI, MBB, SystemZ::OILL64, 64); |
6534 | 2 | case SystemZ::ATOMIC_LOAD_OILH64: |
6535 | 2 | return emitAtomicLoadBinary(MI, MBB, SystemZ::OILH64, 64); |
6536 | 2 | case SystemZ::ATOMIC_LOAD_OIHL64: |
6537 | 2 | return emitAtomicLoadBinary(MI, MBB, SystemZ::OIHL64, 64); |
6538 | 2 | case SystemZ::ATOMIC_LOAD_OIHH64: |
6539 | 2 | return emitAtomicLoadBinary(MI, MBB, SystemZ::OIHH64, 64); |
6540 | 3 | case SystemZ::ATOMIC_LOAD_OILF64: |
6541 | 3 | return emitAtomicLoadBinary(MI, MBB, SystemZ::OILF64, 64); |
6542 | 2 | case SystemZ::ATOMIC_LOAD_OIHF64: |
6543 | 2 | return emitAtomicLoadBinary(MI, MBB, SystemZ::OIHF64, 64); |
6544 | 456 | |
6545 | 6 | case SystemZ::ATOMIC_LOADW_XR: |
6546 | 6 | return emitAtomicLoadBinary(MI, MBB, SystemZ::XR, 0); |
6547 | 30 | case SystemZ::ATOMIC_LOADW_XILF: |
6548 | 30 | return emitAtomicLoadBinary(MI, MBB, SystemZ::XILF, 0); |
6549 | 1 | case SystemZ::ATOMIC_LOAD_XR: |
6550 | 1 | return emitAtomicLoadBinary(MI, MBB, SystemZ::XR, 32); |
6551 | 3 | case SystemZ::ATOMIC_LOAD_XILF: |
6552 | 3 | return emitAtomicLoadBinary(MI, MBB, SystemZ::XILF, 32); |
6553 | 3 | case SystemZ::ATOMIC_LOAD_XGR: |
6554 | 3 | return emitAtomicLoadBinary(MI, MBB, SystemZ::XGR, 64); |
6555 | 2 | case SystemZ::ATOMIC_LOAD_XILF64: |
6556 | 2 | return emitAtomicLoadBinary(MI, MBB, SystemZ::XILF64, 64); |
6557 | 2 | case SystemZ::ATOMIC_LOAD_XIHF64: |
6558 | 2 | return emitAtomicLoadBinary(MI, MBB, SystemZ::XIHF64, 64); |
6559 | 456 | |
6560 | 6 | case SystemZ::ATOMIC_LOADW_NRi: |
6561 | 6 | return emitAtomicLoadBinary(MI, MBB, SystemZ::NR, 0, true); |
6562 | 30 | case SystemZ::ATOMIC_LOADW_NILHi: |
6563 | 30 | return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH, 0, true); |
6564 | 1 | case SystemZ::ATOMIC_LOAD_NRi: |
6565 | 1 | return emitAtomicLoadBinary(MI, MBB, SystemZ::NR, 32, true); |
6566 | 2 | case SystemZ::ATOMIC_LOAD_NILLi: |
6567 | 2 | return emitAtomicLoadBinary(MI, MBB, SystemZ::NILL, 32, true); |
6568 | 2 | case SystemZ::ATOMIC_LOAD_NILHi: |
6569 | 2 | return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH, 32, true); |
6570 | 3 | case SystemZ::ATOMIC_LOAD_NILFi: |
6571 | 3 | return emitAtomicLoadBinary(MI, MBB, SystemZ::NILF, 32, true); |
6572 | 4 | case SystemZ::ATOMIC_LOAD_NGRi: |
6573 | 4 | return emitAtomicLoadBinary(MI, MBB, SystemZ::NGR, 64, true); |
6574 | 2 | case SystemZ::ATOMIC_LOAD_NILL64i: |
6575 | 2 | return emitAtomicLoadBinary(MI, MBB, SystemZ::NILL64, 64, true); |
6576 | 2 | case SystemZ::ATOMIC_LOAD_NILH64i: |
6577 | 2 | return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH64, 64, true); |
6578 | 2 | case SystemZ::ATOMIC_LOAD_NIHL64i: |
6579 | 2 | return emitAtomicLoadBinary(MI, MBB, SystemZ::NIHL64, 64, true); |
6580 | 2 | case SystemZ::ATOMIC_LOAD_NIHH64i: |
6581 | 2 | return emitAtomicLoadBinary(MI, MBB, SystemZ::NIHH64, 64, true); |
6582 | 2 | case SystemZ::ATOMIC_LOAD_NILF64i: |
6583 | 2 | return emitAtomicLoadBinary(MI, MBB, SystemZ::NILF64, 64, true); |
6584 | 3 | case SystemZ::ATOMIC_LOAD_NIHF64i: |
6585 | 3 | return emitAtomicLoadBinary(MI, MBB, SystemZ::NIHF64, 64, true); |
6586 | 456 | |
6587 | 12 | case SystemZ::ATOMIC_LOADW_MIN: |
6588 | 12 | return emitAtomicLoadMinMax(MI, MBB, SystemZ::CR, |
6589 | 12 | SystemZ::CCMASK_CMP_LE, 0); |
6590 | 10 | case SystemZ::ATOMIC_LOAD_MIN_32: |
6591 | 10 | return emitAtomicLoadMinMax(MI, MBB, SystemZ::CR, |
6592 | 10 | SystemZ::CCMASK_CMP_LE, 32); |
6593 | 7 | case SystemZ::ATOMIC_LOAD_MIN_64: |
6594 | 7 | return emitAtomicLoadMinMax(MI, MBB, SystemZ::CGR, |
6595 | 7 | SystemZ::CCMASK_CMP_LE, 64); |
6596 | 456 | |
6597 | 12 | case SystemZ::ATOMIC_LOADW_MAX: |
6598 | 12 | return emitAtomicLoadMinMax(MI, MBB, SystemZ::CR, |
6599 | 12 | SystemZ::CCMASK_CMP_GE, 0); |
6600 | 1 | case SystemZ::ATOMIC_LOAD_MAX_32: |
6601 | 1 | return emitAtomicLoadMinMax(MI, MBB, SystemZ::CR, |
6602 | 1 | SystemZ::CCMASK_CMP_GE, 32); |
6603 | 1 | case SystemZ::ATOMIC_LOAD_MAX_64: |
6604 | 1 | return emitAtomicLoadMinMax(MI, MBB, SystemZ::CGR, |
6605 | 1 | SystemZ::CCMASK_CMP_GE, 64); |
6606 | 456 | |
6607 | 12 | case SystemZ::ATOMIC_LOADW_UMIN: |
6608 | 12 | return emitAtomicLoadMinMax(MI, MBB, SystemZ::CLR, |
6609 | 12 | SystemZ::CCMASK_CMP_LE, 0); |
6610 | 1 | case SystemZ::ATOMIC_LOAD_UMIN_32: |
6611 | 1 | return emitAtomicLoadMinMax(MI, MBB, SystemZ::CLR, |
6612 | 1 | SystemZ::CCMASK_CMP_LE, 32); |
6613 | 1 | case SystemZ::ATOMIC_LOAD_UMIN_64: |
6614 | 1 | return emitAtomicLoadMinMax(MI, MBB, SystemZ::CLGR, |
6615 | 1 | SystemZ::CCMASK_CMP_LE, 64); |
6616 | 456 | |
6617 | 12 | case SystemZ::ATOMIC_LOADW_UMAX: |
6618 | 12 | return emitAtomicLoadMinMax(MI, MBB, SystemZ::CLR, |
6619 | 12 | SystemZ::CCMASK_CMP_GE, 0); |
6620 | 1 | case SystemZ::ATOMIC_LOAD_UMAX_32: |
6621 | 1 | return emitAtomicLoadMinMax(MI, MBB, SystemZ::CLR, |
6622 | 1 | SystemZ::CCMASK_CMP_GE, 32); |
6623 | 1 | case SystemZ::ATOMIC_LOAD_UMAX_64: |
6624 | 1 | return emitAtomicLoadMinMax(MI, MBB, SystemZ::CLGR, |
6625 | 1 | SystemZ::CCMASK_CMP_GE, 64); |
6626 | 456 | |
6627 | 16 | case SystemZ::ATOMIC_CMP_SWAPW: |
6628 | 16 | return emitAtomicCmpSwapW(MI, MBB); |
6629 | 73 | case SystemZ::MVCSequence: |
6630 | 73 | case SystemZ::MVCLoop: |
6631 | 73 | return emitMemMemWrapper(MI, MBB, SystemZ::MVC); |
6632 | 17 | case SystemZ::NCSequence: |
6633 | 17 | case SystemZ::NCLoop: |
6634 | 17 | return emitMemMemWrapper(MI, MBB, SystemZ::NC); |
6635 | 4 | case SystemZ::OCSequence: |
6636 | 4 | case SystemZ::OCLoop: |
6637 | 4 | return emitMemMemWrapper(MI, MBB, SystemZ::OC); |
6638 | 20 | case SystemZ::XCSequence: |
6639 | 20 | case SystemZ::XCLoop: |
6640 | 20 | return emitMemMemWrapper(MI, MBB, SystemZ::XC); |
6641 | 12 | case SystemZ::CLCSequence: |
6642 | 12 | case SystemZ::CLCLoop: |
6643 | 12 | return emitMemMemWrapper(MI, MBB, SystemZ::CLC); |
6644 | 3 | case SystemZ::CLSTLoop: |
6645 | 3 | return emitStringWrapper(MI, MBB, SystemZ::CLST); |
6646 | 3 | case SystemZ::MVSTLoop: |
6647 | 3 | return emitStringWrapper(MI, MBB, SystemZ::MVST); |
6648 | 7 | case SystemZ::SRSTLoop: |
6649 | 7 | return emitStringWrapper(MI, MBB, SystemZ::SRST); |
6650 | 1 | case SystemZ::TBEGIN: |
6651 | 1 | return emitTransactionBegin(MI, MBB, SystemZ::TBEGIN, false); |
6652 | 10 | case SystemZ::TBEGIN_nofloat: |
6653 | 10 | return emitTransactionBegin(MI, MBB, SystemZ::TBEGIN, true); |
6654 | 1 | case SystemZ::TBEGINC: |
6655 | 1 | return emitTransactionBegin(MI, MBB, SystemZ::TBEGINC, true); |
6656 | 4 | case SystemZ::LTEBRCompare_VecPseudo: |
6657 | 4 | return emitLoadAndTestCmp0(MI, MBB, SystemZ::LTEBR); |
6658 | 4 | case SystemZ::LTDBRCompare_VecPseudo: |
6659 | 4 | return emitLoadAndTestCmp0(MI, MBB, SystemZ::LTDBR); |
6660 | 0 | case SystemZ::LTXBRCompare_VecPseudo: |
6661 | 0 | return emitLoadAndTestCmp0(MI, MBB, SystemZ::LTXBR); |
6662 | 12 | |
6663 | 0 | default: |
6664 | 0 | llvm_unreachable("Unexpected instr type to insert"); |
6665 | 0 | } |
6666 | 0 | } |
6667 | | |
6668 | | // This is only used by the isel schedulers, and is needed only to prevent |
6669 |  | // the compiler from crashing when list-ilp is used.
6670 | | const TargetRegisterClass * |
6671 | 177 | SystemZTargetLowering::getRepRegClassFor(MVT VT) const { |
6672 | 177 | if (VT == MVT::Untyped) |
6673 | 32 | return &SystemZ::ADDR128BitRegClass; |
6674 | 145 | return TargetLowering::getRepRegClassFor(VT); |
6675 | 145 | } |