/Users/buildslave/jenkins/sharedspace/clang-stage2-coverage-R@2/llvm/lib/Target/PowerPC/PPCVSXSwapRemoval.cpp
Line | Count | Source (jump to first uncovered line) |
1 | | //===----------- PPCVSXSwapRemoval.cpp - Remove VSX LE Swaps -------------===// |
2 | | // |
3 | | // The LLVM Compiler Infrastructure |
4 | | // |
5 | | // This file is distributed under the University of Illinois Open Source |
6 | | // License. See LICENSE.TXT for details. |
7 | | // |
8 | | //===---------------------------------------------------------------------===// |
9 | | // |
10 | | // This pass analyzes vector computations and removes unnecessary |
11 | | // doubleword swaps (xxswapd instructions). This pass is performed |
12 | | // only for little-endian VSX code generation. |
13 | | // |
14 | | // For this specific case, loads and stores of v4i32, v4f32, v2i64, |
15 | | // and v2f64 vectors are inefficient. These are implemented using |
16 | | // the lxvd2x and stxvd2x instructions, which invert the order of |
17 | | // doublewords in a vector register. Thus code generation inserts |
18 | | // an xxswapd after each such load, and prior to each such store. |
19 | | // |
20 | | // The extra xxswapd instructions reduce performance. The purpose |
21 | | // of this pass is to reduce the number of xxswapd instructions |
22 | | // required for correctness. |
23 | | // |
24 | | // The primary insight is that much code that operates on vectors |
25 | | // does not care about the relative order of elements in a register, |
26 | | // so long as the correct memory order is preserved. If we have a |
27 | | // computation where all input values are provided by lxvd2x/xxswapd, |
28 | | // all outputs are stored using xxswapd/lxvd2x, and all intermediate |
29 | | // computations are lane-insensitive (independent of element order), |
30 | | // then all the xxswapd instructions associated with the loads and |
31 | | // stores may be removed without changing observable semantics. |
32 | | // |
33 | | // This pass uses standard equivalence class infrastructure to create |
34 | | // maximal webs of computations fitting the above description. Each |
35 | | // such web is then optimized by removing its unnecessary xxswapd |
36 | | // instructions. |
37 | | // |
38 | | // There are some lane-sensitive operations for which we can still |
39 | | // permit the optimization, provided we modify those operations |
40 | | // accordingly. Such operations are identified as using "special |
41 | | // handling" within this module. |
42 | | // |
43 | | //===---------------------------------------------------------------------===// |
44 | | |
45 | | #include "PPC.h" |
46 | | #include "PPCInstrBuilder.h" |
47 | | #include "PPCInstrInfo.h" |
48 | | #include "PPCTargetMachine.h" |
49 | | #include "llvm/ADT/DenseMap.h" |
50 | | #include "llvm/ADT/EquivalenceClasses.h" |
51 | | #include "llvm/CodeGen/MachineFunctionPass.h" |
52 | | #include "llvm/CodeGen/MachineInstrBuilder.h" |
53 | | #include "llvm/CodeGen/MachineRegisterInfo.h" |
54 | | #include "llvm/Support/Debug.h" |
55 | | #include "llvm/Support/Format.h" |
56 | | #include "llvm/Support/raw_ostream.h" |
57 | | |
58 | | using namespace llvm; |
59 | | |
60 | | #define DEBUG_TYPE "ppc-vsx-swaps" |
61 | | |
62 | | namespace llvm { |
63 | | void initializePPCVSXSwapRemovalPass(PassRegistry&); |
64 | | } |
65 | | |
66 | | namespace { |
67 | | |
68 | | // A PPCVSXSwapEntry is created for each machine instruction that |
69 | | // is relevant to a vector computation. |
70 | | struct PPCVSXSwapEntry { |
71 | | // Pointer to the instruction. |
72 | | MachineInstr *VSEMI; |
73 | | |
74 | | // Unique ID (position in the swap vector). |
75 | | int VSEId; |
76 | | |
77 | | // Attributes of this node. |
78 | | unsigned int IsLoad : 1; |
79 | | unsigned int IsStore : 1; |
80 | | unsigned int IsSwap : 1; |
81 | | unsigned int MentionsPhysVR : 1; |
82 | | unsigned int IsSwappable : 1; |
83 | | unsigned int MentionsPartialVR : 1; |
84 | | unsigned int SpecialHandling : 3; |
85 | | unsigned int WebRejected : 1; |
86 | | unsigned int WillRemove : 1; |
87 | | }; |
88 | | |
// Codes stored in PPCVSXSwapEntry::SpecialHandling describing how a
// lane-sensitive instruction can be adjusted to tolerate swapped lanes.
// Values are made explicit; they must fit in the 3-bit field.
enum SHValues {
  SH_NONE      = 0, // no special handling required
  SH_EXTRACT   = 1, // element extract: adjust source lane
  SH_INSERT    = 2, // element insert: adjust target lane
  SH_NOSWAP_LD = 3, // non-permuting load
  SH_NOSWAP_ST = 4, // non-permuting store
  SH_SPLAT     = 5, // splat: adjust source lane
  SH_XXPERMDI  = 6, // XXPERMDI: adjust immediate/operands
  SH_COPYWIDEN = 7  // scalar-to-vector widening copy: introduce a swap
};
99 | | |
100 | | struct PPCVSXSwapRemoval : public MachineFunctionPass { |
101 | | |
102 | | static char ID; |
103 | | const PPCInstrInfo *TII; |
104 | | MachineFunction *MF; |
105 | | MachineRegisterInfo *MRI; |
106 | | |
107 | | // Swap entries are allocated in a vector for better performance. |
108 | | std::vector<PPCVSXSwapEntry> SwapVector; |
109 | | |
110 | | // A mapping is maintained between machine instructions and |
111 | | // their swap entries. The key is the address of the MI. |
112 | | DenseMap<MachineInstr*, int> SwapMap; |
113 | | |
114 | | // Equivalence classes are used to gather webs of related computation. |
115 | | // Swap entries are represented by their VSEId fields. |
116 | | EquivalenceClasses<int> *EC; |
117 | | |
118 | 233 | PPCVSXSwapRemoval() : MachineFunctionPass(ID) { |
119 | 233 | initializePPCVSXSwapRemovalPass(*PassRegistry::getPassRegistry()); |
120 | 233 | } |
121 | | |
122 | | private: |
123 | | // Initialize data structures. |
124 | | void initialize(MachineFunction &MFParm); |
125 | | |
126 | | // Walk the machine instructions to gather vector usage information. |
127 | | // Return true iff vector mentions are present. |
128 | | bool gatherVectorInstructions(); |
129 | | |
130 | | // Add an entry to the swap vector and swap map. |
131 | | int addSwapEntry(MachineInstr *MI, PPCVSXSwapEntry &SwapEntry); |
132 | | |
133 | | // Hunt backwards through COPY and SUBREG_TO_REG chains for a |
134 | | // source register. VecIdx indicates the swap vector entry to |
135 | | // mark as mentioning a physical register if the search leads |
136 | | // to one. |
137 | | unsigned lookThruCopyLike(unsigned SrcReg, unsigned VecIdx); |
138 | | |
139 | | // Generate equivalence classes for related computations (webs). |
140 | | void formWebs(); |
141 | | |
142 | | // Analyze webs and determine those that cannot be optimized. |
143 | | void recordUnoptimizableWebs(); |
144 | | |
145 | | // Record which swap instructions can be safely removed. |
146 | | void markSwapsForRemoval(); |
147 | | |
148 | | // Remove swaps and update other instructions requiring special |
149 | | // handling. Return true iff any changes are made. |
150 | | bool removeSwaps(); |
151 | | |
152 | | // Insert a swap instruction from SrcReg to DstReg at the given |
153 | | // InsertPoint. |
154 | | void insertSwap(MachineInstr *MI, MachineBasicBlock::iterator InsertPoint, |
155 | | unsigned DstReg, unsigned SrcReg); |
156 | | |
157 | | // Update instructions requiring special handling. |
158 | | void handleSpecialSwappables(int EntryIdx); |
159 | | |
160 | | // Dump a description of the entries in the swap vector. |
161 | | void dumpSwapVector(); |
162 | | |
163 | | // Return true iff the given register is in the given class. |
164 | 277k | bool isRegInClass(unsigned Reg, const TargetRegisterClass *RC) { |
165 | 277k | if (TargetRegisterInfo::isVirtualRegister(Reg)) |
166 | 155k | return RC->hasSubClassEq(MRI->getRegClass(Reg)); |
167 | 122k | return RC->contains(Reg); |
168 | 122k | } |
169 | | |
170 | | // Return true iff the given register is a full vector register. |
171 | 56.1k | bool isVecReg(unsigned Reg) { |
172 | 56.1k | return (isRegInClass(Reg, &PPC::VSRCRegClass) || |
173 | 43.7k | isRegInClass(Reg, &PPC::VRRCRegClass)); |
174 | 56.1k | } |
175 | | |
176 | | // Return true iff the given register is a partial vector register. |
177 | 92.1k | bool isScalarVecReg(unsigned Reg) { |
178 | 92.1k | return (isRegInClass(Reg, &PPC::VSFRCRegClass) || |
179 | 85.8k | isRegInClass(Reg, &PPC::VSSRCRegClass)); |
180 | 92.1k | } |
181 | | |
182 | | // Return true iff the given register mentions all or part of a |
183 | | // vector register. Also sets Partial to true if the mention |
184 | | // is for just the floating-point register overlap of the register. |
185 | 41.3k | bool isAnyVecReg(unsigned Reg, bool &Partial) { |
186 | 41.3k | if (isScalarVecReg(Reg)) |
187 | 1.58k | Partial = true; |
188 | 39.7k | return isScalarVecReg(Reg) || isVecReg(Reg); |
189 | 41.3k | } |
190 | | |
191 | | public: |
192 | | // Main entry point for this pass. |
193 | 2.41k | bool runOnMachineFunction(MachineFunction &MF) override { |
194 | 2.41k | if (skipFunction(*MF.getFunction())) |
195 | 0 | return false; |
196 | 2.41k | |
197 | 2.41k | // If we don't have VSX on the subtarget, don't do anything. |
198 | 2.41k | // Also, on Power 9 the load and store ops preserve element order and so |
199 | 2.41k | // the swaps are not required. |
200 | 2.41k | const PPCSubtarget &STI = MF.getSubtarget<PPCSubtarget>(); |
201 | 2.41k | if (!STI.hasVSX() || 2.41k !STI.needsSwapsForVSXMemOps()2.29k ) |
202 | 639 | return false; |
203 | 1.77k | |
204 | 1.77k | bool Changed = false; |
205 | 1.77k | initialize(MF); |
206 | 1.77k | |
207 | 1.77k | if (gatherVectorInstructions()1.77k ) { |
208 | 711 | formWebs(); |
209 | 711 | recordUnoptimizableWebs(); |
210 | 711 | markSwapsForRemoval(); |
211 | 711 | Changed = removeSwaps(); |
212 | 711 | } |
213 | 2.41k | |
214 | 2.41k | // FIXME: See the allocation of EC in initialize(). |
215 | 2.41k | delete EC; |
216 | 2.41k | return Changed; |
217 | 2.41k | } |
218 | | }; |
219 | | |
220 | | // Initialize data structures for this pass. In particular, clear the |
221 | | // swap vector and allocate the equivalence class mapping before |
222 | | // processing each function. |
223 | 1.77k | void PPCVSXSwapRemoval::initialize(MachineFunction &MFParm) { |
224 | 1.77k | MF = &MFParm; |
225 | 1.77k | MRI = &MF->getRegInfo(); |
226 | 1.77k | TII = MF->getSubtarget<PPCSubtarget>().getInstrInfo(); |
227 | 1.77k | |
228 | 1.77k | // An initial vector size of 256 appears to work well in practice. |
229 | 1.77k | // Small/medium functions with vector content tend not to incur a |
230 | 1.77k | // reallocation at this size. Three of the vector tests in |
231 | 1.77k | // projects/test-suite reallocate, which seems like a reasonable rate. |
232 | 1.77k | const int InitialVectorSize(256); |
233 | 1.77k | SwapVector.clear(); |
234 | 1.77k | SwapVector.reserve(InitialVectorSize); |
235 | 1.77k | |
236 | 1.77k | // FIXME: Currently we allocate EC each time because we don't have |
237 | 1.77k | // access to the set representation on which to call clear(). Should |
238 | 1.77k | // consider adding a clear() method to the EquivalenceClasses class. |
239 | 1.77k | EC = new EquivalenceClasses<int>; |
240 | 1.77k | } |
241 | | |
242 | | // Create an entry in the swap vector for each instruction that mentions |
243 | | // a full vector register, recording various characteristics of the |
244 | | // instructions there. |
245 | 1.77k | bool PPCVSXSwapRemoval::gatherVectorInstructions() { |
246 | 1.77k | bool RelevantFunction = false; |
247 | 1.77k | |
248 | 3.95k | for (MachineBasicBlock &MBB : *MF) { |
249 | 21.7k | for (MachineInstr &MI : MBB) { |
250 | 21.7k | |
251 | 21.7k | if (MI.isDebugValue()) |
252 | 1 | continue; |
253 | 21.7k | |
254 | 21.7k | bool RelevantInstr = false; |
255 | 21.7k | bool Partial = false; |
256 | 21.7k | |
257 | 53.0k | for (const MachineOperand &MO : MI.operands()) { |
258 | 53.0k | if (!MO.isReg()) |
259 | 11.7k | continue; |
260 | 41.3k | unsigned Reg = MO.getReg(); |
261 | 41.3k | if (isAnyVecReg(Reg, Partial)41.3k ) { |
262 | 4.91k | RelevantInstr = true; |
263 | 4.91k | break; |
264 | 4.91k | } |
265 | 21.7k | } |
266 | 21.7k | |
267 | 21.7k | if (!RelevantInstr) |
268 | 16.8k | continue; |
269 | 4.91k | |
270 | 4.91k | RelevantFunction = true; |
271 | 4.91k | |
272 | 4.91k | // Create a SwapEntry initialized to zeros, then fill in the |
273 | 4.91k | // instruction and ID fields before pushing it to the back |
274 | 4.91k | // of the swap vector. |
275 | 4.91k | PPCVSXSwapEntry SwapEntry{}; |
276 | 4.91k | int VecIdx = addSwapEntry(&MI, SwapEntry); |
277 | 4.91k | |
278 | 4.91k | switch(MI.getOpcode()) { |
279 | 1.62k | default: |
280 | 1.62k | // Unless noted otherwise, an instruction is considered |
281 | 1.62k | // safe for the optimization. There are a large number of |
282 | 1.62k | // such true-SIMD instructions (all vector math, logical, |
283 | 1.62k | // select, compare, etc.). However, if the instruction |
284 | 1.62k | // mentions a partial vector register and does not have |
285 | 1.62k | // special handling defined, it is not swappable. |
286 | 1.62k | if (Partial) |
287 | 736 | SwapVector[VecIdx].MentionsPartialVR = 1; |
288 | 1.62k | else |
289 | 893 | SwapVector[VecIdx].IsSwappable = 1; |
290 | 1.62k | break; |
291 | 579 | case PPC::XXPERMDI: { |
292 | 579 | // This is a swap if it is of the form XXPERMDI t, s, s, 2. |
293 | 579 | // Unfortunately, MachineCSE ignores COPY and SUBREG_TO_REG, so we |
294 | 579 | // can also see XXPERMDI t, SUBREG_TO_REG(s), SUBREG_TO_REG(s), 2, |
295 | 579 | // for example. We have to look through chains of COPY and |
296 | 579 | // SUBREG_TO_REG to find the real source value for comparison. |
297 | 579 | // If the real source value is a physical register, then mark the |
298 | 579 | // XXPERMDI as mentioning a physical register. |
299 | 579 | int immed = MI.getOperand(3).getImm(); |
300 | 579 | if (immed == 2579 ) { |
301 | 414 | unsigned trueReg1 = lookThruCopyLike(MI.getOperand(1).getReg(), |
302 | 414 | VecIdx); |
303 | 414 | unsigned trueReg2 = lookThruCopyLike(MI.getOperand(2).getReg(), |
304 | 414 | VecIdx); |
305 | 414 | if (trueReg1 == trueReg2) |
306 | 404 | SwapVector[VecIdx].IsSwap = 1; |
307 | 10 | else { |
308 | 10 | // We can still handle these if the two registers are not |
309 | 10 | // identical, by adjusting the form of the XXPERMDI. |
310 | 10 | SwapVector[VecIdx].IsSwappable = 1; |
311 | 10 | SwapVector[VecIdx].SpecialHandling = SHValues::SH_XXPERMDI; |
312 | 10 | } |
313 | 414 | // This is a doubleword splat if it is of the form |
314 | 414 | // XXPERMDI t, s, s, 0 or XXPERMDI t, s, s, 3. As above we |
315 | 414 | // must look through chains of copy-likes to find the source |
316 | 414 | // register. We turn off the marking for mention of a physical |
317 | 414 | // register, because splatting it is safe; the optimization |
318 | 414 | // will not swap the value in the physical register. Whether |
319 | 414 | // or not the two input registers are identical, we can handle |
320 | 414 | // these by adjusting the form of the XXPERMDI. |
321 | 579 | } else if (165 immed == 0 || 165 immed == 351 ) { |
322 | 149 | |
323 | 149 | SwapVector[VecIdx].IsSwappable = 1; |
324 | 149 | SwapVector[VecIdx].SpecialHandling = SHValues::SH_XXPERMDI; |
325 | 149 | |
326 | 149 | unsigned trueReg1 = lookThruCopyLike(MI.getOperand(1).getReg(), |
327 | 149 | VecIdx); |
328 | 149 | unsigned trueReg2 = lookThruCopyLike(MI.getOperand(2).getReg(), |
329 | 149 | VecIdx); |
330 | 149 | if (trueReg1 == trueReg2) |
331 | 44 | SwapVector[VecIdx].MentionsPhysVR = 0; |
332 | 149 | |
333 | 165 | } else { |
334 | 16 | // We can still handle these by adjusting the form of the XXPERMDI. |
335 | 16 | SwapVector[VecIdx].IsSwappable = 1; |
336 | 16 | SwapVector[VecIdx].SpecialHandling = SHValues::SH_XXPERMDI; |
337 | 16 | } |
338 | 579 | break; |
339 | 4.91k | } |
340 | 87 | case PPC::LVX: |
341 | 87 | // Non-permuting loads are currently unsafe. We can use special |
342 | 87 | // handling for this in the future. By not marking these as |
343 | 87 | // IsSwap, we ensure computations containing them will be rejected |
344 | 87 | // for now. |
345 | 87 | SwapVector[VecIdx].IsLoad = 1; |
346 | 87 | break; |
347 | 211 | case PPC::LXVD2X: |
348 | 211 | case PPC::LXVW4X: |
349 | 211 | // Permuting loads are marked as both load and swap, and are |
350 | 211 | // safe for optimization. |
351 | 211 | SwapVector[VecIdx].IsLoad = 1; |
352 | 211 | SwapVector[VecIdx].IsSwap = 1; |
353 | 211 | break; |
354 | 139 | case PPC::LXSDX: |
355 | 139 | case PPC::LXSSPX: |
356 | 139 | // A load of a floating-point value into the high-order half of |
357 | 139 | // a vector register is safe, provided that we introduce a swap |
358 | 139 | // following the load, which will be done by the SUBREG_TO_REG |
359 | 139 | // support. So just mark these as safe. |
360 | 139 | SwapVector[VecIdx].IsLoad = 1; |
361 | 139 | SwapVector[VecIdx].IsSwappable = 1; |
362 | 139 | break; |
363 | 40 | case PPC::STVX: |
364 | 40 | // Non-permuting stores are currently unsafe. We can use special |
365 | 40 | // handling for this in the future. By not marking these as |
366 | 40 | // IsSwap, we ensure computations containing them will be rejected |
367 | 40 | // for now. |
368 | 40 | SwapVector[VecIdx].IsStore = 1; |
369 | 40 | break; |
370 | 102 | case PPC::STXVD2X: |
371 | 102 | case PPC::STXVW4X: |
372 | 102 | // Permuting stores are marked as both store and swap, and are |
373 | 102 | // safe for optimization. |
374 | 102 | SwapVector[VecIdx].IsStore = 1; |
375 | 102 | SwapVector[VecIdx].IsSwap = 1; |
376 | 102 | break; |
377 | 1.67k | case PPC::COPY: |
378 | 1.67k | // These are fine provided they are moving between full vector |
379 | 1.67k | // register classes. |
380 | 1.67k | if (isVecReg(MI.getOperand(0).getReg()) && |
381 | 1.03k | isVecReg(MI.getOperand(1).getReg())) |
382 | 1.03k | SwapVector[VecIdx].IsSwappable = 1; |
383 | 1.67k | // If we have a copy from one scalar floating-point register |
384 | 1.67k | // to another, we can accept this even if it is a physical |
385 | 1.67k | // register. The only way this gets involved is if it feeds |
386 | 1.67k | // a SUBREG_TO_REG, which is handled by introducing a swap. |
387 | 640 | else if (640 isScalarVecReg(MI.getOperand(0).getReg()) && |
388 | 640 | isScalarVecReg(MI.getOperand(1).getReg())) |
389 | 517 | SwapVector[VecIdx].IsSwappable = 1; |
390 | 1.67k | break; |
391 | 179 | case PPC::SUBREG_TO_REG: { |
392 | 179 | // These are fine provided they are moving between full vector |
393 | 179 | // register classes. If they are moving from a scalar |
394 | 179 | // floating-point class to a vector class, we can handle those |
395 | 179 | // as well, provided we introduce a swap. It is generally the |
396 | 179 | // case that we will introduce fewer swaps than we remove, but |
397 | 179 | // (FIXME) a cost model could be used. However, introduced |
398 | 179 | // swaps could potentially be CSEd, so this is not trivial. |
399 | 179 | if (isVecReg(MI.getOperand(0).getReg()) && |
400 | 179 | isVecReg(MI.getOperand(2).getReg())) |
401 | 0 | SwapVector[VecIdx].IsSwappable = 1; |
402 | 179 | else if (179 isVecReg(MI.getOperand(0).getReg()) && |
403 | 179 | isScalarVecReg(MI.getOperand(2).getReg())179 ) { |
404 | 179 | SwapVector[VecIdx].IsSwappable = 1; |
405 | 179 | SwapVector[VecIdx].SpecialHandling = SHValues::SH_COPYWIDEN; |
406 | 179 | } |
407 | 179 | break; |
408 | 102 | } |
409 | 48 | case PPC::VSPLTB: |
410 | 48 | case PPC::VSPLTH: |
411 | 48 | case PPC::VSPLTW: |
412 | 48 | case PPC::XXSPLTW: |
413 | 48 | // Splats are lane-sensitive, but we can use special handling |
414 | 48 | // to adjust the source lane for the splat. |
415 | 48 | SwapVector[VecIdx].IsSwappable = 1; |
416 | 48 | SwapVector[VecIdx].SpecialHandling = SHValues::SH_SPLAT; |
417 | 48 | break; |
418 | 48 | // The presence of the following lane-sensitive operations in a |
419 | 48 | // web will kill the optimization, at least for now. For these |
420 | 48 | // we do nothing, causing the optimization to fail. |
421 | 48 | // FIXME: Some of these could be permitted with special handling, |
422 | 48 | // and will be phased in as time permits. |
423 | 48 | // FIXME: There is no simple and maintainable way to express a set |
424 | 48 | // of opcodes having a common attribute in TableGen. Should this |
425 | 48 | // change, this is a prime candidate to use such a mechanism. |
426 | 224 | case PPC::INLINEASM: |
427 | 224 | case PPC::EXTRACT_SUBREG: |
428 | 224 | case PPC::INSERT_SUBREG: |
429 | 224 | case PPC::COPY_TO_REGCLASS: |
430 | 224 | case PPC::LVEBX: |
431 | 224 | case PPC::LVEHX: |
432 | 224 | case PPC::LVEWX: |
433 | 224 | case PPC::LVSL: |
434 | 224 | case PPC::LVSR: |
435 | 224 | case PPC::LVXL: |
436 | 224 | case PPC::STVEBX: |
437 | 224 | case PPC::STVEHX: |
438 | 224 | case PPC::STVEWX: |
439 | 224 | case PPC::STVXL: |
440 | 224 | // We can handle STXSDX and STXSSPX similarly to LXSDX and LXSSPX, |
441 | 224 | // by adding special handling for narrowing copies as well as |
442 | 224 | // widening ones. However, I've experimented with this, and in |
443 | 224 | // practice we currently do not appear to use STXSDX fed by |
444 | 224 | // a narrowing copy from a full vector register. Since I can't |
445 | 224 | // generate any useful test cases, I've left this alone for now. |
446 | 224 | case PPC::STXSDX: |
447 | 224 | case PPC::STXSSPX: |
448 | 224 | case PPC::VCIPHER: |
449 | 224 | case PPC::VCIPHERLAST: |
450 | 224 | case PPC::VMRGHB: |
451 | 224 | case PPC::VMRGHH: |
452 | 224 | case PPC::VMRGHW: |
453 | 224 | case PPC::VMRGLB: |
454 | 224 | case PPC::VMRGLH: |
455 | 224 | case PPC::VMRGLW: |
456 | 224 | case PPC::VMULESB: |
457 | 224 | case PPC::VMULESH: |
458 | 224 | case PPC::VMULESW: |
459 | 224 | case PPC::VMULEUB: |
460 | 224 | case PPC::VMULEUH: |
461 | 224 | case PPC::VMULEUW: |
462 | 224 | case PPC::VMULOSB: |
463 | 224 | case PPC::VMULOSH: |
464 | 224 | case PPC::VMULOSW: |
465 | 224 | case PPC::VMULOUB: |
466 | 224 | case PPC::VMULOUH: |
467 | 224 | case PPC::VMULOUW: |
468 | 224 | case PPC::VNCIPHER: |
469 | 224 | case PPC::VNCIPHERLAST: |
470 | 224 | case PPC::VPERM: |
471 | 224 | case PPC::VPERMXOR: |
472 | 224 | case PPC::VPKPX: |
473 | 224 | case PPC::VPKSHSS: |
474 | 224 | case PPC::VPKSHUS: |
475 | 224 | case PPC::VPKSDSS: |
476 | 224 | case PPC::VPKSDUS: |
477 | 224 | case PPC::VPKSWSS: |
478 | 224 | case PPC::VPKSWUS: |
479 | 224 | case PPC::VPKUDUM: |
480 | 224 | case PPC::VPKUDUS: |
481 | 224 | case PPC::VPKUHUM: |
482 | 224 | case PPC::VPKUHUS: |
483 | 224 | case PPC::VPKUWUM: |
484 | 224 | case PPC::VPKUWUS: |
485 | 224 | case PPC::VPMSUMB: |
486 | 224 | case PPC::VPMSUMD: |
487 | 224 | case PPC::VPMSUMH: |
488 | 224 | case PPC::VPMSUMW: |
489 | 224 | case PPC::VRLB: |
490 | 224 | case PPC::VRLD: |
491 | 224 | case PPC::VRLH: |
492 | 224 | case PPC::VRLW: |
493 | 224 | case PPC::VSBOX: |
494 | 224 | case PPC::VSHASIGMAD: |
495 | 224 | case PPC::VSHASIGMAW: |
496 | 224 | case PPC::VSL: |
497 | 224 | case PPC::VSLDOI: |
498 | 224 | case PPC::VSLO: |
499 | 224 | case PPC::VSR: |
500 | 224 | case PPC::VSRO: |
501 | 224 | case PPC::VSUM2SWS: |
502 | 224 | case PPC::VSUM4SBS: |
503 | 224 | case PPC::VSUM4SHS: |
504 | 224 | case PPC::VSUM4UBS: |
505 | 224 | case PPC::VSUMSWS: |
506 | 224 | case PPC::VUPKHPX: |
507 | 224 | case PPC::VUPKHSB: |
508 | 224 | case PPC::VUPKHSH: |
509 | 224 | case PPC::VUPKHSW: |
510 | 224 | case PPC::VUPKLPX: |
511 | 224 | case PPC::VUPKLSB: |
512 | 224 | case PPC::VUPKLSH: |
513 | 224 | case PPC::VUPKLSW: |
514 | 224 | case PPC::XXMRGHW: |
515 | 224 | case PPC::XXMRGLW: |
516 | 224 | // XXSLDWI could be replaced by a general permute with one of three |
517 | 224 | // permute control vectors (for shift values 1, 2, 3). However, |
518 | 224 | // VPERM has a more restrictive register class. |
519 | 224 | case PPC::XXSLDWI: |
520 | 224 | break; |
521 | 1.77k | } |
522 | 1.77k | } |
523 | 3.95k | } |
524 | 1.77k | |
525 | 1.77k | if (1.77k RelevantFunction1.77k ) { |
526 | 711 | DEBUG(dbgs() << "Swap vector when first built\n\n"); |
527 | 711 | DEBUG(dumpSwapVector()); |
528 | 711 | } |
529 | 1.77k | |
530 | 1.77k | return RelevantFunction; |
531 | 1.77k | } |
532 | | |
533 | | // Add an entry to the swap vector and swap map, and make a |
534 | | // singleton equivalence class for the entry. |
535 | | int PPCVSXSwapRemoval::addSwapEntry(MachineInstr *MI, |
536 | 4.91k | PPCVSXSwapEntry& SwapEntry) { |
537 | 4.91k | SwapEntry.VSEMI = MI; |
538 | 4.91k | SwapEntry.VSEId = SwapVector.size(); |
539 | 4.91k | SwapVector.push_back(SwapEntry); |
540 | 4.91k | EC->insert(SwapEntry.VSEId); |
541 | 4.91k | SwapMap[MI] = SwapEntry.VSEId; |
542 | 4.91k | return SwapEntry.VSEId; |
543 | 4.91k | } |
544 | | |
545 | | // This is used to find the "true" source register for an |
546 | | // XXPERMDI instruction, since MachineCSE does not handle the |
547 | | // "copy-like" operations (Copy and SubregToReg). Returns |
548 | | // the original SrcReg unless it is the target of a copy-like |
549 | | // operation, in which case we chain backwards through all |
550 | | // such operations to the ultimate source register. If a |
551 | | // physical register is encountered, we stop the search and |
552 | | // flag the swap entry indicated by VecIdx (the original |
553 | | // XXPERMDI) as mentioning a physical register. |
554 | | unsigned PPCVSXSwapRemoval::lookThruCopyLike(unsigned SrcReg, |
555 | 1.34k | unsigned VecIdx) { |
556 | 1.34k | MachineInstr *MI = MRI->getVRegDef(SrcReg); |
557 | 1.34k | if (!MI->isCopyLike()) |
558 | 942 | return SrcReg; |
559 | 402 | |
560 | 402 | unsigned CopySrcReg; |
561 | 402 | if (MI->isCopy()) |
562 | 212 | CopySrcReg = MI->getOperand(1).getReg(); |
563 | 190 | else { |
564 | 190 | assert(MI->isSubregToReg() && "bad opcode for lookThruCopyLike"); |
565 | 190 | CopySrcReg = MI->getOperand(2).getReg(); |
566 | 190 | } |
567 | 402 | |
568 | 402 | if (!TargetRegisterInfo::isVirtualRegister(CopySrcReg)402 ) { |
569 | 184 | if (!isScalarVecReg(CopySrcReg)) |
570 | 156 | SwapVector[VecIdx].MentionsPhysVR = 1; |
571 | 184 | return CopySrcReg; |
572 | 184 | } |
573 | 218 | |
574 | 218 | return lookThruCopyLike(CopySrcReg, VecIdx); |
575 | 218 | } |
576 | | |
577 | | // Generate equivalence classes for related computations (webs) by |
578 | | // def-use relationships of virtual registers. Mention of a physical |
579 | | // register terminates the generation of equivalence classes as this |
580 | | // indicates a use of a parameter, definition of a return value, use |
581 | | // of a value returned from a call, or definition of a parameter to a |
582 | | // call. Computations with physical register mentions are flagged |
583 | | // as such so their containing webs will not be optimized. |
584 | 711 | void PPCVSXSwapRemoval::formWebs() { |
585 | 711 | |
586 | 711 | DEBUG(dbgs() << "\n*** Forming webs for swap removal ***\n\n"); |
587 | 711 | |
588 | 5.62k | for (unsigned EntryIdx = 0; EntryIdx < SwapVector.size()5.62k ; ++EntryIdx4.91k ) { |
589 | 4.91k | |
590 | 4.91k | MachineInstr *MI = SwapVector[EntryIdx].VSEMI; |
591 | 4.91k | |
592 | 4.91k | DEBUG(dbgs() << "\n" << SwapVector[EntryIdx].VSEId << " "); |
593 | 4.91k | DEBUG(MI->dump()); |
594 | 4.91k | |
595 | 4.91k | // It's sufficient to walk vector uses and join them to their unique |
596 | 4.91k | // definitions. In addition, check full vector register operands |
597 | 4.91k | // for physical regs. We exclude partial-vector register operands |
598 | 4.91k | // because we can handle them if copied to a full vector. |
599 | 14.3k | for (const MachineOperand &MO : MI->operands()) { |
600 | 14.3k | if (!MO.isReg()) |
601 | 1.22k | continue; |
602 | 13.1k | |
603 | 13.1k | unsigned Reg = MO.getReg(); |
604 | 13.1k | if (!isVecReg(Reg) && 13.1k !isScalarVecReg(Reg)6.49k ) |
605 | 3.58k | continue; |
606 | 9.57k | |
607 | 9.57k | if (9.57k !TargetRegisterInfo::isVirtualRegister(Reg)9.57k ) { |
608 | 2.10k | if (!(MI->isCopy() && 2.10k isScalarVecReg(Reg)1.40k )) |
609 | 1.61k | SwapVector[EntryIdx].MentionsPhysVR = 1; |
610 | 2.10k | continue; |
611 | 2.10k | } |
612 | 7.47k | |
613 | 7.47k | if (7.47k !MO.isUse()7.47k ) |
614 | 3.34k | continue; |
615 | 4.12k | |
616 | 4.12k | MachineInstr* DefMI = MRI->getVRegDef(Reg); |
617 | 4.12k | assert(SwapMap.find(DefMI) != SwapMap.end() && |
618 | 4.12k | "Inconsistency: def of vector reg not found in swap map!"); |
619 | 4.12k | int DefIdx = SwapMap[DefMI]; |
620 | 4.12k | (void)EC->unionSets(SwapVector[DefIdx].VSEId, |
621 | 4.12k | SwapVector[EntryIdx].VSEId); |
622 | 4.12k | |
623 | 4.12k | DEBUG(dbgs() << format("Unioning %d with %d\n", SwapVector[DefIdx].VSEId, |
624 | 4.12k | SwapVector[EntryIdx].VSEId)); |
625 | 4.12k | DEBUG(dbgs() << " Def: "); |
626 | 4.12k | DEBUG(DefMI->dump()); |
627 | 14.3k | } |
628 | 4.91k | } |
629 | 711 | } |
630 | | |
631 | | // Walk the swap vector entries looking for conditions that prevent their |
632 | | // containing computations from being optimized. When such conditions are |
633 | | // found, mark the representative of the computation's equivalence class |
634 | | // as rejected. |
635 | 711 | void PPCVSXSwapRemoval::recordUnoptimizableWebs() { |
636 | 711 | |
637 | 711 | DEBUG(dbgs() << "\n*** Rejecting webs for swap removal ***\n\n"); |
638 | 711 | |
639 | 5.62k | for (unsigned EntryIdx = 0; EntryIdx < SwapVector.size()5.62k ; ++EntryIdx4.91k ) { |
640 | 4.91k | int Repr = EC->getLeaderValue(SwapVector[EntryIdx].VSEId); |
641 | 4.91k | |
642 | 4.91k | // If representative is already rejected, don't waste further time. |
643 | 4.91k | if (SwapVector[Repr].WebRejected) |
644 | 2.34k | continue; |
645 | 2.56k | |
646 | 2.56k | // Reject webs containing mentions of physical or partial registers, or |
647 | 2.56k | // containing operations that we don't know how to handle in a lane- |
648 | 2.56k | // permuted region. |
649 | 2.56k | if (2.56k SwapVector[EntryIdx].MentionsPhysVR || |
650 | 1.55k | SwapVector[EntryIdx].MentionsPartialVR || |
651 | 2.56k | !(SwapVector[EntryIdx].IsSwappable || 1.37k SwapVector[EntryIdx].IsSwap690 )) { |
652 | 1.29k | |
653 | 1.29k | SwapVector[Repr].WebRejected = 1; |
654 | 1.29k | |
655 | 1.29k | DEBUG(dbgs() << |
656 | 1.29k | format("Web %d rejected for physreg, partial reg, or not " |
657 | 1.29k | "swap[pable]\n", Repr)); |
658 | 1.29k | DEBUG(dbgs() << " in " << EntryIdx << ": "); |
659 | 1.29k | DEBUG(SwapVector[EntryIdx].VSEMI->dump()); |
660 | 1.29k | DEBUG(dbgs() << "\n"); |
661 | 1.29k | } |
662 | 2.56k | |
663 | 2.56k | // Reject webs than contain swapping loads that feed something other |
664 | 2.56k | // than a swap instruction. |
665 | 1.26k | else if (1.26k SwapVector[EntryIdx].IsLoad && 1.26k SwapVector[EntryIdx].IsSwap285 ) { |
666 | 203 | MachineInstr *MI = SwapVector[EntryIdx].VSEMI; |
667 | 203 | unsigned DefReg = MI->getOperand(0).getReg(); |
668 | 203 | |
669 | 203 | // We skip debug instructions in the analysis. (Note that debug |
670 | 203 | // location information is still maintained by this optimization |
671 | 203 | // because it remains on the LXVD2X and STXVD2X instructions after |
672 | 203 | // the XXPERMDIs are removed.) |
673 | 203 | for (MachineInstr &UseMI : MRI->use_nodbg_instructions(DefReg)) { |
674 | 203 | int UseIdx = SwapMap[&UseMI]; |
675 | 203 | |
676 | 203 | if (!SwapVector[UseIdx].IsSwap || 203 SwapVector[UseIdx].IsLoad201 || |
677 | 203 | SwapVector[UseIdx].IsStore201 ) { |
678 | 2 | |
679 | 2 | SwapVector[Repr].WebRejected = 1; |
680 | 2 | |
681 | 2 | DEBUG(dbgs() << |
682 | 2 | format("Web %d rejected for load not feeding swap\n", Repr)); |
683 | 2 | DEBUG(dbgs() << " def " << EntryIdx << ": "); |
684 | 2 | DEBUG(MI->dump()); |
685 | 2 | DEBUG(dbgs() << " use " << UseIdx << ": "); |
686 | 2 | DEBUG(UseMI.dump()); |
687 | 2 | DEBUG(dbgs() << "\n"); |
688 | 2 | } |
689 | 203 | } |
690 | 203 | |
691 | 203 | // Reject webs that contain swapping stores that are fed by something |
692 | 203 | // other than a swap instruction. |
693 | 1.26k | } else if (1.06k SwapVector[EntryIdx].IsStore && 1.06k SwapVector[EntryIdx].IsSwap86 ) { |
694 | 86 | MachineInstr *MI = SwapVector[EntryIdx].VSEMI; |
695 | 86 | unsigned UseReg = MI->getOperand(0).getReg(); |
696 | 86 | MachineInstr *DefMI = MRI->getVRegDef(UseReg); |
697 | 86 | unsigned DefReg = DefMI->getOperand(0).getReg(); |
698 | 86 | int DefIdx = SwapMap[DefMI]; |
699 | 86 | |
700 | 86 | if (!SwapVector[DefIdx].IsSwap || 86 SwapVector[DefIdx].IsLoad86 || |
701 | 86 | SwapVector[DefIdx].IsStore86 ) { |
702 | 0 |
|
703 | 0 | SwapVector[Repr].WebRejected = 1; |
704 | 0 |
|
705 | 0 | DEBUG(dbgs() << |
706 | 0 | format("Web %d rejected for store not fed by swap\n", Repr)); |
707 | 0 | DEBUG(dbgs() << " def " << DefIdx << ": "); |
708 | 0 | DEBUG(DefMI->dump()); |
709 | 0 | DEBUG(dbgs() << " use " << EntryIdx << ": "); |
710 | 0 | DEBUG(MI->dump()); |
711 | 0 | DEBUG(dbgs() << "\n"); |
712 | 0 | } |
713 | 86 | |
714 | 86 | // Ensure all uses of the register defined by DefMI feed store |
715 | 86 | // instructions |
716 | 99 | for (MachineInstr &UseMI : MRI->use_nodbg_instructions(DefReg)) { |
717 | 99 | int UseIdx = SwapMap[&UseMI]; |
718 | 99 | |
719 | 99 | if (SwapVector[UseIdx].VSEMI->getOpcode() != MI->getOpcode()99 ) { |
720 | 1 | SwapVector[Repr].WebRejected = 1; |
721 | 1 | |
722 | 1 | DEBUG(dbgs() << |
723 | 1 | format("Web %d rejected for swap not feeding only stores\n", |
724 | 1 | Repr)); |
725 | 1 | DEBUG(dbgs() << " def " << " : "); |
726 | 1 | DEBUG(DefMI->dump()); |
727 | 1 | DEBUG(dbgs() << " use " << UseIdx << ": "); |
728 | 1 | DEBUG(SwapVector[UseIdx].VSEMI->dump()); |
729 | 1 | DEBUG(dbgs() << "\n"); |
730 | 1 | } |
731 | 99 | } |
732 | 1.26k | } |
733 | 4.91k | } |
734 | 711 | |
735 | 711 | DEBUG(dbgs() << "Swap vector after web analysis:\n\n"); |
736 | 711 | DEBUG(dumpSwapVector()); |
737 | 711 | } |
738 | | |
739 | | // Walk the swap vector entries looking for swaps fed by permuting loads |
740 | | // and swaps that feed permuting stores. If the containing computation |
741 | | // has not been marked rejected, mark each such swap for removal. |
742 | | // (Removal is delayed in case optimization has disturbed the pattern, |
743 | | // such that multiple loads feed the same swap, etc.) |
744 | 711 | void PPCVSXSwapRemoval::markSwapsForRemoval() { |
745 | 711 | |
746 | 711 | DEBUG(dbgs() << "\n*** Marking swaps for removal ***\n\n"); |
747 | 711 | |
748 | 5.62k | for (unsigned EntryIdx = 0; EntryIdx < SwapVector.size()5.62k ; ++EntryIdx4.91k ) { |
749 | 4.91k | |
750 | 4.91k | if (SwapVector[EntryIdx].IsLoad && 4.91k SwapVector[EntryIdx].IsSwap437 ) { |
751 | 211 | int Repr = EC->getLeaderValue(SwapVector[EntryIdx].VSEId); |
752 | 211 | |
753 | 211 | if (!SwapVector[Repr].WebRejected211 ) { |
754 | 75 | MachineInstr *MI = SwapVector[EntryIdx].VSEMI; |
755 | 75 | unsigned DefReg = MI->getOperand(0).getReg(); |
756 | 75 | |
757 | 75 | for (MachineInstr &UseMI : MRI->use_nodbg_instructions(DefReg)) { |
758 | 75 | int UseIdx = SwapMap[&UseMI]; |
759 | 75 | SwapVector[UseIdx].WillRemove = 1; |
760 | 75 | |
761 | 75 | DEBUG(dbgs() << "Marking swap fed by load for removal: "); |
762 | 75 | DEBUG(UseMI.dump()); |
763 | 75 | } |
764 | 75 | } |
765 | 211 | |
766 | 4.91k | } else if (4.70k SwapVector[EntryIdx].IsStore && 4.70k SwapVector[EntryIdx].IsSwap142 ) { |
767 | 102 | int Repr = EC->getLeaderValue(SwapVector[EntryIdx].VSEId); |
768 | 102 | |
769 | 102 | if (!SwapVector[Repr].WebRejected102 ) { |
770 | 71 | MachineInstr *MI = SwapVector[EntryIdx].VSEMI; |
771 | 71 | unsigned UseReg = MI->getOperand(0).getReg(); |
772 | 71 | MachineInstr *DefMI = MRI->getVRegDef(UseReg); |
773 | 71 | int DefIdx = SwapMap[DefMI]; |
774 | 71 | SwapVector[DefIdx].WillRemove = 1; |
775 | 71 | |
776 | 71 | DEBUG(dbgs() << "Marking swap feeding store for removal: "); |
777 | 71 | DEBUG(DefMI->dump()); |
778 | 71 | } |
779 | 102 | |
780 | 4.70k | } else if (4.59k SwapVector[EntryIdx].IsSwappable && |
781 | 4.59k | SwapVector[EntryIdx].SpecialHandling != 02.98k ) { |
782 | 402 | int Repr = EC->getLeaderValue(SwapVector[EntryIdx].VSEId); |
783 | 402 | |
784 | 402 | if (!SwapVector[Repr].WebRejected) |
785 | 31 | handleSpecialSwappables(EntryIdx); |
786 | 4.70k | } |
787 | 4.91k | } |
788 | 711 | } |
789 | | |
790 | | // Create an xxswapd instruction and insert it prior to the given point. |
791 | | // MI is used to determine basic block and debug loc information. |
792 | | // FIXME: When inserting a swap, we should check whether SrcReg is |
793 | | // defined by another swap: SrcReg = XXPERMDI Reg, Reg, 2; If so, |
794 | | // then instead we should generate a copy from Reg to DstReg. |
795 | | void PPCVSXSwapRemoval::insertSwap(MachineInstr *MI, |
796 | | MachineBasicBlock::iterator InsertPoint, |
797 | 9 | unsigned DstReg, unsigned SrcReg) { |
798 | 9 | BuildMI(*MI->getParent(), InsertPoint, MI->getDebugLoc(), |
799 | 9 | TII->get(PPC::XXPERMDI), DstReg) |
800 | 9 | .addReg(SrcReg) |
801 | 9 | .addReg(SrcReg) |
802 | 9 | .addImm(2); |
803 | 9 | } |
804 | | |
805 | | // The identified swap entry requires special handling to allow its |
806 | | // containing computation to be optimized. Perform that handling |
807 | | // here. |
808 | | // FIXME: Additional opportunities will be phased in with subsequent |
809 | | // patches. |
810 | 31 | void PPCVSXSwapRemoval::handleSpecialSwappables(int EntryIdx) { |
811 | 31 | switch (SwapVector[EntryIdx].SpecialHandling) { |
812 | 31 | |
813 | 0 | default: |
814 | 0 | llvm_unreachable("Unexpected special handling type"); |
815 | 31 | |
816 | 31 | // For splats based on an index into a vector, add N/2 modulo N |
817 | 31 | // to the index, where N is the number of vector elements. |
818 | 3 | case SHValues::SH_SPLAT: { |
819 | 3 | MachineInstr *MI = SwapVector[EntryIdx].VSEMI; |
820 | 3 | unsigned NElts; |
821 | 3 | |
822 | 3 | DEBUG(dbgs() << "Changing splat: "); |
823 | 3 | DEBUG(MI->dump()); |
824 | 3 | |
825 | 3 | switch (MI->getOpcode()) { |
826 | 0 | default: |
827 | 0 | llvm_unreachable("Unexpected splat opcode"); |
828 | 1 | case PPC::VSPLTB: NElts = 16; break; |
829 | 1 | case PPC::VSPLTH: NElts = 8; break; |
830 | 1 | case PPC::VSPLTW: |
831 | 1 | case PPC::XXSPLTW: NElts = 4; break; |
832 | 3 | } |
833 | 3 | |
834 | 3 | unsigned EltNo; |
835 | 3 | if (MI->getOpcode() == PPC::XXSPLTW) |
836 | 1 | EltNo = MI->getOperand(2).getImm(); |
837 | 3 | else |
838 | 2 | EltNo = MI->getOperand(1).getImm(); |
839 | 3 | |
840 | 3 | EltNo = (EltNo + NElts / 2) % NElts; |
841 | 3 | if (MI->getOpcode() == PPC::XXSPLTW) |
842 | 1 | MI->getOperand(2).setImm(EltNo); |
843 | 3 | else |
844 | 2 | MI->getOperand(1).setImm(EltNo); |
845 | 3 | |
846 | 3 | DEBUG(dbgs() << " Into: "); |
847 | 3 | DEBUG(MI->dump()); |
848 | 3 | break; |
849 | 3 | } |
850 | 3 | |
851 | 3 | // For an XXPERMDI that isn't handled otherwise, we need to |
852 | 3 | // reverse the order of the operands. If the selector operand |
853 | 3 | // has a value of 0 or 3, we need to change it to 3 or 0, |
854 | 3 | // respectively. Otherwise we should leave it alone. (This |
855 | 3 | // is equivalent to reversing the two bits of the selector |
856 | 3 | // operand and complementing the result.) |
857 | 19 | case SHValues::SH_XXPERMDI: { |
858 | 19 | MachineInstr *MI = SwapVector[EntryIdx].VSEMI; |
859 | 19 | |
860 | 19 | DEBUG(dbgs() << "Changing XXPERMDI: "); |
861 | 19 | DEBUG(MI->dump()); |
862 | 19 | |
863 | 19 | unsigned Selector = MI->getOperand(3).getImm(); |
864 | 19 | if (Selector == 0 || 19 Selector == 39 ) |
865 | 16 | Selector = 3 - Selector; |
866 | 19 | MI->getOperand(3).setImm(Selector); |
867 | 19 | |
868 | 19 | unsigned Reg1 = MI->getOperand(1).getReg(); |
869 | 19 | unsigned Reg2 = MI->getOperand(2).getReg(); |
870 | 19 | MI->getOperand(1).setReg(Reg2); |
871 | 19 | MI->getOperand(2).setReg(Reg1); |
872 | 19 | |
873 | 19 | DEBUG(dbgs() << " Into: "); |
874 | 19 | DEBUG(MI->dump()); |
875 | 19 | break; |
876 | 3 | } |
877 | 3 | |
878 | 3 | // For a copy from a scalar floating-point register to a vector |
879 | 3 | // register, removing swaps will leave the copied value in the |
880 | 3 | // wrong lane. Insert a swap following the copy to fix this. |
881 | 9 | case SHValues::SH_COPYWIDEN: { |
882 | 9 | MachineInstr *MI = SwapVector[EntryIdx].VSEMI; |
883 | 9 | |
884 | 9 | DEBUG(dbgs() << "Changing SUBREG_TO_REG: "); |
885 | 9 | DEBUG(MI->dump()); |
886 | 9 | |
887 | 9 | unsigned DstReg = MI->getOperand(0).getReg(); |
888 | 9 | const TargetRegisterClass *DstRC = MRI->getRegClass(DstReg); |
889 | 9 | unsigned NewVReg = MRI->createVirtualRegister(DstRC); |
890 | 9 | |
891 | 9 | MI->getOperand(0).setReg(NewVReg); |
892 | 9 | DEBUG(dbgs() << " Into: "); |
893 | 9 | DEBUG(MI->dump()); |
894 | 9 | |
895 | 9 | auto InsertPoint = ++MachineBasicBlock::iterator(MI); |
896 | 9 | |
897 | 9 | // Note that an XXPERMDI requires a VSRC, so if the SUBREG_TO_REG |
898 | 9 | // is copying to a VRRC, we need to be careful to avoid a register |
899 | 9 | // assignment problem. In this case we must copy from VRRC to VSRC |
900 | 9 | // prior to the swap, and from VSRC to VRRC following the swap. |
901 | 9 | // Coalescing will usually remove all this mess. |
902 | 9 | if (DstRC == &PPC::VRRCRegClass9 ) { |
903 | 0 | unsigned VSRCTmp1 = MRI->createVirtualRegister(&PPC::VSRCRegClass); |
904 | 0 | unsigned VSRCTmp2 = MRI->createVirtualRegister(&PPC::VSRCRegClass); |
905 | 0 |
|
906 | 0 | BuildMI(*MI->getParent(), InsertPoint, MI->getDebugLoc(), |
907 | 0 | TII->get(PPC::COPY), VSRCTmp1) |
908 | 0 | .addReg(NewVReg); |
909 | 0 | DEBUG(std::prev(InsertPoint)->dump()); |
910 | 0 |
|
911 | 0 | insertSwap(MI, InsertPoint, VSRCTmp2, VSRCTmp1); |
912 | 0 | DEBUG(std::prev(InsertPoint)->dump()); |
913 | 0 |
|
914 | 0 | BuildMI(*MI->getParent(), InsertPoint, MI->getDebugLoc(), |
915 | 0 | TII->get(PPC::COPY), DstReg) |
916 | 0 | .addReg(VSRCTmp2); |
917 | 0 | DEBUG(std::prev(InsertPoint)->dump()); |
918 | 0 |
|
919 | 9 | } else { |
920 | 9 | insertSwap(MI, InsertPoint, DstReg, NewVReg); |
921 | 9 | DEBUG(std::prev(InsertPoint)->dump()); |
922 | 9 | } |
923 | 9 | break; |
924 | 31 | } |
925 | 31 | } |
926 | 31 | } |
927 | | |
928 | | // Walk the swap vector and replace each entry marked for removal with |
929 | | // a copy operation. |
930 | 711 | bool PPCVSXSwapRemoval::removeSwaps() { |
931 | 711 | |
932 | 711 | DEBUG(dbgs() << "\n*** Removing swaps ***\n\n"); |
933 | 711 | |
934 | 711 | bool Changed = false; |
935 | 711 | |
936 | 5.62k | for (unsigned EntryIdx = 0; EntryIdx < SwapVector.size()5.62k ; ++EntryIdx4.91k ) { |
937 | 4.91k | if (SwapVector[EntryIdx].WillRemove4.91k ) { |
938 | 141 | Changed = true; |
939 | 141 | MachineInstr *MI = SwapVector[EntryIdx].VSEMI; |
940 | 141 | MachineBasicBlock *MBB = MI->getParent(); |
941 | 141 | BuildMI(*MBB, MI, MI->getDebugLoc(), TII->get(TargetOpcode::COPY), |
942 | 141 | MI->getOperand(0).getReg()) |
943 | 141 | .add(MI->getOperand(1)); |
944 | 141 | |
945 | 141 | DEBUG(dbgs() << format("Replaced %d with copy: ", |
946 | 141 | SwapVector[EntryIdx].VSEId)); |
947 | 141 | DEBUG(MI->dump()); |
948 | 141 | |
949 | 141 | MI->eraseFromParent(); |
950 | 141 | } |
951 | 4.91k | } |
952 | 711 | |
953 | 711 | return Changed; |
954 | 711 | } |
955 | | |
956 | | #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) |
957 | | // For debug purposes, dump the contents of the swap vector. |
958 | | LLVM_DUMP_METHOD void PPCVSXSwapRemoval::dumpSwapVector() { |
959 | | |
960 | | for (unsigned EntryIdx = 0; EntryIdx < SwapVector.size(); ++EntryIdx) { |
961 | | |
962 | | MachineInstr *MI = SwapVector[EntryIdx].VSEMI; |
963 | | int ID = SwapVector[EntryIdx].VSEId; |
964 | | |
965 | | dbgs() << format("%6d", ID); |
966 | | dbgs() << format("%6d", EC->getLeaderValue(ID)); |
967 | | dbgs() << format(" BB#%3d", MI->getParent()->getNumber()); |
968 | | dbgs() << format(" %14s ", TII->getName(MI->getOpcode()).str().c_str()); |
969 | | |
970 | | if (SwapVector[EntryIdx].IsLoad) |
971 | | dbgs() << "load "; |
972 | | if (SwapVector[EntryIdx].IsStore) |
973 | | dbgs() << "store "; |
974 | | if (SwapVector[EntryIdx].IsSwap) |
975 | | dbgs() << "swap "; |
976 | | if (SwapVector[EntryIdx].MentionsPhysVR) |
977 | | dbgs() << "physreg "; |
978 | | if (SwapVector[EntryIdx].MentionsPartialVR) |
979 | | dbgs() << "partialreg "; |
980 | | |
981 | | if (SwapVector[EntryIdx].IsSwappable) { |
982 | | dbgs() << "swappable "; |
983 | | switch(SwapVector[EntryIdx].SpecialHandling) { |
984 | | default: |
985 | | dbgs() << "special:**unknown**"; |
986 | | break; |
987 | | case SH_NONE: |
988 | | break; |
989 | | case SH_EXTRACT: |
990 | | dbgs() << "special:extract "; |
991 | | break; |
992 | | case SH_INSERT: |
993 | | dbgs() << "special:insert "; |
994 | | break; |
995 | | case SH_NOSWAP_LD: |
996 | | dbgs() << "special:load "; |
997 | | break; |
998 | | case SH_NOSWAP_ST: |
999 | | dbgs() << "special:store "; |
1000 | | break; |
1001 | | case SH_SPLAT: |
1002 | | dbgs() << "special:splat "; |
1003 | | break; |
1004 | | case SH_XXPERMDI: |
1005 | | dbgs() << "special:xxpermdi "; |
1006 | | break; |
1007 | | case SH_COPYWIDEN: |
1008 | | dbgs() << "special:copywiden "; |
1009 | | break; |
1010 | | } |
1011 | | } |
1012 | | |
1013 | | if (SwapVector[EntryIdx].WebRejected) |
1014 | | dbgs() << "rejected "; |
1015 | | if (SwapVector[EntryIdx].WillRemove) |
1016 | | dbgs() << "remove "; |
1017 | | |
1018 | | dbgs() << "\n"; |
1019 | | |
1020 | | // For no-asserts builds. |
1021 | | (void)MI; |
1022 | | (void)ID; |
1023 | | } |
1024 | | |
1025 | | dbgs() << "\n"; |
1026 | | } |
1027 | | #endif |
1028 | | |
1029 | | } // end default namespace |
1030 | | |
1031 | 233 | INITIALIZE_PASS_BEGIN233 (PPCVSXSwapRemoval, DEBUG_TYPE,
|
1032 | 233 | "PowerPC VSX Swap Removal", false, false) |
1033 | 233 | INITIALIZE_PASS_END(PPCVSXSwapRemoval, DEBUG_TYPE, |
1034 | | "PowerPC VSX Swap Removal", false, false) |
1035 | | |
1036 | | char PPCVSXSwapRemoval::ID = 0; |
1037 | | FunctionPass* |
1038 | 233 | llvm::createPPCVSXSwapRemovalPass() { return new PPCVSXSwapRemoval(); } |