Coverage Report

Created: 2017-10-03 07:32

/Users/buildslave/jenkins/sharedspace/clang-stage2-coverage-R@2/llvm/lib/Target/AMDGPU/R600ExpandSpecialInstrs.cpp
Line
Count
Source (jump to first uncovered line)
1
//===- R600ExpandSpecialInstrs.cpp - Expand special instructions ----------===//
2
//
3
//                     The LLVM Compiler Infrastructure
4
//
5
// This file is distributed under the University of Illinois Open Source
6
// License. See LICENSE.TXT for details.
7
//
8
//===----------------------------------------------------------------------===//
9
//
10
/// \file
11
/// Vector, Reduction, and Cube instructions need to fill the entire instruction
12
/// group to work correctly.  This pass expands these individual instructions
13
/// into several instructions that will completely fill the instruction group.
14
//
15
//===----------------------------------------------------------------------===//
16
17
#include "AMDGPU.h"
18
#include "AMDGPUSubtarget.h"
19
#include "R600Defines.h"
20
#include "R600InstrInfo.h"
21
#include "R600RegisterInfo.h"
22
#include "llvm/CodeGen/MachineBasicBlock.h"
23
#include "llvm/CodeGen/MachineFunction.h"
24
#include "llvm/CodeGen/MachineFunctionPass.h"
25
#include "llvm/CodeGen/MachineInstr.h"
26
#include "llvm/CodeGen/MachineInstrBuilder.h"
27
#include "llvm/CodeGen/MachineOperand.h"
28
#include "llvm/Pass.h"
29
#include <cassert>
30
#include <cstdint>
31
#include <iterator>
32
33
using namespace llvm;
34
35
#define DEBUG_TYPE "r600-expand-special-instrs"
36
37
namespace {

/// Machine function pass that expands R600 "special" instructions
/// (LDS_*_RET, PRED_X, DOT_4, and reduction/vector/cube ops) into the
/// complete instruction groups the hardware requires.
class R600ExpandSpecialInstrsPass : public MachineFunctionPass {
private:
  // Target instruction info; set at the start of runOnMachineFunction.
  const R600InstrInfo *TII = nullptr;

  // Copies the immediate flag operand \p Op from OldMI onto NewMI, if OldMI
  // carries that operand at all.
  void SetFlagInNewMI(MachineInstr *NewMI, const MachineInstr *OldMI,
      unsigned Op);

public:
  static char ID;

  R600ExpandSpecialInstrsPass() : MachineFunctionPass(ID) {}

  bool runOnMachineFunction(MachineFunction &MF) override;

  StringRef getPassName() const override {
    return "R600 Expand special instructions pass";
  }
};

} // end anonymous namespace
59
60
90.0k
// Register the pass with the LLVM pass registry under DEBUG_TYPE
// ("r600-expand-special-instrs"); no analysis dependencies, so BEGIN/END
// enclose nothing.
INITIALIZE_PASS_BEGIN(R600ExpandSpecialInstrsPass, DEBUG_TYPE,
                     "R600 Expand Special Instrs", false, false)
INITIALIZE_PASS_END(R600ExpandSpecialInstrsPass, DEBUG_TYPE,
                    "R600ExpandSpecialInstrs", false, false)

char R600ExpandSpecialInstrsPass::ID = 0;

// Externally visible handle so other code can refer to this pass by ID.
char &llvm::R600ExpandSpecialInstrsPassID = R600ExpandSpecialInstrsPass::ID;

// Factory used by the AMDGPU target machine to add this pass to the pipeline.
FunctionPass *llvm::createR600ExpandSpecialInstrsPass() {
  return new R600ExpandSpecialInstrsPass();
}
72
73
void R600ExpandSpecialInstrsPass::SetFlagInNewMI(MachineInstr *NewMI,
74
936
    const MachineInstr *OldMI, unsigned Op) {
75
936
  int OpIdx = TII->getOperandIdx(*OldMI, Op);
76
936
  if (
OpIdx > -1936
) {
77
664
    uint64_t Val = OldMI->getOperand(OpIdx).getImm();
78
664
    TII->setImmOperand(*NewMI, Op, Val);
79
664
  }
80
936
}
81
82
2.05k
/// Expand special instructions in \p MF:
///   * LDS_*_RET ops are redirected to write OQAP, with a MOV from OQAP to
///     the original destination inserted right after (pred_sel copied over).
///   * PRED_X is rewritten as the PRED_SET* opcode carried in its immediate
///     operand, with the appropriate update_exec_mask/update_pred flag.
///   * DOT_4 is split into four bundled per-channel slot instructions.
///   * Reduction, vector, and cube ops are expanded into four per-channel
///     instructions that fill the entire instruction group, with write masks
///     on the channels the original did not define.
/// Fix vs. previous revision: the DOT_4 case no longer re-declares a local
/// `TRI` that shadowed the identical function-level reference.
bool R600ExpandSpecialInstrsPass::runOnMachineFunction(MachineFunction &MF) {
  const R600Subtarget &ST = MF.getSubtarget<R600Subtarget>();
  TII = ST.getInstrInfo();

  const R600RegisterInfo &TRI = TII->getRegisterInfo();

  for (MachineFunction::iterator BB = MF.begin(), BB_E = MF.end();
                                                  BB != BB_E; ++BB) {
    MachineBasicBlock &MBB = *BB;
    MachineBasicBlock::iterator I = MBB.begin();
    while (I != MBB.end()) {
      MachineInstr &MI = *I;
      // Advance before any expansion: new instructions are inserted before I,
      // and MI may be erased below.
      I = std::next(I);

      // Expand LDS_*_RET instructions
      if (TII->isLDSRetInstr(MI.getOpcode())) {
        int DstIdx = TII->getOperandIdx(MI.getOpcode(), AMDGPU::OpName::dst);
        assert(DstIdx != -1);
        MachineOperand &DstOp = MI.getOperand(DstIdx);
        // MOV the LDS return value from OQAP into the original destination,
        // and retarget the LDS op itself to write OQAP.
        MachineInstr *Mov = TII->buildMovInstr(&MBB, I,
                                               DstOp.getReg(), AMDGPU::OQAP);
        DstOp.setReg(AMDGPU::OQAP);
        int LDSPredSelIdx = TII->getOperandIdx(MI.getOpcode(),
                                           AMDGPU::OpName::pred_sel);
        int MovPredSelIdx = TII->getOperandIdx(Mov->getOpcode(),
                                           AMDGPU::OpName::pred_sel);
        // Copy the pred_sel bit
        Mov->getOperand(MovPredSelIdx).setReg(
            MI.getOperand(LDSPredSelIdx).getReg());
      }

      switch (MI.getOpcode()) {
      default: break;
      // Expand PRED_X to one of the PRED_SET instructions.
      case AMDGPU::PRED_X: {
        uint64_t Flags = MI.getOperand(3).getImm();
        // The native opcode used by PRED_X is stored as an immediate in the
        // third operand.
        MachineInstr *PredSet = TII->buildDefaultInstruction(MBB, I,
                                            MI.getOperand(2).getImm(), // opcode
                                            MI.getOperand(0).getReg(), // dst
                                            MI.getOperand(1).getReg(), // src0
                                            AMDGPU::ZERO);             // src1
        TII->addFlag(*PredSet, 0, MO_FLAG_MASK);
        if (Flags & MO_FLAG_PUSH) {
          TII->setImmOperand(*PredSet, AMDGPU::OpName::update_exec_mask, 1);
        } else {
          TII->setImmOperand(*PredSet, AMDGPU::OpName::update_pred, 1);
        }
        MI.eraseFromParent();
        continue;
        }
      case AMDGPU::DOT_4: {
        // NOTE: uses the function-level TRI; a shadowing local copy of the
        // same reference was removed here.
        unsigned DstReg = MI.getOperand(0).getReg();
        unsigned DstBase = TRI.getEncodingValue(DstReg) & HW_REG_MASK;

        for (unsigned Chan = 0; Chan < 4; ++Chan) {
          // Mask channels other than the one the original DOT_4 writes.
          bool Mask = (Chan != TRI.getHWRegChan(DstReg));
          unsigned SubDstReg =
              AMDGPU::R600_TReg32RegClass.getRegister((DstBase * 4) + Chan);
          MachineInstr *BMI =
              TII->buildSlotOfVectorInstruction(MBB, &MI, Chan, SubDstReg);
          if (Chan > 0) {
            BMI->bundleWithPred();
          }
          if (Mask) {
            TII->addFlag(*BMI, 0, MO_FLAG_MASK);
          }
          if (Chan != 3)
            TII->addFlag(*BMI, 0, MO_FLAG_NOT_LAST);
          unsigned Opcode = BMI->getOpcode();
          // While not strictly necessary from hw point of view, we force
          // all src operands of a dot4 inst to belong to the same slot.
          unsigned Src0 = BMI->getOperand(
              TII->getOperandIdx(Opcode, AMDGPU::OpName::src0))
              .getReg();
          unsigned Src1 = BMI->getOperand(
              TII->getOperandIdx(Opcode, AMDGPU::OpName::src1))
              .getReg();
          // Silence unused-variable warnings in NDEBUG builds.
          (void) Src0;
          (void) Src1;
          if ((TRI.getEncodingValue(Src0) & 0xff) < 127 &&
              (TRI.getEncodingValue(Src1) & 0xff) < 127)
            assert(TRI.getHWRegChan(Src0) == TRI.getHWRegChan(Src1));
        }
        MI.eraseFromParent();
        continue;
      }
      }

      bool IsReduction = TII->isReductionOp(MI.getOpcode());
      bool IsVector = TII->isVector(MI);
      bool IsCube = TII->isCubeOp(MI.getOpcode());
      if (!IsReduction && !IsVector && !IsCube) {
        continue;
      }

      // Expand the instruction
      //
      // Reduction instructions:
      // T0_X = DP4 T1_XYZW, T2_XYZW
      // becomes:
      // TO_X = DP4 T1_X, T2_X
      // TO_Y (write masked) = DP4 T1_Y, T2_Y
      // TO_Z (write masked) = DP4 T1_Z, T2_Z
      // TO_W (write masked) = DP4 T1_W, T2_W
      //
      // Vector instructions:
      // T0_X = MULLO_INT T1_X, T2_X
      // becomes:
      // T0_X = MULLO_INT T1_X, T2_X
      // T0_Y (write masked) = MULLO_INT T1_X, T2_X
      // T0_Z (write masked) = MULLO_INT T1_X, T2_X
      // T0_W (write masked) = MULLO_INT T1_X, T2_X
      //
      // Cube instructions:
      // T0_XYZW = CUBE T1_XYZW
      // becomes:
      // TO_X = CUBE T1_Z, T1_Y
      // T0_Y = CUBE T1_Z, T1_X
      // T0_Z = CUBE T1_X, T1_Z
      // T0_W = CUBE T1_Y, T1_Z
      for (unsigned Chan = 0; Chan < 4; Chan++) {
        unsigned DstReg = MI.getOperand(
                            TII->getOperandIdx(MI, AMDGPU::OpName::dst)).getReg();
        unsigned Src0 = MI.getOperand(
                           TII->getOperandIdx(MI, AMDGPU::OpName::src0)).getReg();
        unsigned Src1 = 0;

        // Determine the correct source registers
        if (!IsCube) {
          int Src1Idx = TII->getOperandIdx(MI, AMDGPU::OpName::src1);
          if (Src1Idx != -1) {
            Src1 = MI.getOperand(Src1Idx).getReg();
          }
        }
        if (IsReduction) {
          // Reductions read the matching channel of both vector sources.
          unsigned SubRegIndex = TRI.getSubRegFromChannel(Chan);
          Src0 = TRI.getSubReg(Src0, SubRegIndex);
          Src1 = TRI.getSubReg(Src1, SubRegIndex);
        } else if (IsCube) {
          // Cube reads a fixed swizzle of its single source per channel
          // (see the expansion table in the comment above).
          static const int CubeSrcSwz[] = {2, 2, 0, 1};
          unsigned SubRegIndex0 = TRI.getSubRegFromChannel(CubeSrcSwz[Chan]);
          unsigned SubRegIndex1 = TRI.getSubRegFromChannel(CubeSrcSwz[3 - Chan]);
          Src1 = TRI.getSubReg(Src0, SubRegIndex1);
          Src0 = TRI.getSubReg(Src0, SubRegIndex0);
        }

        // Determine the correct destination registers;
        bool Mask = false;
        bool NotLast = true;
        if (IsCube) {
          unsigned SubRegIndex = TRI.getSubRegFromChannel(Chan);
          DstReg = TRI.getSubReg(DstReg, SubRegIndex);
        } else {
          // Mask the write if the original instruction does not write to
          // the current Channel.
          Mask = (Chan != TRI.getHWRegChan(DstReg));
          unsigned DstBase = TRI.getEncodingValue(DstReg) & HW_REG_MASK;
          DstReg = AMDGPU::R600_TReg32RegClass.getRegister((DstBase * 4) + Chan);
        }

        // Set the IsLast bit
        NotLast = (Chan != 3 );

        // Add the new instruction
        unsigned Opcode = MI.getOpcode();
        switch (Opcode) {
        case AMDGPU::CUBE_r600_pseudo:
          Opcode = AMDGPU::CUBE_r600_real;
          break;
        case AMDGPU::CUBE_eg_pseudo:
          Opcode = AMDGPU::CUBE_eg_real;
          break;
        default:
          break;
        }

        MachineInstr *NewMI =
          TII->buildDefaultInstruction(MBB, I, Opcode, DstReg, Src0, Src1);

        if (Chan != 0)
          NewMI->bundleWithPred();
        if (Mask) {
          TII->addFlag(*NewMI, 0, MO_FLAG_MASK);
        }
        if (NotLast) {
          TII->addFlag(*NewMI, 0, MO_FLAG_NOT_LAST);
        }
        // Carry the original instruction's modifier flags onto each slot.
        SetFlagInNewMI(NewMI, &MI, AMDGPU::OpName::clamp);
        SetFlagInNewMI(NewMI, &MI, AMDGPU::OpName::literal);
        SetFlagInNewMI(NewMI, &MI, AMDGPU::OpName::src0_abs);
        SetFlagInNewMI(NewMI, &MI, AMDGPU::OpName::src1_abs);
        SetFlagInNewMI(NewMI, &MI, AMDGPU::OpName::src0_neg);
        SetFlagInNewMI(NewMI, &MI, AMDGPU::OpName::src1_neg);
      }
      MI.eraseFromParent();
    }
  }
  return false;
}