Coverage Report

Created: 2017-10-03 07:32

/Users/buildslave/jenkins/sharedspace/clang-stage2-coverage-R@2/llvm/lib/Target/AMDGPU/SIInsertWaits.cpp

//===- SIInsertWaits.cpp - Insert wait instructions for memory ops -------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// \brief Insert wait instructions for memory reads and writes.
///
/// Memory reads and writes are issued asynchronously, so we need to insert
/// S_WAITCNT instructions when we want to access any of their results or
/// overwrite any register that's used asynchronously.
//
//===----------------------------------------------------------------------===//

#include "AMDGPU.h"
#include "AMDGPUSubtarget.h"
#include "SIDefines.h"
#include "SIInstrInfo.h"
#include "SIMachineFunctionInfo.h"
#include "SIRegisterInfo.h"
#include "Utils/AMDGPUBaseInfo.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/Pass.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <cstring>
#include <utility>

#define DEBUG_TYPE "si-insert-waits"

using namespace llvm;

namespace {

/// \brief One variable for each of the hardware counters
using Counters = union {
  struct {
    unsigned VM;
    unsigned EXP;
    unsigned LGKM;
  } Named;
  unsigned Array[3];
};
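
// Named and Array alias the same storage, so the pass can refer to one
// counter by name (e.g. LastIssued.Named.LGKM) or loop uniformly over
// Array[0..2], which holds VM, EXP and LGKM in that order.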

using InstType = enum {
  OTHER,
  SMEM,
  VMEM
};

using RegCounters = Counters[512];
using RegInterval = std::pair<unsigned, unsigned>;
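
// RegCounters maps a hardware register encoding (SIRegisterInfo's
// getEncodingValue) to the counter values that were pending when that
// register was last touched; the 512 entries are assumed to be enough to
// cover the SGPR and VGPR encoding space. RegInterval is a half-open range
// [first, second) of 32-bit register slots, see getRegInterval below.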

class SIInsertWaits : public MachineFunctionPass {
private:
  const SISubtarget *ST = nullptr;
  const SIInstrInfo *TII = nullptr;
  const SIRegisterInfo *TRI = nullptr;
  const MachineRegisterInfo *MRI;
  AMDGPU::IsaInfo::IsaVersion ISA;

  /// \brief Constant zero value
  static const Counters ZeroCounts;

  /// \brief Hardware limits
  Counters HardwareLimits;

  /// \brief Counter values we have already waited on.
  Counters WaitedOn;

  /// \brief Counter values that we must wait on before the next counter
  /// increase.
  Counters DelayedWaitOn;

  /// \brief Counter values for last instruction issued.
  Counters LastIssued;

  /// \brief Registers used by async instructions.
  RegCounters UsedRegs;

  /// \brief Registers defined by async instructions.
  RegCounters DefinedRegs;

  /// \brief Different export instruction types seen since last wait.
  unsigned ExpInstrTypesSeen = 0;

  /// \brief Type of the last opcode.
  InstType LastOpcodeType;

  bool LastInstWritesM0;

  /// Whether or not we have flat operations outstanding.
  bool IsFlatOutstanding;

  /// \brief Whether the machine function returns void
  bool ReturnsVoid;

  /// Whether the VCCZ bit is possibly corrupt
  bool VCCZCorrupt = false;

  /// \brief Get increment/decrement amount for this instruction.
  Counters getHwCounts(MachineInstr &MI);

  /// \brief Is operand relevant for async execution?
  bool isOpRelevant(MachineOperand &Op);

  /// \brief Get register interval an operand affects.
  RegInterval getRegInterval(const TargetRegisterClass *RC,
                             const MachineOperand &Reg) const;

  /// \brief Handle an instruction's async components.
  void pushInstruction(MachineBasicBlock &MBB,
                       MachineBasicBlock::iterator I,
                       const Counters &Increment);

  /// \brief Insert the actual wait instruction
  bool insertWait(MachineBasicBlock &MBB,
                  MachineBasicBlock::iterator I,
                  const Counters &Counts);

  /// \brief Handle existing wait instructions (from intrinsics)
  void handleExistingWait(MachineBasicBlock::iterator I);

  /// \brief Do we need def2def checks?
  bool unorderedDefines(MachineInstr &MI);

  /// \brief Resolve all operand dependencies to counter requirements
  Counters handleOperands(MachineInstr &MI);

  /// \brief Insert S_NOP between an instruction writing M0 and S_SENDMSG.
  void handleSendMsg(MachineBasicBlock &MBB, MachineBasicBlock::iterator I);

  /// Return true if there are LGKM instructions that haven't been waited on
  /// yet.
  bool hasOutstandingLGKM() const;

public:
  static char ID;

  SIInsertWaits() : MachineFunctionPass(ID) {}

  bool runOnMachineFunction(MachineFunction &MF) override;

  StringRef getPassName() const override {
    return "SI insert wait instructions";
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    MachineFunctionPass::getAnalysisUsage(AU);
  }
};

} // end anonymous namespace

INITIALIZE_PASS_BEGIN(SIInsertWaits, DEBUG_TYPE,
                      "SI Insert Waits", false, false)
INITIALIZE_PASS_END(SIInsertWaits, DEBUG_TYPE,
                    "SI Insert Waits", false, false)

char SIInsertWaits::ID = 0;

char &llvm::SIInsertWaitsID = SIInsertWaits::ID;

FunctionPass *llvm::createSIInsertWaitsPass() {
  return new SIInsertWaits();
}

const Counters SIInsertWaits::ZeroCounts = { { 0, 0, 0 } };

static bool readsVCCZ(const MachineInstr &MI) {
  unsigned Opc = MI.getOpcode();
  return (Opc == AMDGPU::S_CBRANCH_VCCNZ || Opc == AMDGPU::S_CBRANCH_VCCZ) &&
         !MI.getOperand(1).isUndef();
}
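
// Operand 1 of the conditional branch is understood here to be the implicit
// VCC use; if that use is marked undef, the branch does not really depend on
// the vccz bit and needs no work-around.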

bool SIInsertWaits::hasOutstandingLGKM() const {
  return WaitedOn.Named.LGKM != LastIssued.Named.LGKM;
}

Counters SIInsertWaits::getHwCounts(MachineInstr &MI) {
  uint64_t TSFlags = MI.getDesc().TSFlags;
  Counters Result = { { 0, 0, 0 } };

  Result.Named.VM = !!(TSFlags & SIInstrFlags::VM_CNT);

  // Only consider stores or EXP for EXP_CNT
  Result.Named.EXP = !!(TSFlags & SIInstrFlags::EXP_CNT) && MI.mayStore();

  // LGKM may use larger values
  if (TSFlags & SIInstrFlags::LGKM_CNT) {

    if (TII->isSMRD(MI)) {

      if (MI.getNumOperands() != 0) {
        assert(MI.getOperand(0).isReg() &&
               "First LGKM operand must be a register!");

        // XXX - What if this is a write into a super register?
        const TargetRegisterClass *RC = TII->getOpRegClass(MI, 0);
        unsigned Size = TRI->getRegSizeInBits(*RC);
        Result.Named.LGKM = Size > 32 ? 2 : 1;
      } else {
        // s_dcache_inv etc. do not have a destination register. Assume we
        // want a wait on these.
        // XXX - What is the right value?
        Result.Named.LGKM = 1;
      }
    } else {
      // DS
      Result.Named.LGKM = 1;
    }
  } else {
    Result.Named.LGKM = 0;
  }

  return Result;
}
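
// For example: an S_LOAD_DWORDX2 (SMRD with a 64-bit result) yields
// { VM = 0, EXP = 0, LGKM = 2 }, while a buffer load that sets VM_CNT yields
// { VM = 1, EXP = 0, LGKM = 0 }.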

bool SIInsertWaits::isOpRelevant(MachineOperand &Op) {
  // Constants are always irrelevant
  if (!Op.isReg() || !TRI->isInAllocatableClass(Op.getReg()))
    return false;

  // Defines are always relevant
  if (Op.isDef())
    return true;

  // For exports all registers are relevant.
  // TODO: Skip undef/disabled registers.
  MachineInstr &MI = *Op.getParent();
  if (TII->isEXP(MI))
    return true;

  // For stores the stored value is also relevant
  if (!MI.getDesc().mayStore())
    return false;

  // Check if this operand is the value being stored.
  // Special case for DS/FLAT instructions, since the address
  // operand comes before the value operand and it may have
  // multiple data operands.

  if (TII->isDS(MI)) {
    MachineOperand *Data0 = TII->getNamedOperand(MI, AMDGPU::OpName::data0);
    if (Data0 && Op.isIdenticalTo(*Data0))
      return true;

    MachineOperand *Data1 = TII->getNamedOperand(MI, AMDGPU::OpName::data1);
    return Data1 && Op.isIdenticalTo(*Data1);
  }

  if (TII->isFLAT(MI)) {
    MachineOperand *Data = TII->getNamedOperand(MI, AMDGPU::OpName::vdata);
    if (Data && Op.isIdenticalTo(*Data))
      return true;
  }

  // NOTE: This assumes that the value operand is before the
  // address operand, and that there is only one value operand.
  for (MachineInstr::mop_iterator I = MI.operands_begin(),
       E = MI.operands_end(); I != E; ++I) {

    if (I->isReg() && I->isUse())
      return Op.isIdenticalTo(*I);
  }

  return false;
}
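
// The stored value is relevant because the corresponding counter also tracks
// outstanding reads of the data registers: until it drops, those registers
// must not be overwritten.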

RegInterval SIInsertWaits::getRegInterval(const TargetRegisterClass *RC,
                                          const MachineOperand &Reg) const {
  unsigned Size = TRI->getRegSizeInBits(*RC);
  assert(Size >= 32);

  RegInterval Result;
  Result.first = TRI->getEncodingValue(Reg.getReg());
  Result.second = Result.first + Size / 32;

  return Result;
}
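
// Example: a 64-bit register pair whose low half has encoding value 10 yields
// the half-open interval [10, 12), i.e. two consecutive 32-bit slots in
// UsedRegs/DefinedRegs.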

void SIInsertWaits::pushInstruction(MachineBasicBlock &MBB,
                                    MachineBasicBlock::iterator I,
                                    const Counters &Increment) {
  // Get the hardware counter increments and sum them up
  Counters Limit = ZeroCounts;
  unsigned Sum = 0;

  if (TII->mayAccessFlatAddressSpace(*I))
    IsFlatOutstanding = true;

  for (unsigned i = 0; i < 3; ++i) {
    LastIssued.Array[i] += Increment.Array[i];
    if (Increment.Array[i])
      Limit.Array[i] = LastIssued.Array[i];
    Sum += Increment.Array[i];
  }

  // If we don't increase anything then that's it
  if (Sum == 0) {
    LastOpcodeType = OTHER;
    return;
  }

  if (ST->getGeneration() >= SISubtarget::VOLCANIC_ISLANDS) {
    // Any occurrence of consecutive VMEM or SMEM instructions forms a VMEM
    // or SMEM clause, respectively.
    //
    // The temporary workaround is to break the clauses with S_NOP.
    //
    // The proper solution would be to allocate registers such that all source
    // and destination registers don't overlap, e.g. this is illegal:
    //   r0 = load r2
    //   r2 = load r0
    if (LastOpcodeType == VMEM && Increment.Named.VM) {
      // Insert a NOP to break the clause.
      BuildMI(MBB, I, DebugLoc(), TII->get(AMDGPU::S_NOP))
          .addImm(0);
      LastInstWritesM0 = false;
    }

    if (TII->isSMRD(*I))
      LastOpcodeType = SMEM;
    else if (Increment.Named.VM)
      LastOpcodeType = VMEM;
  }

  // Remember which export instructions we have seen
  if (Increment.Named.EXP) {
    ExpInstrTypesSeen |= TII->isEXP(*I) ? 1 : 2;
  }
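
  // Bit 0 marks an EXP instruction, bit 1 a VM write; insertWait checks for
  // ExpInstrTypesSeen == 3 (both kinds seen) when deciding how to treat
  // EXP_CNT ordering.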

  for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) {
    MachineOperand &Op = I->getOperand(i);
    if (!isOpRelevant(Op))
      continue;

    const TargetRegisterClass *RC = TII->getOpRegClass(*I, i);
    RegInterval Interval = getRegInterval(RC, Op);
    for (unsigned j = Interval.first; j < Interval.second; ++j) {

      // Remember which registers we define
      if (Op.isDef())
        DefinedRegs[j] = Limit;

      // and which one we are using
      if (Op.isUse())
        UsedRegs[j] = Limit;
    }
  }
}

bool SIInsertWaits::insertWait(MachineBasicBlock &MBB,
                               MachineBasicBlock::iterator I,
                               const Counters &Required) {
  // End of program? No need to wait on anything
  // A function not returning void needs to wait, because other bytecode will
  // be appended after it and we don't know what it will be.
  if (I != MBB.end() && I->getOpcode() == AMDGPU::S_ENDPGM && ReturnsVoid)
    return false;

  // Figure out if the async instructions execute in order
  bool Ordered[3];

  // VM_CNT is always ordered except when there are flat instructions, which
  // can return out of order.
  Ordered[0] = !IsFlatOutstanding;

  // EXP_CNT is unordered if we have both EXP & VM-writes
  Ordered[1] = ExpInstrTypesSeen == 3;

  // LGKM_CNT is handled as always unordered. TODO: Handle LDS and GDS
  Ordered[2] = false;
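
  // For an ordered counter, operations complete oldest-first, so it suffices
  // to let "LastIssued - Required" operations remain outstanding; an
  // unordered counter has to drain to zero (see the loop below).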

  // The values we are going to put into the S_WAITCNT instruction
  Counters Counts = HardwareLimits;

  // Do we really need to wait?
  bool NeedWait = false;

  for (unsigned i = 0; i < 3; ++i) {
    if (Required.Array[i] <= WaitedOn.Array[i])
      continue;

    NeedWait = true;

    if (Ordered[i]) {
      unsigned Value = LastIssued.Array[i] - Required.Array[i];

      // Adjust the value to the real hardware possibilities.
      Counts.Array[i] = std::min(Value, HardwareLimits.Array[i]);
    } else
      Counts.Array[i] = 0;

    // Remember what we have waited on.
    WaitedOn.Array[i] = LastIssued.Array[i] - Counts.Array[i];
  }

  if (!NeedWait)
    return false;

  // Reset EXP_CNT instruction types
  if (Counts.Named.EXP == 0)
    ExpInstrTypesSeen = 0;

  // Build the wait instruction
  BuildMI(MBB, I, DebugLoc(), TII->get(AMDGPU::S_WAITCNT))
    .addImm(AMDGPU::encodeWaitcnt(ISA,
                                  Counts.Named.VM,
                                  Counts.Named.EXP,
                                  Counts.Named.LGKM));

  LastOpcodeType = OTHER;
  LastInstWritesM0 = false;
  IsFlatOutstanding = false;
  return true;
}

/// \brief Helper function for handleOperands
static void increaseCounters(Counters &Dst, const Counters &Src) {
  for (unsigned i = 0; i < 3; ++i)
    Dst.Array[i] = std::max(Dst.Array[i], Src.Array[i]);
}

/// \brief Check whether any of the counters is non-zero
static bool countersNonZero(const Counters &Counter) {
  for (unsigned i = 0; i < 3; ++i)
    if (Counter.Array[i])
      return true;
  return false;
}

void SIInsertWaits::handleExistingWait(MachineBasicBlock::iterator I) {
  assert(I->getOpcode() == AMDGPU::S_WAITCNT);

  unsigned Imm = I->getOperand(0).getImm();
  Counters Counts, WaitOn;

  Counts.Named.VM = AMDGPU::decodeVmcnt(ISA, Imm);
  Counts.Named.EXP = AMDGPU::decodeExpcnt(ISA, Imm);
  Counts.Named.LGKM = AMDGPU::decodeLgkmcnt(ISA, Imm);

  for (unsigned i = 0; i < 3; ++i) {
    if (Counts.Array[i] <= LastIssued.Array[i])
      WaitOn.Array[i] = LastIssued.Array[i] - Counts.Array[i];
    else
      WaitOn.Array[i] = 0;
  }

  increaseCounters(DelayedWaitOn, WaitOn);
}
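
// The merged amounts are folded into the next instruction's Required counters
// in runOnMachineFunction, so a wait requested via intrinsic still takes
// effect even though the original S_WAITCNT is erased afterwards.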

Counters SIInsertWaits::handleOperands(MachineInstr &MI) {
  Counters Result = ZeroCounts;

  // For each register affected by this instruction increase the result
  // sequence.
  //
  // TODO: We could probably just look at explicit operands if we removed VCC /
  // EXEC from SMRD dest reg classes.
  for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
    MachineOperand &Op = MI.getOperand(i);
    if (!Op.isReg() || !TRI->isInAllocatableClass(Op.getReg()))
      continue;

    const TargetRegisterClass *RC = TII->getOpRegClass(MI, i);
    RegInterval Interval = getRegInterval(RC, Op);
    for (unsigned j = Interval.first; j < Interval.second; ++j) {
      if (Op.isDef()) {
        increaseCounters(Result, UsedRegs[j]);
        increaseCounters(Result, DefinedRegs[j]);
      }

      if (Op.isUse())
        increaseCounters(Result, DefinedRegs[j]);
    }
  }

  return Result;
}
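
// Example: for a VALU add whose source register was last written by a load
// issued when VM was 3, Result.Named.VM becomes 3; insertWait then emits an
// S_WAITCNT unless we have already waited past that point.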

void SIInsertWaits::handleSendMsg(MachineBasicBlock &MBB,
                                  MachineBasicBlock::iterator I) {
  if (ST->getGeneration() < SISubtarget::VOLCANIC_ISLANDS)
    return;

  // There must be "S_NOP 0" between an instruction writing M0 and S_SENDMSG.
  if (LastInstWritesM0 && (I->getOpcode() == AMDGPU::S_SENDMSG ||
                           I->getOpcode() == AMDGPU::S_SENDMSGHALT)) {
    BuildMI(MBB, I, DebugLoc(), TII->get(AMDGPU::S_NOP)).addImm(0);
    LastInstWritesM0 = false;
    return;
  }

  // Set whether this instruction sets M0
  LastInstWritesM0 = false;

  unsigned NumOperands = I->getNumOperands();
  for (unsigned i = 0; i < NumOperands; i++) {
    const MachineOperand &Op = I->getOperand(i);

    if (Op.isReg() && Op.isDef() && Op.getReg() == AMDGPU::M0)
      LastInstWritesM0 = true;
  }
}

/// Return true if \p MBB has one successor immediately following, and is its
/// only predecessor
static bool hasTrivialSuccessor(const MachineBasicBlock &MBB) {
  if (MBB.succ_size() != 1)
    return false;

  const MachineBasicBlock *Succ = *MBB.succ_begin();
  return (Succ->pred_size() == 1) && MBB.isLayoutSuccessor(Succ);
}

// FIXME: Insert waits listed in Table 4.2 "Required User-Inserted Wait States"
// around other non-memory instructions.
bool SIInsertWaits::runOnMachineFunction(MachineFunction &MF) {
  bool Changes = false;

  ST = &MF.getSubtarget<SISubtarget>();
  TII = ST->getInstrInfo();
  TRI = &TII->getRegisterInfo();
  MRI = &MF.getRegInfo();
  ISA = AMDGPU::IsaInfo::getIsaVersion(ST->getFeatureBits());
  const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();

  HardwareLimits.Named.VM = AMDGPU::getVmcntBitMask(ISA);
  HardwareLimits.Named.EXP = AMDGPU::getExpcntBitMask(ISA);
  HardwareLimits.Named.LGKM = AMDGPU::getLgkmcntBitMask(ISA);

  WaitedOn = ZeroCounts;
  DelayedWaitOn = ZeroCounts;
  LastIssued = ZeroCounts;
  LastOpcodeType = OTHER;
  LastInstWritesM0 = false;
  IsFlatOutstanding = false;
  ReturnsVoid = MFI->returnsVoid();

  memset(&UsedRegs, 0, sizeof(UsedRegs));
  memset(&DefinedRegs, 0, sizeof(DefinedRegs));

  SmallVector<MachineInstr *, 4> RemoveMI;
  SmallVector<MachineBasicBlock *, 4> EndPgmBlocks;

  bool HaveScalarStores = false;

  for (MachineFunction::iterator BI = MF.begin(), BE = MF.end();
       BI != BE; ++BI) {
    MachineBasicBlock &MBB = *BI;

    for (MachineBasicBlock::iterator I = MBB.begin(), E = MBB.end();
         I != E; ++I) {
      if (!HaveScalarStores && TII->isScalarStore(*I))
        HaveScalarStores = true;

      if (ST->getGeneration() <= SISubtarget::SEA_ISLANDS) {
        // There is a hardware bug on CI/SI where an SMRD instruction may
        // corrupt the vccz bit, so when we detect that an instruction may read
        // from a corrupt vccz bit, we need to:
        // 1. Insert s_waitcnt lgkm(0) to wait for all outstanding SMRD
        //    operations to complete.
        // 2. Restore the correct value of vccz by writing the current value
        //    of vcc back to vcc.

        if (TII->isSMRD(I->getOpcode())) {
          VCCZCorrupt = true;
        } else if (!hasOutstandingLGKM() &&
                   I->modifiesRegister(AMDGPU::VCC, TRI)) {
          // FIXME: We only care about SMRD instructions here, not LDS or GDS.
          // Whenever we store a value in vcc, the correct value of vccz is
          // restored.
          VCCZCorrupt = false;
        }

        // Check if we need to apply the bug work-around
        if (VCCZCorrupt && readsVCCZ(*I)) {
          DEBUG(dbgs() << "Inserting vccz bug work-around before: " << *I
                       << '\n');

          // Wait on everything, not just LGKM.  vccz reads usually come from
          // terminators, and we always wait on everything at the end of the
          // block, so if we only wait on LGKM here, we might end up with
          // another s_waitcnt inserted right after this if there are non-LGKM
          // instructions still outstanding.
          insertWait(MBB, I, LastIssued);

          // Restore the vccz bit.  Any time a value is written to vcc, the vcc
          // bit is updated, so we can restore the bit by reading the value of
          // vcc and then writing it back to the register.
          BuildMI(MBB, I, I->getDebugLoc(), TII->get(AMDGPU::S_MOV_B64),
                  AMDGPU::VCC)
            .addReg(AMDGPU::VCC);
        }
      }

      // Record pre-existing, explicitly requested waits
      if (I->getOpcode() == AMDGPU::S_WAITCNT) {
        handleExistingWait(*I);
        RemoveMI.push_back(&*I);
        continue;
      }

      Counters Required;

      // Wait for everything before a barrier.
      //
      // S_SENDMSG implicitly waits for all outstanding LGKM transfers to
      // finish, but we also want to wait for any other outstanding transfers
      // before signalling other hardware blocks.
      if ((I->getOpcode() == AMDGPU::S_BARRIER &&
               !ST->hasAutoWaitcntBeforeBarrier()) ||
           I->getOpcode() == AMDGPU::S_SENDMSG ||
           I->getOpcode() == AMDGPU::S_SENDMSGHALT)
        Required = LastIssued;
      else
        Required = handleOperands(*I);

      Counters Increment = getHwCounts(*I);

      if (countersNonZero(Required) || countersNonZero(Increment))
        increaseCounters(Required, DelayedWaitOn);

      Changes |= insertWait(MBB, I, Required);

      pushInstruction(MBB, I, Increment);
      handleSendMsg(MBB, I);

      if (I->getOpcode() == AMDGPU::S_ENDPGM ||
          I->getOpcode() == AMDGPU::SI_RETURN_TO_EPILOG)
        EndPgmBlocks.push_back(&MBB);
    }

    // Wait for everything at the end of the MBB. If there is only one
    // successor, we can defer this until the uses there.
    if (!hasTrivialSuccessor(MBB))
      Changes |= insertWait(MBB, MBB.getFirstTerminator(), LastIssued);
  }

  if (HaveScalarStores) {
    // If scalar writes are used, the cache must be flushed or else the next
    // wave to reuse the same scratch memory can be clobbered.
    //
    // Insert s_dcache_wb at wave termination points if there were any scalar
    // stores, and only if the cache hasn't already been flushed. This could be
    // improved by looking across blocks for flushes in postdominating blocks
    // from the stores but an explicitly requested flush is probably very rare.
    for (MachineBasicBlock *MBB : EndPgmBlocks) {
      bool SeenDCacheWB = false;

      for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end();
           I != E; ++I) {
        if (I->getOpcode() == AMDGPU::S_DCACHE_WB)
          SeenDCacheWB = true;
        else if (TII->isScalarStore(*I))
          SeenDCacheWB = false;

        // FIXME: It would be better to insert this before a waitcnt if any.
        if ((I->getOpcode() == AMDGPU::S_ENDPGM ||
             I->getOpcode() == AMDGPU::SI_RETURN_TO_EPILOG) &&
            !SeenDCacheWB) {
          Changes = true;
          BuildMI(*MBB, I, I->getDebugLoc(), TII->get(AMDGPU::S_DCACHE_WB));
        }
      }
    }
  }

  for (MachineInstr *I : RemoveMI)
    I->eraseFromParent();

  if (!MFI->isEntryFunction()) {
    // Wait for any outstanding memory operations that the input registers may
    // depend on. We can't track them and it's better to do the wait after the
    // costly call sequence.

    // TODO: Could insert earlier and schedule more liberally with operations
    // that only use caller preserved registers.
    MachineBasicBlock &EntryBB = MF.front();
    BuildMI(EntryBB, EntryBB.getFirstNonPHI(), DebugLoc(),
            TII->get(AMDGPU::S_WAITCNT))
      .addImm(0);
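
    // An immediate of 0 encodes vmcnt = expcnt = lgkmcnt = 0, i.e. wait for
    // all outstanding memory operations to complete.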

    Changes = true;
  }

  return Changes;
}