Coverage Report

Created: 2019-07-24 05:18

/Users/buildslave/jenkins/workspace/clang-stage2-coverage-R/llvm/lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp
//===-- AMDGPUPromoteAlloca.cpp - Promote Allocas -------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass eliminates allocas by either converting them into vectors or
// by migrating them to local address space.
//
//===----------------------------------------------------------------------===//
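//
// As a rough illustration (IR names hypothetical): for a kernel containing
//
//   %stack = alloca [4 x i32], align 4, addrspace(5)
//
// the vector path turns indexed loads and stores of %stack into
// extractelement/insertelement operations on a <4 x i32> value, eliminating
// the private allocation, while the LDS path creates an addrspace(3) global
// of type [WorkGroupSize x [4 x i32]] and repoints every use of %stack at a
// per-workitem slot of that global, indexed by the linearized workitem id.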

#include "AMDGPU.h"
#include "AMDGPUSubtarget.h"
#include "Utils/AMDGPUBaseInfo.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Triple.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Analysis/CaptureTracking.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <map>
#include <tuple>
#include <utility>
#include <vector>

#define DEBUG_TYPE "amdgpu-promote-alloca"

using namespace llvm;

namespace {

static cl::opt<bool> DisablePromoteAllocaToVector(
  "disable-promote-alloca-to-vector",
  cl::desc("Disable promote alloca to vector"),
  cl::init(false));

static cl::opt<bool> DisablePromoteAllocaToLDS(
  "disable-promote-alloca-to-lds",
  cl::desc("Disable promote alloca to LDS"),
  cl::init(false));
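
// Both knobs are ordinary cl::opt flags, so either rewrite can be disabled
// on the tool command line, e.g. (invocation illustrative):
//
//   llc -mtriple=amdgcn -disable-promote-alloca-to-vector kernel.ll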

// FIXME: This can create globals so should be a module pass.
class AMDGPUPromoteAlloca : public FunctionPass {
private:
  const TargetMachine *TM;
  Module *Mod = nullptr;
  const DataLayout *DL = nullptr;

  // FIXME: This should be per-kernel.
  uint32_t LocalMemLimit = 0;
  uint32_t CurrentLocalMemUsage = 0;

  bool IsAMDGCN = false;
  bool IsAMDHSA = false;

  std::pair<Value *, Value *> getLocalSizeYZ(IRBuilder<> &Builder);
  Value *getWorkitemID(IRBuilder<> &Builder, unsigned N);

  /// BaseAlloca is the alloca root the search started from.
  /// Val may be that alloca or a recursive user of it.
  bool collectUsesWithPtrTypes(Value *BaseAlloca,
                               Value *Val,
                               std::vector<Value*> &WorkList) const;

  /// Val is a derived pointer from Alloca. OpIdx0/OpIdx1 are the operand
  /// indices to an instruction with 2 pointer inputs (e.g. select, icmp).
  /// Returns true if both operands are derived from the same alloca. Val
  /// should be the same value as one of the input operands of UseInst.
  bool binaryOpIsDerivedFromSameAlloca(Value *Alloca, Value *Val,
                                       Instruction *UseInst,
                                       int OpIdx0, int OpIdx1) const;

  /// Check whether we have enough local memory for promotion.
  bool hasSufficientLocalMem(const Function &F);

public:
  static char ID;

  AMDGPUPromoteAlloca() : FunctionPass(ID) {}

  bool doInitialization(Module &M) override;
  bool runOnFunction(Function &F) override;

  StringRef getPassName() const override { return "AMDGPU Promote Alloca"; }

  bool handleAlloca(AllocaInst &I, bool SufficientLDS);

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    FunctionPass::getAnalysisUsage(AU);
  }
};

} // end anonymous namespace

char AMDGPUPromoteAlloca::ID = 0;

INITIALIZE_PASS(AMDGPUPromoteAlloca, DEBUG_TYPE,
                "AMDGPU promote alloca to vector or LDS", false, false)

char &llvm::AMDGPUPromoteAllocaID = AMDGPUPromoteAlloca::ID;

bool AMDGPUPromoteAlloca::doInitialization(Module &M) {
  Mod = &M;
  DL = &Mod->getDataLayout();

  return false;
}

bool AMDGPUPromoteAlloca::runOnFunction(Function &F) {
  if (skipFunction(F))
    return false;

  if (auto *TPC = getAnalysisIfAvailable<TargetPassConfig>())
    TM = &TPC->getTM<TargetMachine>();
  else
    return false;

  const Triple &TT = TM->getTargetTriple();
  IsAMDGCN = TT.getArch() == Triple::amdgcn;
  IsAMDHSA = TT.getOS() == Triple::AMDHSA;

  const AMDGPUSubtarget &ST = AMDGPUSubtarget::get(*TM, F);
  if (!ST.isPromoteAllocaEnabled())
    return false;

  bool SufficientLDS = hasSufficientLocalMem(F);
  bool Changed = false;
  BasicBlock &EntryBB = *F.begin();

  SmallVector<AllocaInst *, 16> Allocas;
  for (Instruction &I : EntryBB) {
    if (AllocaInst *AI = dyn_cast<AllocaInst>(&I))
      Allocas.push_back(AI);
  }

  for (AllocaInst *AI : Allocas) {
    if (handleAlloca(*AI, SufficientLDS))
      Changed = true;
  }

  return Changed;
}

std::pair<Value *, Value *>
AMDGPUPromoteAlloca::getLocalSizeYZ(IRBuilder<> &Builder) {
  const Function &F = *Builder.GetInsertBlock()->getParent();
  const AMDGPUSubtarget &ST = AMDGPUSubtarget::get(*TM, F);

  if (!IsAMDHSA) {
    Function *LocalSizeYFn
      = Intrinsic::getDeclaration(Mod, Intrinsic::r600_read_local_size_y);
    Function *LocalSizeZFn
      = Intrinsic::getDeclaration(Mod, Intrinsic::r600_read_local_size_z);

    CallInst *LocalSizeY = Builder.CreateCall(LocalSizeYFn, {});
    CallInst *LocalSizeZ = Builder.CreateCall(LocalSizeZFn, {});

    ST.makeLIDRangeMetadata(LocalSizeY);
    ST.makeLIDRangeMetadata(LocalSizeZ);

    return std::make_pair(LocalSizeY, LocalSizeZ);
  }

  // We must read the size out of the dispatch pointer.
  assert(IsAMDGCN);

  // We are indexing into this struct, and want to extract the workgroup_size_*
  // fields.
  //
  //   typedef struct hsa_kernel_dispatch_packet_s {
  //     uint16_t header;
  //     uint16_t setup;
  //     uint16_t workgroup_size_x;
  //     uint16_t workgroup_size_y;
  //     uint16_t workgroup_size_z;
  //     uint16_t reserved0;
  //     uint32_t grid_size_x;
  //     uint32_t grid_size_y;
  //     uint32_t grid_size_z;
  //
  //     uint32_t private_segment_size;
  //     uint32_t group_segment_size;
  //     uint64_t kernel_object;
  //
  // #ifdef HSA_LARGE_MODEL
  //     void *kernarg_address;
  // #elif defined HSA_LITTLE_ENDIAN
  //     void *kernarg_address;
  //     uint32_t reserved1;
  // #else
  //     uint32_t reserved1;
  //     void *kernarg_address;
  // #endif
  //     uint64_t reserved2;
  //     hsa_signal_t completion_signal; // uint64_t wrapper
  //   } hsa_kernel_dispatch_packet_t
  //
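  // Given that layout, the i32 at index 1 (byte offset 4) packs
  // workgroup_size_x in its low 16 bits and workgroup_size_y in its high 16
  // bits, and the i32 at index 2 (byte offset 8) packs workgroup_size_z with
  // the zero reserved0 field. That is why the code below loads i32s at
  // indices 1 and 2 and recovers y with a 16-bit shift.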
  Function *DispatchPtrFn
    = Intrinsic::getDeclaration(Mod, Intrinsic::amdgcn_dispatch_ptr);

  CallInst *DispatchPtr = Builder.CreateCall(DispatchPtrFn, {});
  DispatchPtr->addAttribute(AttributeList::ReturnIndex, Attribute::NoAlias);
  DispatchPtr->addAttribute(AttributeList::ReturnIndex, Attribute::NonNull);

  // Size of the dispatch packet struct.
  DispatchPtr->addDereferenceableAttr(AttributeList::ReturnIndex, 64);

  Type *I32Ty = Type::getInt32Ty(Mod->getContext());
  Value *CastDispatchPtr = Builder.CreateBitCast(
    DispatchPtr, PointerType::get(I32Ty, AMDGPUAS::CONSTANT_ADDRESS));

  // We could do a single 64-bit load here, but it's likely that the basic
  // 32-bit and extract sequence is already present, and it is probably easier
  // to CSE this. The loads should be mergeable later anyway.
  Value *GEPXY = Builder.CreateConstInBoundsGEP1_64(I32Ty, CastDispatchPtr, 1);
  LoadInst *LoadXY = Builder.CreateAlignedLoad(I32Ty, GEPXY, 4);

  Value *GEPZU = Builder.CreateConstInBoundsGEP1_64(I32Ty, CastDispatchPtr, 2);
  LoadInst *LoadZU = Builder.CreateAlignedLoad(I32Ty, GEPZU, 4);

  MDNode *MD = MDNode::get(Mod->getContext(), None);
  LoadXY->setMetadata(LLVMContext::MD_invariant_load, MD);
  LoadZU->setMetadata(LLVMContext::MD_invariant_load, MD);
  ST.makeLIDRangeMetadata(LoadZU);

  // Extract y component. Upper half of LoadZU should be zero already.
  Value *Y = Builder.CreateLShr(LoadXY, 16);

  return std::make_pair(Y, LoadZU);
}

Value *AMDGPUPromoteAlloca::getWorkitemID(IRBuilder<> &Builder, unsigned N) {
  const AMDGPUSubtarget &ST =
      AMDGPUSubtarget::get(*TM, *Builder.GetInsertBlock()->getParent());
  Intrinsic::ID IntrID = Intrinsic::ID::not_intrinsic;

  switch (N) {
  case 0:
    IntrID = IsAMDGCN ? Intrinsic::amdgcn_workitem_id_x
      : Intrinsic::r600_read_tidig_x;
    break;
  case 1:
    IntrID = IsAMDGCN ? Intrinsic::amdgcn_workitem_id_y
      : Intrinsic::r600_read_tidig_y;
    break;

  case 2:
    IntrID = IsAMDGCN ? Intrinsic::amdgcn_workitem_id_z
      : Intrinsic::r600_read_tidig_z;
    break;
  default:
    llvm_unreachable("invalid dimension");
  }

  Function *WorkitemIdFn = Intrinsic::getDeclaration(Mod, IntrID);
  CallInst *CI = Builder.CreateCall(WorkitemIdFn);
  ST.makeLIDRangeMetadata(CI);

  return CI;
}
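
// Maps an array allocation type to the equivalent vector type,
// e.g. [4 x float] -> <4 x float>.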
static VectorType *arrayTypeToVecType(ArrayType *ArrayTy) {
  return VectorType::get(ArrayTy->getElementType(),
                         ArrayTy->getNumElements());
}

static Value *
calculateVectorIndex(Value *Ptr,
                     const std::map<GetElementPtrInst *, Value *> &GEPIdx) {
  GetElementPtrInst *GEP = cast<GetElementPtrInst>(Ptr);

  auto I = GEPIdx.find(GEP);
  return I == GEPIdx.end() ? nullptr : I->second;
}
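
// Note the shape this requires: only the canonical two-index GEP over the
// alloca is understood, e.g. (illustrative)
//
//   getelementptr inbounds [16 x i32], [16 x i32] addrspace(5)* %a,
//                 i64 0, i32 %i
//
// for which the vector index is %i; anything else makes the caller abandon
// the promotion.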
static Value* GEPToVectorIndex(GetElementPtrInst *GEP) {
  // FIXME: We only support simple cases.
  if (GEP->getNumOperands() != 3)
    return nullptr;

  ConstantInt *I0 = dyn_cast<ConstantInt>(GEP->getOperand(1));
  if (!I0 || !I0->isZero())
    return nullptr;

  return GEP->getOperand(2);
}

// Checks whether Inst is an instruction that the vector promotion below
// knows how to rewrite.
//
// TODO: Check isTriviallyVectorizable for calls and handle other
// instructions.
static bool canVectorizeInst(Instruction *Inst, User *User) {
  switch (Inst->getOpcode()) {
  case Instruction::Load: {
    // Currently only handle the case where the pointer operand is a GEP.
    // Also, we cannot vectorize volatile or atomic loads.
    LoadInst *LI = cast<LoadInst>(Inst);
    if (isa<AllocaInst>(User) &&
        LI->getPointerOperandType() == User->getType() &&
        isa<VectorType>(LI->getType()))
      return true;
    return isa<GetElementPtrInst>(LI->getPointerOperand()) && LI->isSimple();
  }
  case Instruction::BitCast:
    return true;
  case Instruction::Store: {
    // Must be the stored pointer operand, not a stored value, plus
    // since it should be in canonical form, the User should be a GEP.
    // Also, we cannot vectorize volatile or atomic stores.
    StoreInst *SI = cast<StoreInst>(Inst);
    if (isa<AllocaInst>(User) &&
        SI->getPointerOperandType() == User->getType() &&
        isa<VectorType>(SI->getValueOperand()->getType()))
      return true;
    return (SI->getPointerOperand() == User) &&
           isa<GetElementPtrInst>(User) && SI->isSimple();
  }
  default:
    return false;
  }
}
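
// A sketch of the store rewrite performed below (value names hypothetical):
//
//   %gep = getelementptr inbounds [4 x i32], [4 x i32] addrspace(5)* %a,
//                        i64 0, i32 %i
//   store i32 %v, i32 addrspace(5)* %gep
//
// becomes a read-modify-write of the whole vector:
//
//   %bc  = bitcast [4 x i32] addrspace(5)* %a to <4 x i32> addrspace(5)*
//   %vec = load <4 x i32>, <4 x i32> addrspace(5)* %bc
//   %new = insertelement <4 x i32> %vec, i32 %v, i32 %i
//   store <4 x i32> %new, <4 x i32> addrspace(5)* %bc
//
// Loads are handled symmetrically with extractelement.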
static bool tryPromoteAllocaToVector(AllocaInst *Alloca) {

  if (DisablePromoteAllocaToVector) {
    LLVM_DEBUG(dbgs() << "  Promotion of alloca to vector is disabled\n");
    return false;
  }

  Type *AT = Alloca->getAllocatedType();
  SequentialType *AllocaTy = dyn_cast<SequentialType>(AT);

  LLVM_DEBUG(dbgs() << "Alloca candidate for vectorization\n");

  // FIXME: There is no reason why we can't support larger arrays, we
  // are just being conservative for now.
  // FIXME: We also reject allocas of the form [ 2 x [ 2 x i32 ]] or
  // equivalent. Potentially these could also be promoted but we don't
  // currently handle this case.
  if (!AllocaTy ||
      AllocaTy->getNumElements() > 16 ||
      AllocaTy->getNumElements() < 2 ||
      !VectorType::isValidElementType(AllocaTy->getElementType())) {
    LLVM_DEBUG(dbgs() << "  Cannot convert type to vector\n");
    return false;
  }

  std::map<GetElementPtrInst*, Value*> GEPVectorIdx;
  std::vector<Value*> WorkList;
  for (User *AllocaUser : Alloca->users()) {
    GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(AllocaUser);
    if (!GEP) {
      if (!canVectorizeInst(cast<Instruction>(AllocaUser), Alloca))
        return false;

      WorkList.push_back(AllocaUser);
      continue;
    }

    Value *Index = GEPToVectorIndex(GEP);

    // If we can't compute a vector index from this GEP, then we can't
    // promote this alloca to vector.
    if (!Index) {
      LLVM_DEBUG(dbgs() << "  Cannot compute vector index for GEP " << *GEP
                        << '\n');
      return false;
    }

    GEPVectorIdx[GEP] = Index;
    for (User *GEPUser : AllocaUser->users()) {
      if (!canVectorizeInst(cast<Instruction>(GEPUser), AllocaUser))
        return false;

      WorkList.push_back(GEPUser);
    }
  }

  VectorType *VectorTy = dyn_cast<VectorType>(AllocaTy);
  if (!VectorTy)
    VectorTy = arrayTypeToVecType(cast<ArrayType>(AllocaTy));

  LLVM_DEBUG(dbgs() << "  Converting alloca to vector " << *AllocaTy << " -> "
                    << *VectorTy << '\n');

  for (Value *V : WorkList) {
    Instruction *Inst = cast<Instruction>(V);
    IRBuilder<> Builder(Inst);
    switch (Inst->getOpcode()) {
    case Instruction::Load: {
      if (Inst->getType() == AT)
        break;

      Type *VecPtrTy = VectorTy->getPointerTo(AMDGPUAS::PRIVATE_ADDRESS);
      Value *Ptr = cast<LoadInst>(Inst)->getPointerOperand();
      Value *Index = calculateVectorIndex(Ptr, GEPVectorIdx);

      Value *BitCast = Builder.CreateBitCast(Alloca, VecPtrTy);
      Value *VecValue = Builder.CreateLoad(VectorTy, BitCast);
      Value *ExtractElement = Builder.CreateExtractElement(VecValue, Index);
      Inst->replaceAllUsesWith(ExtractElement);
      Inst->eraseFromParent();
      break;
    }
    case Instruction::Store: {
      StoreInst *SI = cast<StoreInst>(Inst);
      if (SI->getValueOperand()->getType() == AT)
        break;

      Type *VecPtrTy = VectorTy->getPointerTo(AMDGPUAS::PRIVATE_ADDRESS);
      Value *Ptr = SI->getPointerOperand();
      Value *Index = calculateVectorIndex(Ptr, GEPVectorIdx);
      Value *BitCast = Builder.CreateBitCast(Alloca, VecPtrTy);
      Value *VecValue = Builder.CreateLoad(VectorTy, BitCast);
      Value *NewVecValue = Builder.CreateInsertElement(VecValue,
                                                       SI->getValueOperand(),
                                                       Index);
      Builder.CreateStore(NewVecValue, BitCast);
      Inst->eraseFromParent();
      break;
    }
    case Instruction::BitCast:
    case Instruction::AddrSpaceCast:
      break;

    default:
      llvm_unreachable("Inconsistency in instructions promotable to vector");
    }
  }
  return true;
}

static bool isCallPromotable(CallInst *CI) {
  IntrinsicInst *II = dyn_cast<IntrinsicInst>(CI);
  if (!II)
    return false;

  switch (II->getIntrinsicID()) {
  case Intrinsic::memcpy:
  case Intrinsic::memmove:
  case Intrinsic::memset:
  case Intrinsic::lifetime_start:
  case Intrinsic::lifetime_end:
  case Intrinsic::invariant_start:
  case Intrinsic::invariant_end:
  case Intrinsic::launder_invariant_group:
  case Intrinsic::strip_invariant_group:
  case Intrinsic::objectsize:
    return true;
  default:
    return false;
  }
}
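
// For example (types abbreviated), a select whose two pointer operands both
// derive from the alloca being promoted is acceptable,
//
//   %p = select i1 %c, i32* %gep.a0, i32* %gep.a1  ; both GEPs of the alloca
//
// while a select mixing in a different object is rejected, since only one
// side would move to the new address space. A null pointer constant on the
// other side is fine; it is rewritten to the new address space later.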
bool AMDGPUPromoteAlloca::binaryOpIsDerivedFromSameAlloca(Value *BaseAlloca,
                                                          Value *Val,
                                                          Instruction *Inst,
                                                          int OpIdx0,
                                                          int OpIdx1) const {
  // Figure out which operand is the one we might not be promoting.
  Value *OtherOp = Inst->getOperand(OpIdx0);
  if (Val == OtherOp)
    OtherOp = Inst->getOperand(OpIdx1);

  if (isa<ConstantPointerNull>(OtherOp))
    return true;

  Value *OtherObj = GetUnderlyingObject(OtherOp, *DL);
  if (!isa<AllocaInst>(OtherObj))
    return false;

  // TODO: We should be able to replace undefs with the right pointer type.

  // TODO: If we know the other base object is another promotable
  // alloca, not necessarily this alloca, we can do this. The
  // important part is both must have the same address space at
  // the end.
  if (OtherObj != BaseAlloca) {
    LLVM_DEBUG(
        dbgs() << "Found a binary instruction with another alloca object\n");
    return false;
  }

  return true;
}

bool AMDGPUPromoteAlloca::collectUsesWithPtrTypes(
  Value *BaseAlloca,
  Value *Val,
  std::vector<Value*> &WorkList) const {

  for (User *User : Val->users()) {
    if (is_contained(WorkList, User))
      continue;

    if (CallInst *CI = dyn_cast<CallInst>(User)) {
      if (!isCallPromotable(CI))
        return false;

      WorkList.push_back(User);
      continue;
    }

    Instruction *UseInst = cast<Instruction>(User);
    if (UseInst->getOpcode() == Instruction::PtrToInt)
      return false;

    if (LoadInst *LI = dyn_cast<LoadInst>(UseInst)) {
      if (LI->isVolatile())
        return false;

      continue;
    }

    if (StoreInst *SI = dyn_cast<StoreInst>(UseInst)) {
      if (SI->isVolatile())
        return false;

      // Reject if the stored value is not the pointer operand.
      if (SI->getPointerOperand() != Val)
        return false;
    } else if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(UseInst)) {
      if (RMW->isVolatile())
        return false;
    } else if (AtomicCmpXchgInst *CAS = dyn_cast<AtomicCmpXchgInst>(UseInst)) {
      if (CAS->isVolatile())
        return false;
    }

    // Only promote an icmp if we know that the other icmp operand is from
    // another pointer that will also be promoted.
    if (ICmpInst *ICmp = dyn_cast<ICmpInst>(UseInst)) {
      if (!binaryOpIsDerivedFromSameAlloca(BaseAlloca, Val, ICmp, 0, 1))
        return false;

      // May need to rewrite constant operands.
      WorkList.push_back(ICmp);
    }

    if (UseInst->getOpcode() == Instruction::AddrSpaceCast) {
      // Give up if the pointer may be captured.
      if (PointerMayBeCaptured(UseInst, true, true))
        return false;
      // Don't collect the users of this.
      WorkList.push_back(User);
      continue;
    }

    if (!User->getType()->isPointerTy())
      continue;

    if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(UseInst)) {
      // Be conservative if an address could be computed outside the bounds of
      // the alloca.
      if (!GEP->isInBounds())
        return false;
    }

    // Only promote a select if we know that the other select operand is from
    // another pointer that will also be promoted.
    if (SelectInst *SI = dyn_cast<SelectInst>(UseInst)) {
      if (!binaryOpIsDerivedFromSameAlloca(BaseAlloca, Val, SI, 1, 2))
        return false;
    }

    // Repeat for phis.
    if (PHINode *Phi = dyn_cast<PHINode>(UseInst)) {
      // TODO: Handle more complex cases. We should be able to replace loops
      // over arrays.
      switch (Phi->getNumIncomingValues()) {
      case 1:
        break;
      case 2:
        if (!binaryOpIsDerivedFromSameAlloca(BaseAlloca, Val, Phi, 0, 1))
          return false;
        break;
      default:
        return false;
      }
    }

    WorkList.push_back(User);
    if (!collectUsesWithPtrTypes(BaseAlloca, User, WorkList))
      return false;
  }

  return true;
}
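
// A worked example of the budgeting below, with assumed numbers: on a
// subtarget with 64 KiB of LDS, if globals already used by this kernel
// account for 4 KiB and the occupancy heuristic settles on a 32 KiB
// per-workgroup cap, LocalMemLimit becomes 32 KiB and handleAlloca may keep
// promoting until CurrentLocalMemUsage (starting at 4 KiB) would exceed it.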
bool AMDGPUPromoteAlloca::hasSufficientLocalMem(const Function &F) {

  FunctionType *FTy = F.getFunctionType();
  const AMDGPUSubtarget &ST = AMDGPUSubtarget::get(*TM, F);

  // If the function has any arguments in the local address space, then it's
  // possible these arguments require the entire local memory space, so
  // we cannot use local memory in the pass.
  for (Type *ParamTy : FTy->params()) {
    PointerType *PtrTy = dyn_cast<PointerType>(ParamTy);
    if (PtrTy && PtrTy->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS) {
      LocalMemLimit = 0;
      LLVM_DEBUG(dbgs() << "Function has local memory argument. Promoting to "
                           "local memory disabled.\n");
      return false;
    }
  }

  LocalMemLimit = ST.getLocalMemorySize();
  if (LocalMemLimit == 0)
    return false;

  const DataLayout &DL = Mod->getDataLayout();

  // Check how much local memory is being used by global objects.
  CurrentLocalMemUsage = 0;
  for (GlobalVariable &GV : Mod->globals()) {
    if (GV.getType()->getAddressSpace() != AMDGPUAS::LOCAL_ADDRESS)
      continue;

    for (const User *U : GV.users()) {
      const Instruction *Use = dyn_cast<Instruction>(U);
      if (!Use)
        continue;

      if (Use->getParent()->getParent() == &F) {
        unsigned Align = GV.getAlignment();
        if (Align == 0)
          Align = DL.getABITypeAlignment(GV.getValueType());

        // FIXME: Try to account for padding here. The padding is currently
        // determined from the inverse order of uses in the function. I'm not
        // sure if the use list order is in any way connected to this, so the
        // total reported size is likely incorrect.
        uint64_t AllocSize = DL.getTypeAllocSize(GV.getValueType());
        CurrentLocalMemUsage = alignTo(CurrentLocalMemUsage, Align);
        CurrentLocalMemUsage += AllocSize;
        break;
      }
    }
  }

  unsigned MaxOccupancy = ST.getOccupancyWithLocalMemSize(CurrentLocalMemUsage,
                                                          F);

  // Restrict local memory usage so that we don't drastically reduce occupancy,
  // unless it is already significantly reduced.

  // TODO: Have some sort of hint or other heuristics to guess occupancy based
  // on other factors.
  unsigned OccupancyHint = ST.getWavesPerEU(F).second;
  if (OccupancyHint == 0)
    OccupancyHint = 7;

  // Clamp to max value.
  OccupancyHint = std::min(OccupancyHint, ST.getMaxWavesPerEU());

  // Check the hint but ignore it if it's obviously wrong from the existing LDS
  // usage.
  MaxOccupancy = std::min(OccupancyHint, MaxOccupancy);

  // Round up to the next tier of usage.
  unsigned MaxSizeWithWaveCount
    = ST.getMaxLocalMemSizeWithWaveCount(MaxOccupancy, F);

  // Program is possibly broken by using more local mem than available.
  if (CurrentLocalMemUsage > MaxSizeWithWaveCount)
    return false;

  LocalMemLimit = MaxSizeWithWaveCount;

  LLVM_DEBUG(dbgs() << F.getName() << " uses " << CurrentLocalMemUsage
                    << " bytes of LDS\n"
                    << "  Rounding size to " << MaxSizeWithWaveCount
                    << " with a maximum occupancy of " << MaxOccupancy << '\n'
                    << " and " << (LocalMemLimit - CurrentLocalMemUsage)
                    << " available for promotion\n");

  return true;
}
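
// A sketch of the LDS replacement below (names assumed): for a kernel @k
// with a maximum flat workgroup size of 256 and an alloca
// "%stack = alloca [4 x i32], addrspace(5)", the pass emits
//
//   @k.stack = internal unnamed_addr addrspace(3) global
//       [256 x [4 x i32]] undef, align 4
//
// and replaces %stack with an inbounds GEP of @k.stack at [0, TID].
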
// FIXME: Should try to pick the most likely to be profitable allocas first.
bool AMDGPUPromoteAlloca::handleAlloca(AllocaInst &I, bool SufficientLDS) {
  // Array allocations are probably not worth handling, since an allocation of
  // the array type is the canonical form.
  if (!I.isStaticAlloca() || I.isArrayAllocation())
    return false;

  IRBuilder<> Builder(&I);

  // First try to replace the alloca with a vector.
  Type *AllocaTy = I.getAllocatedType();

  LLVM_DEBUG(dbgs() << "Trying to promote " << I << '\n');

  if (tryPromoteAllocaToVector(&I))
    return true; // Promoted to vector.

  if (DisablePromoteAllocaToLDS)
    return false;

  const Function &ContainingFunction = *I.getParent()->getParent();
  CallingConv::ID CC = ContainingFunction.getCallingConv();

  // Don't promote the alloca to LDS for shader calling conventions as the work
  // item ID intrinsics are not supported for these calling conventions.
  // Furthermore not all LDS is available for some of the stages.
  switch (CC) {
  case CallingConv::AMDGPU_KERNEL:
  case CallingConv::SPIR_KERNEL:
    break;
  default:
    LLVM_DEBUG(
        dbgs()
        << " promote alloca to LDS not supported with this calling convention.\n");
    return false;
  }

  // Not likely to have sufficient local memory for promotion.
  if (!SufficientLDS)
    return false;

  const AMDGPUSubtarget &ST = AMDGPUSubtarget::get(*TM, ContainingFunction);
  unsigned WorkGroupSize = ST.getFlatWorkGroupSizes(ContainingFunction).second;

  const DataLayout &DL = Mod->getDataLayout();

  unsigned Align = I.getAlignment();
  if (Align == 0)
    Align = DL.getABITypeAlignment(I.getAllocatedType());

  // FIXME: This computed padding is likely wrong since it depends on inverse
  // usage order.
  //
  // FIXME: It is also possible that if we're allowed to use all of the
  // memory, we could end up using more than the maximum due to alignment
  // padding.

  uint32_t NewSize = alignTo(CurrentLocalMemUsage, Align);
  uint32_t AllocSize = WorkGroupSize * DL.getTypeAllocSize(AllocaTy);
  NewSize += AllocSize;

  if (NewSize > LocalMemLimit) {
    LLVM_DEBUG(dbgs() << "  " << AllocSize
                      << " bytes of local memory not available to promote\n");
    return false;
  }

  CurrentLocalMemUsage = NewSize;

  std::vector<Value*> WorkList;

  if (!collectUsesWithPtrTypes(&I, &I, WorkList)) {
    LLVM_DEBUG(dbgs() << " Do not know how to convert all uses\n");
    return false;
  }

  LLVM_DEBUG(dbgs() << "Promoting alloca to local memory\n");

  Function *F = I.getParent()->getParent();

  Type *GVTy = ArrayType::get(I.getAllocatedType(), WorkGroupSize);
  GlobalVariable *GV = new GlobalVariable(
      *Mod, GVTy, false, GlobalValue::InternalLinkage,
      UndefValue::get(GVTy),
      Twine(F->getName()) + Twine('.') + I.getName(),
      nullptr,
      GlobalVariable::NotThreadLocal,
      AMDGPUAS::LOCAL_ADDRESS);
  GV->setUnnamedAddr(GlobalValue::UnnamedAddr::Global);
  GV->setAlignment(I.getAlignment());

  Value *TCntY, *TCntZ;

  std::tie(TCntY, TCntZ) = getLocalSizeYZ(Builder);
  Value *TIdX = getWorkitemID(Builder, 0);
  Value *TIdY = getWorkitemID(Builder, 1);
  Value *TIdZ = getWorkitemID(Builder, 2);

  Value *Tmp0 = Builder.CreateMul(TCntY, TCntZ, "", true, true);
  Tmp0 = Builder.CreateMul(Tmp0, TIdX);
  Value *Tmp1 = Builder.CreateMul(TIdY, TCntZ, "", true, true);
  Value *TID = Builder.CreateAdd(Tmp0, Tmp1);
  TID = Builder.CreateAdd(TID, TIdZ);
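
  // The linearized id computed above is
  //   TID = TIdX * (TCntY * TCntZ) + TIdY * TCntZ + TIdZ
  // e.g. in a hypothetical 8x4x2 workgroup, workitem (1, 2, 1) gets
  // TID = 1 * 8 + 2 * 2 + 1 = 13, a unique slot in [0, 64).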

  Value *Indices[] = {
    Constant::getNullValue(Type::getInt32Ty(Mod->getContext())),
    TID
  };

  Value *Offset = Builder.CreateInBoundsGEP(GVTy, GV, Indices);
  I.mutateType(Offset->getType());
  I.replaceAllUsesWith(Offset);
  I.eraseFromParent();
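
  // Everything left in WorkList came from collectUsesWithPtrTypes: most
  // plain instructions just get their pointer type mutated in place (with
  // null pointer constants remade in the new address space), while the
  // promotable intrinsics below are re-emitted against the LDS pointer.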
  for (Value *V : WorkList) {
    CallInst *Call = dyn_cast<CallInst>(V);
    if (!Call) {
      if (ICmpInst *CI = dyn_cast<ICmpInst>(V)) {
        Value *Src0 = CI->getOperand(0);
        Type *EltTy = Src0->getType()->getPointerElementType();
        PointerType *NewTy = PointerType::get(EltTy, AMDGPUAS::LOCAL_ADDRESS);

        if (isa<ConstantPointerNull>(CI->getOperand(0)))
          CI->setOperand(0, ConstantPointerNull::get(NewTy));

        if (isa<ConstantPointerNull>(CI->getOperand(1)))
          CI->setOperand(1, ConstantPointerNull::get(NewTy));

        continue;
      }

      // The operand's value should be corrected on its own and we don't want
      // to touch the users.
      if (isa<AddrSpaceCastInst>(V))
        continue;

      Type *EltTy = V->getType()->getPointerElementType();
      PointerType *NewTy = PointerType::get(EltTy, AMDGPUAS::LOCAL_ADDRESS);

      // FIXME: It doesn't really make sense to try to do this for all
      // instructions.
      V->mutateType(NewTy);

      // Adjust the types of any constant operands.
      if (SelectInst *SI = dyn_cast<SelectInst>(V)) {
        if (isa<ConstantPointerNull>(SI->getOperand(1)))
          SI->setOperand(1, ConstantPointerNull::get(NewTy));

        if (isa<ConstantPointerNull>(SI->getOperand(2)))
          SI->setOperand(2, ConstantPointerNull::get(NewTy));
      } else if (PHINode *Phi = dyn_cast<PHINode>(V)) {
        for (unsigned I = 0, E = Phi->getNumIncomingValues(); I != E; ++I) {
          if (isa<ConstantPointerNull>(Phi->getIncomingValue(I)))
            Phi->setIncomingValue(I, ConstantPointerNull::get(NewTy));
        }
      }

      continue;
    }

    IntrinsicInst *Intr = cast<IntrinsicInst>(Call);
    Builder.SetInsertPoint(Intr);
    switch (Intr->getIntrinsicID()) {
    case Intrinsic::lifetime_start:
    case Intrinsic::lifetime_end:
      // These intrinsics are for address space 0 only.
      Intr->eraseFromParent();
      continue;
    case Intrinsic::memcpy: {
      MemCpyInst *MemCpy = cast<MemCpyInst>(Intr);
      Builder.CreateMemCpy(MemCpy->getRawDest(), MemCpy->getDestAlignment(),
                           MemCpy->getRawSource(), MemCpy->getSourceAlignment(),
                           MemCpy->getLength(), MemCpy->isVolatile());
      Intr->eraseFromParent();
      continue;
    }
    case Intrinsic::memmove: {
      MemMoveInst *MemMove = cast<MemMoveInst>(Intr);
      Builder.CreateMemMove(MemMove->getRawDest(), MemMove->getDestAlignment(),
                            MemMove->getRawSource(),
                            MemMove->getSourceAlignment(),
                            MemMove->getLength(), MemMove->isVolatile());
      Intr->eraseFromParent();
      continue;
    }
    case Intrinsic::memset: {
      MemSetInst *MemSet = cast<MemSetInst>(Intr);
      Builder.CreateMemSet(MemSet->getRawDest(), MemSet->getValue(),
                           MemSet->getLength(), MemSet->getDestAlignment(),
                           MemSet->isVolatile());
      Intr->eraseFromParent();
      continue;
    }
    case Intrinsic::invariant_start:
    case Intrinsic::invariant_end:
    case Intrinsic::launder_invariant_group:
    case Intrinsic::strip_invariant_group:
      Intr->eraseFromParent();
      // FIXME: I think the invariant marker should still theoretically apply,
      // but the intrinsics need to be changed to accept pointers with any
      // address space.
      continue;
    case Intrinsic::objectsize: {
      Value *Src = Intr->getOperand(0);
      Type *SrcTy = Src->getType()->getPointerElementType();
      Function *ObjectSize = Intrinsic::getDeclaration(Mod,
        Intrinsic::objectsize,
        { Intr->getType(), PointerType::get(SrcTy, AMDGPUAS::LOCAL_ADDRESS) }
      );

      CallInst *NewCall = Builder.CreateCall(
          ObjectSize,
          {Src, Intr->getOperand(1), Intr->getOperand(2), Intr->getOperand(3)});
      Intr->replaceAllUsesWith(NewCall);
      Intr->eraseFromParent();
      continue;
    }
    default:
      Intr->print(errs());
      llvm_unreachable("Don't know how to promote alloca intrinsic use.");
    }
  }
  return true;
}

FunctionPass *llvm::createAMDGPUPromoteAlloca() {
  return new AMDGPUPromoteAlloca();
}