Coverage Report

Created: 2019-07-24 05:18

/Users/buildslave/jenkins/workspace/clang-stage2-coverage-R/llvm/lib/Target/NVPTX/NVPTXLowerArgs.cpp
//===-- NVPTXLowerArgs.cpp - Lower arguments ------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
//
// Arguments to kernel and device functions are passed via param space,
// which imposes certain restrictions:
// http://docs.nvidia.com/cuda/parallel-thread-execution/#state-spaces
//
// Kernel parameters are read-only and accessible only via the ld.param
// instruction, directly or via a pointer. Pointers to kernel
// arguments can't be converted to the generic address space.
//
// Device function parameters are directly accessible via
// ld.param/st.param, but taking the address of one returns a pointer
// to a copy created in local space which *can't* be used with
// ld.param/st.param.
//
// Copying a byval struct into local memory in IR allows us to enforce
// the param space restrictions, gives the rest of IR a pointer w/o
// param space restrictions, and gives us an opportunity to eliminate
// the copy.
//
// Pointer arguments to kernel functions need more work to be lowered:
//
// 1. Convert non-byval pointer arguments of CUDA kernels to pointers in the
//    global address space. This allows later optimizations to emit
//    ld.global.*/st.global.* for accessing these pointer arguments. For
//    example,
//
//    define void @foo(float* %input) {
//      %v = load float, float* %input, align 4
//      ...
//    }
//
//    becomes
//
//    define void @foo(float* %input) {
//      %input2 = addrspacecast float* %input to float addrspace(1)*
//      %input3 = addrspacecast float addrspace(1)* %input2 to float*
//      %v = load float, float* %input3, align 4
//      ...
//    }
//
//    Later, NVPTXInferAddressSpaces will optimize it to
//
//    define void @foo(float* %input) {
//      %input2 = addrspacecast float* %input to float addrspace(1)*
//      %v = load float, float addrspace(1)* %input2, align 4
//      ...
//    }
//
// 2. Convert pointers in a byval kernel parameter to pointers in the global
//    address space. As in #1, this allows NVPTX to emit more ld/st.global. E.g.,
//
//    struct S {
//      int *x;
//      int *y;
//    };
//    __global__ void foo(S s) {
//      int *b = s.y;
//      // use b
//    }
//
//    "b" points to the global address space. At the IR level,
//
//    define void @foo({i32*, i32*}* byval %input) {
//      %b_ptr = getelementptr {i32*, i32*}, {i32*, i32*}* %input, i64 0, i32 1
//      %b = load i32*, i32** %b_ptr
//      ; use %b
//    }
//
//    becomes
//
//    define void @foo({i32*, i32*}* byval %input) {
//      %b_ptr = getelementptr {i32*, i32*}, {i32*, i32*}* %input, i64 0, i32 1
//      %b = load i32*, i32** %b_ptr
//      %b_global = addrspacecast i32* %b to i32 addrspace(1)*
//      %b_generic = addrspacecast i32 addrspace(1)* %b_global to i32*
//      ; use %b_generic
//    }
//
// TODO: merge this pass with NVPTXInferAddressSpaces so that other passes don't
// cancel the addrspacecast pair this pass emits.
//===----------------------------------------------------------------------===//

#include "MCTargetDesc/NVPTXBaseInfo.h"
#include "NVPTX.h"
#include "NVPTXTargetMachine.h"
#include "NVPTXUtilities.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/Pass.h"

using namespace llvm;

namespace llvm {
void initializeNVPTXLowerArgsPass(PassRegistry &);
}

namespace {
class NVPTXLowerArgs : public FunctionPass {
  bool runOnFunction(Function &F) override;

  bool runOnKernelFunction(Function &F);
  bool runOnDeviceFunction(Function &F);

  // Handle byval parameters.
  void handleByValParam(Argument *Arg);
  // Knowing Ptr must point to the global address space, this function
  // addrspacecasts Ptr to global and then back to generic. This allows
  // NVPTXInferAddressSpaces to fold the global-to-generic cast into
  // loads/stores that appear later.
  void markPointerAsGlobal(Value *Ptr);

public:
  static char ID; // Pass identification, replacement for typeid
  NVPTXLowerArgs(const NVPTXTargetMachine *TM = nullptr)
      : FunctionPass(ID), TM(TM) {}
  StringRef getPassName() const override {
    return "Lower pointer arguments of CUDA kernels";
  }

private:
  const NVPTXTargetMachine *TM;
};
} // namespace

char NVPTXLowerArgs::ID = 1;

INITIALIZE_PASS(NVPTXLowerArgs, "nvptx-lower-args",
                "Lower arguments (NVPTX)", false, false)
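
// Note: the pass is registered under the name "nvptx-lower-args". If it is
// constructed without a target machine (TM == nullptr), the CUDA-specific
// pointer marking in runOnKernelFunction below is skipped and only the
// byval lowering runs.
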
// =============================================================================
// If the function had a byval struct ptr arg, say foo(%struct.x* byval %d),
// then add the following instructions to the first basic block:
//
// %temp = alloca %struct.x, align 8
// %tempd = addrspacecast %struct.x* %d to %struct.x addrspace(101)*
// %tv = load %struct.x addrspace(101)* %tempd
// store %struct.x %tv, %struct.x* %temp, align 8
//
// The above code allocates some space on the stack and copies the incoming
// struct from param space to local space.
// Then replace all occurrences of %d by %temp.
// =============================================================================
void NVPTXLowerArgs::handleByValParam(Argument *Arg) {
  Function *Func = Arg->getParent();
  Instruction *FirstInst = &(Func->getEntryBlock().front());
  PointerType *PType = dyn_cast<PointerType>(Arg->getType());

  assert(PType && "Expecting pointer type in handleByValParam");

  Type *StructType = PType->getElementType();
  unsigned AS = Func->getParent()->getDataLayout().getAllocaAddrSpace();
  AllocaInst *AllocA = new AllocaInst(StructType, AS, Arg->getName(), FirstInst);
  // Set the alignment to the alignment of the byval parameter. This is
  // because later loads/stores assume that alignment, and we are going to
  // replace the use of the byval parameter with this alloca instruction.
  AllocA->setAlignment(Func->getParamAlignment(Arg->getArgNo()));
  Arg->replaceAllUsesWith(AllocA);

  Value *ArgInParam = new AddrSpaceCastInst(
      Arg, PointerType::get(StructType, ADDRESS_SPACE_PARAM), Arg->getName(),
      FirstInst);
  LoadInst *LI =
      new LoadInst(StructType, ArgInParam, Arg->getName(), FirstInst);
  new StoreInst(LI, AllocA, FirstInst);
}

void NVPTXLowerArgs::markPointerAsGlobal(Value *Ptr) {
  if (Ptr->getType()->getPointerAddressSpace() == ADDRESS_SPACE_GLOBAL)
    return;

  // Decide where to emit the addrspacecast pair.
  BasicBlock::iterator InsertPt;
  if (Argument *Arg = dyn_cast<Argument>(Ptr)) {
    // Insert at the function entry if Ptr is an argument.
    InsertPt = Arg->getParent()->getEntryBlock().begin();
  } else {
    // Insert right after Ptr if Ptr is an instruction.
    InsertPt = ++cast<Instruction>(Ptr)->getIterator();
    assert(InsertPt != InsertPt->getParent()->end() &&
           "We don't call this function with Ptr being a terminator.");
  }

  Instruction *PtrInGlobal = new AddrSpaceCastInst(
      Ptr, PointerType::get(Ptr->getType()->getPointerElementType(),
                            ADDRESS_SPACE_GLOBAL),
      Ptr->getName(), &*InsertPt);
  Value *PtrInGeneric = new AddrSpaceCastInst(PtrInGlobal, Ptr->getType(),
                                              Ptr->getName(), &*InsertPt);
  // Replace all uses of Ptr, except PtrInGlobal itself, with PtrInGeneric.
  Ptr->replaceAllUsesWith(PtrInGeneric);
  PtrInGlobal->setOperand(0, Ptr);
}
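
// As a concrete sketch (illustrative only; the value names are hypothetical
// and mirror the examples in the header comment): for a generic i32* %ptr
// known to point to global memory, markPointerAsGlobal emits
//
//   %ptr.global = addrspacecast i32* %ptr to i32 addrspace(1)*
//   %ptr.generic = addrspacecast i32 addrspace(1)* %ptr.global to i32*
//
// and rewrites every prior use of %ptr to use %ptr.generic, leaving
// NVPTXInferAddressSpaces to fold the global-to-generic cast into the
// loads/stores that follow.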

// =============================================================================
// Main function for this pass.
// =============================================================================
bool NVPTXLowerArgs::runOnKernelFunction(Function &F) {
  if (TM && TM->getDrvInterface() == NVPTX::CUDA) {
    // Mark pointers in byval structs as global.
    for (auto &B : F) {
      for (auto &I : B) {
        if (LoadInst *LI = dyn_cast<LoadInst>(&I)) {
          if (LI->getType()->isPointerTy()) {
            Value *UO = GetUnderlyingObject(LI->getPointerOperand(),
                                            F.getParent()->getDataLayout());
            if (Argument *Arg = dyn_cast<Argument>(UO)) {
              if (Arg->hasByValAttr()) {
                // LI is a load from a pointer within a byval kernel parameter.
                markPointerAsGlobal(LI);
              }
            }
          }
        }
      }
    }
  }

  for (Argument &Arg : F.args()) {
    if (Arg.getType()->isPointerTy()) {
      if (Arg.hasByValAttr())
        handleByValParam(&Arg);
      else if (TM && TM->getDrvInterface() == NVPTX::CUDA)
        markPointerAsGlobal(&Arg);
    }
  }
  return true;
}

// Device functions only need to copy byval args into local memory.
bool NVPTXLowerArgs::runOnDeviceFunction(Function &F) {
  for (Argument &Arg : F.args())
    if (Arg.getType()->isPointerTy() && Arg.hasByValAttr())
      handleByValParam(&Arg);
  return true;
}

bool NVPTXLowerArgs::runOnFunction(Function &F) {
  return isKernelFunction(F) ? runOnKernelFunction(F) : runOnDeviceFunction(F);
}
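
// For orientation, a CUDA-level sketch (illustrative; these functions are
// not part of this file): isKernelFunction(F) is true for __global__ entry
// points and false for __device__ helpers, e.g.
//
//   __global__ void kernel(float *in, S s);  // lowered by runOnKernelFunction
//   __device__ void helper(S s);             // lowered by runOnDeviceFunction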

FunctionPass *
llvm::createNVPTXLowerArgsPass(const NVPTXTargetMachine *TM) {
  return new NVPTXLowerArgs(TM);
}
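
// Usage sketch (an assumption about how the NVPTX backend wires up its IR
// pipeline, not shown in this file): the target adds this pass via its pass
// config, roughly
//
//   void NVPTXPassConfig::addIRPasses() {
//     addPass(createNVPTXLowerArgsPass(&getNVPTXTargetMachine()));
//     ...
//   }
//
// so that the addrspacecast pairs it emits are later folded by
// NVPTXInferAddressSpaces.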