Coverage Report

Created: 2020-10-24 06:27

/Users/buildslave/jenkins/workspace/coverage/llvm-project/clang/lib/CodeGen/CGOpenMPRuntimeGPU.cpp
//===---- CGOpenMPRuntimeGPU.cpp - Interface to OpenMP GPU Runtimes ----===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This provides a generalized class for OpenMP runtime code generation
// specialized by GPU targets NVPTX and AMDGCN.
//
//===----------------------------------------------------------------------===//

#include "CGOpenMPRuntimeGPU.h"
#include "CGOpenMPRuntimeNVPTX.h"
#include "CodeGenFunction.h"
#include "clang/AST/Attr.h"
#include "clang/AST/DeclOpenMP.h"
#include "clang/AST/StmtOpenMP.h"
#include "clang/AST/StmtVisitor.h"
#include "clang/Basic/Cuda.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/Frontend/OpenMP/OMPGridValues.h"
#include "llvm/IR/IntrinsicsNVPTX.h"

using namespace clang;
using namespace CodeGen;
using namespace llvm::omp;

namespace {
/// Pre(post)-action for different OpenMP constructs specialized for NVPTX.
class NVPTXActionTy final : public PrePostActionTy {
  llvm::FunctionCallee EnterCallee = nullptr;
  ArrayRef<llvm::Value *> EnterArgs;
  llvm::FunctionCallee ExitCallee = nullptr;
  ArrayRef<llvm::Value *> ExitArgs;
  bool Conditional = false;
  llvm::BasicBlock *ContBlock = nullptr;

public:
  NVPTXActionTy(llvm::FunctionCallee EnterCallee,
                ArrayRef<llvm::Value *> EnterArgs,
                llvm::FunctionCallee ExitCallee,
                ArrayRef<llvm::Value *> ExitArgs, bool Conditional = false)
      : EnterCallee(EnterCallee), EnterArgs(EnterArgs), ExitCallee(ExitCallee),
        ExitArgs(ExitArgs), Conditional(Conditional) {}
  void Enter(CodeGenFunction &CGF) override {
    llvm::Value *EnterRes = CGF.EmitRuntimeCall(EnterCallee, EnterArgs);
    if (Conditional) {
      llvm::Value *CallBool = CGF.Builder.CreateIsNotNull(EnterRes);
      auto *ThenBlock = CGF.createBasicBlock("omp_if.then");
      ContBlock = CGF.createBasicBlock("omp_if.end");
      // Generate the branch (If-stmt)
      CGF.Builder.CreateCondBr(CallBool, ThenBlock, ContBlock);
      CGF.EmitBlock(ThenBlock);
    }
  }
  void Done(CodeGenFunction &CGF) {
    // Emit the rest of blocks/branches
    CGF.EmitBranch(ContBlock);
    CGF.EmitBlock(ContBlock, true);
  }
  void Exit(CodeGenFunction &CGF) override {
    CGF.EmitRuntimeCall(ExitCallee, ExitArgs);
  }
};

/// A class to track the execution mode when codegening directives within
/// a target region. The appropriate mode (SPMD|NON-SPMD) is set on entry
/// to the target region and used by containing directives such as 'parallel'
/// to emit optimized code.
class ExecutionRuntimeModesRAII {
private:
  CGOpenMPRuntimeGPU::ExecutionMode SavedExecMode =
      CGOpenMPRuntimeGPU::EM_Unknown;
  CGOpenMPRuntimeGPU::ExecutionMode &ExecMode;
  bool SavedRuntimeMode = false;
  bool *RuntimeMode = nullptr;

public:
  /// Constructor for Non-SPMD mode.
  ExecutionRuntimeModesRAII(CGOpenMPRuntimeGPU::ExecutionMode &ExecMode)
      : ExecMode(ExecMode) {
    SavedExecMode = ExecMode;
    ExecMode = CGOpenMPRuntimeGPU::EM_NonSPMD;
  }
  /// Constructor for SPMD mode.
  ExecutionRuntimeModesRAII(CGOpenMPRuntimeGPU::ExecutionMode &ExecMode,
                            bool &RuntimeMode, bool FullRuntimeMode)
      : ExecMode(ExecMode), RuntimeMode(&RuntimeMode) {
    SavedExecMode = ExecMode;
    SavedRuntimeMode = RuntimeMode;
    ExecMode = CGOpenMPRuntimeGPU::EM_SPMD;
    RuntimeMode = FullRuntimeMode;
  }
  ~ExecutionRuntimeModesRAII() {
    ExecMode = SavedExecMode;
    if (RuntimeMode)
      *RuntimeMode = SavedRuntimeMode;
  }
};

/// GPU Configuration:  This information can be derived from cuda registers,
/// however, providing compile time constants helps generate more efficient
/// code.  For all practical purposes this is fine because the configuration
/// is the same for all known NVPTX architectures.
enum MachineConfiguration : unsigned {
  /// See "llvm/Frontend/OpenMP/OMPGridValues.h" for various related target
  /// specific Grid Values like GV_Warp_Size, GV_Warp_Size_Log2,
  /// and GV_Warp_Size_Log2_Mask.

  /// Global memory alignment for performance.
  GlobalMemoryAlignment = 128,

  /// Maximal size of the shared memory buffer.
  SharedMemorySize = 128,
};

static const ValueDecl *getPrivateItem(const Expr *RefExpr) {
  RefExpr = RefExpr->IgnoreParens();
  if (const auto *ASE = dyn_cast<ArraySubscriptExpr>(RefExpr)) {
    const Expr *Base = ASE->getBase()->IgnoreParenImpCasts();
    while (const auto *TempASE = dyn_cast<ArraySubscriptExpr>(Base))
      Base = TempASE->getBase()->IgnoreParenImpCasts();
    RefExpr = Base;
  } else if (auto *OASE = dyn_cast<OMPArraySectionExpr>(RefExpr)) {
    const Expr *Base = OASE->getBase()->IgnoreParenImpCasts();
    while (const auto *TempOASE = dyn_cast<OMPArraySectionExpr>(Base))
      Base = TempOASE->getBase()->IgnoreParenImpCasts();
    while (const auto *TempASE = dyn_cast<ArraySubscriptExpr>(Base))
      Base = TempASE->getBase()->IgnoreParenImpCasts();
    RefExpr = Base;
  }
  RefExpr = RefExpr->IgnoreParenImpCasts();
  if (const auto *DE = dyn_cast<DeclRefExpr>(RefExpr))
    return cast<ValueDecl>(DE->getDecl()->getCanonicalDecl());
  const auto *ME = cast<MemberExpr>(RefExpr);
  return cast<ValueDecl>(ME->getMemberDecl()->getCanonicalDecl());
}
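// E.g., for a clause item written as a[i][j] or the array section a[0:n], the
// function above peels the subscript/section expressions and returns the
// canonical declaration of a.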


static RecordDecl *buildRecordForGlobalizedVars(
    ASTContext &C, ArrayRef<const ValueDecl *> EscapedDecls,
    ArrayRef<const ValueDecl *> EscapedDeclsForTeams,
    llvm::SmallDenseMap<const ValueDecl *, const FieldDecl *>
        &MappedDeclsFields, int BufSize) {
  using VarsDataTy = std::pair<CharUnits /*Align*/, const ValueDecl *>;
  if (EscapedDecls.empty() && EscapedDeclsForTeams.empty())
    return nullptr;
  SmallVector<VarsDataTy, 4> GlobalizedVars;
  for (const ValueDecl *D : EscapedDecls)
    GlobalizedVars.emplace_back(
        CharUnits::fromQuantity(std::max(
            C.getDeclAlign(D).getQuantity(),
            static_cast<CharUnits::QuantityType>(GlobalMemoryAlignment))),
        D);
  for (const ValueDecl *D : EscapedDeclsForTeams)
    GlobalizedVars.emplace_back(C.getDeclAlign(D), D);
  llvm::stable_sort(GlobalizedVars, [](VarsDataTy L, VarsDataTy R) {
    return L.first > R.first;
  });

  // Build struct _globalized_locals_ty {
  //         /*  globalized vars  */[WarpSize] align (max(decl_align,
  //         GlobalMemoryAlignment))
  //         /*  globalized vars  */ for EscapedDeclsForTeams
  //       };
  RecordDecl *GlobalizedRD = C.buildImplicitRecord("_globalized_locals_ty");
  GlobalizedRD->startDefinition();
  llvm::SmallPtrSet<const ValueDecl *, 16> SingleEscaped(
      EscapedDeclsForTeams.begin(), EscapedDeclsForTeams.end());
  for (const auto &Pair : GlobalizedVars) {
    const ValueDecl *VD = Pair.second;
    QualType Type = VD->getType();
    if (Type->isLValueReferenceType())
      Type = C.getPointerType(Type.getNonReferenceType());
    else
      Type = Type.getNonReferenceType();
    SourceLocation Loc = VD->getLocation();
    FieldDecl *Field;
    if (SingleEscaped.count(VD)) {
      Field = FieldDecl::Create(
          C, GlobalizedRD, Loc, Loc, VD->getIdentifier(), Type,
          C.getTrivialTypeSourceInfo(Type, SourceLocation()),
          /*BW=*/nullptr, /*Mutable=*/false,
          /*InitStyle=*/ICIS_NoInit);
      Field->setAccess(AS_public);
      if (VD->hasAttrs()) {
        for (specific_attr_iterator<AlignedAttr> I(VD->getAttrs().begin()),
             E(VD->getAttrs().end());
             I != E; ++I)
          Field->addAttr(*I);
      }
    } else {
      llvm::APInt ArraySize(32, BufSize);
      Type = C.getConstantArrayType(Type, ArraySize, nullptr, ArrayType::Normal,
                                    0);
      Field = FieldDecl::Create(
          C, GlobalizedRD, Loc, Loc, VD->getIdentifier(), Type,
          C.getTrivialTypeSourceInfo(Type, SourceLocation()),
          /*BW=*/nullptr, /*Mutable=*/false,
          /*InitStyle=*/ICIS_NoInit);
      Field->setAccess(AS_public);
      llvm::APInt Align(32, std::max(C.getDeclAlign(VD).getQuantity(),
                                     static_cast<CharUnits::QuantityType>(
                                         GlobalMemoryAlignment)));
      Field->addAttr(AlignedAttr::CreateImplicit(
          C, /*IsAlignmentExpr=*/true,
          IntegerLiteral::Create(C, Align,
                                 C.getIntTypeForBitwidth(32, /*Signed=*/0),
                                 SourceLocation()),
          {}, AttributeCommonInfo::AS_GNU, AlignedAttr::GNU_aligned));
    }
    GlobalizedRD->addDecl(Field);
    MappedDeclsFields.try_emplace(VD, Field);
  }
  GlobalizedRD->completeDefinition();
  return GlobalizedRD;
}
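// Illustrative result (not from the original source): with one int escaping a
// parallel region, one double escaping a teams region, and BufSize == 32, the
// record built above is roughly
//   struct _globalized_locals_ty {
//     int i[32] __attribute__((aligned(128))); // per-lane buffer, padded align
//     double d;                                // single teams copy, decl align
//   };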

/// Get the list of variables that can escape their declaration context.
class CheckVarsEscapingDeclContext final
    : public ConstStmtVisitor<CheckVarsEscapingDeclContext> {
  CodeGenFunction &CGF;
  llvm::SetVector<const ValueDecl *> EscapedDecls;
  llvm::SetVector<const ValueDecl *> EscapedVariableLengthDecls;
  llvm::SmallPtrSet<const Decl *, 4> EscapedParameters;
  RecordDecl *GlobalizedRD = nullptr;
  llvm::SmallDenseMap<const ValueDecl *, const FieldDecl *> MappedDeclsFields;
  bool AllEscaped = false;
  bool IsForCombinedParallelRegion = false;

  void markAsEscaped(const ValueDecl *VD) {
    // Do not globalize declare target variables.
    if (!isa<VarDecl>(VD) ||
        OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD))
      return;
    VD = cast<ValueDecl>(VD->getCanonicalDecl());
    // Use user-specified allocation.
    if (VD->hasAttrs() && VD->hasAttr<OMPAllocateDeclAttr>())
      return;
    // Variables captured by value must be globalized.
    if (auto *CSI = CGF.CapturedStmtInfo) {
      if (const FieldDecl *FD = CSI->lookup(cast<VarDecl>(VD))) {
        // Check if we need to capture the variable that was already captured
        // by value in the outer region.
        if (!IsForCombinedParallelRegion) {
          if (!FD->hasAttrs())
            return;
          const auto *Attr = FD->getAttr<OMPCaptureKindAttr>();
          if (!Attr)
            return;
          if (((Attr->getCaptureKind() != OMPC_map) &&
               !isOpenMPPrivate(Attr->getCaptureKind())) ||
              ((Attr->getCaptureKind() == OMPC_map) &&
               !FD->getType()->isAnyPointerType()))
            return;
        }
        if (!FD->getType()->isReferenceType()) {
          assert(!VD->getType()->isVariablyModifiedType() &&
                 "Parameter captured by value with variably modified type");
          EscapedParameters.insert(VD);
        } else if (!IsForCombinedParallelRegion) {
          return;
        }
      }
    }
    if ((!CGF.CapturedStmtInfo ||
         (IsForCombinedParallelRegion && CGF.CapturedStmtInfo)) &&
        VD->getType()->isReferenceType())
      // Do not globalize variables with reference type.
      return;
    if (VD->getType()->isVariablyModifiedType())
      EscapedVariableLengthDecls.insert(VD);
    else
      EscapedDecls.insert(VD);
  }
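  // Roughly: a local is recorded as escaped when it may be referenced outside
  // its owning thread -- captured by reference, a by-value parameter that an
  // inner region re-captures, or address-taken while AllEscaped is set by the
  // visitors below.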

  void VisitValueDecl(const ValueDecl *VD) {
    if (VD->getType()->isLValueReferenceType())
      markAsEscaped(VD);
    if (const auto *VarD = dyn_cast<VarDecl>(VD)) {
      if (!isa<ParmVarDecl>(VarD) && VarD->hasInit()) {
        const bool SavedAllEscaped = AllEscaped;
        AllEscaped = VD->getType()->isLValueReferenceType();
        Visit(VarD->getInit());
        AllEscaped = SavedAllEscaped;
      }
    }
  }
  void VisitOpenMPCapturedStmt(const CapturedStmt *S,
                               ArrayRef<OMPClause *> Clauses,
                               bool IsCombinedParallelRegion) {
    if (!S)
      return;
    for (const CapturedStmt::Capture &C : S->captures()) {
      if (C.capturesVariable() && !C.capturesVariableByCopy()) {
        const ValueDecl *VD = C.getCapturedVar();
        bool SavedIsForCombinedParallelRegion = IsForCombinedParallelRegion;
        if (IsCombinedParallelRegion) {
          // Check if the variable is privatized in the combined construct and
          // those private copies must be shared in the inner parallel
          // directive.
          IsForCombinedParallelRegion = false;
          for (const OMPClause *C : Clauses) {
            if (!isOpenMPPrivate(C->getClauseKind()) ||
                C->getClauseKind() == OMPC_reduction ||
                C->getClauseKind() == OMPC_linear ||
                C->getClauseKind() == OMPC_private)
              continue;
            ArrayRef<const Expr *> Vars;
            if (const auto *PC = dyn_cast<OMPFirstprivateClause>(C))
              Vars = PC->getVarRefs();
            else if (const auto *PC = dyn_cast<OMPLastprivateClause>(C))
              Vars = PC->getVarRefs();
            else
              llvm_unreachable("Unexpected clause.");
            for (const auto *E : Vars) {
              const Decl *D =
                  cast<DeclRefExpr>(E)->getDecl()->getCanonicalDecl();
              if (D == VD->getCanonicalDecl()) {
                IsForCombinedParallelRegion = true;
                break;
              }
            }
            if (IsForCombinedParallelRegion)
              break;
          }
        }
        markAsEscaped(VD);
        if (isa<OMPCapturedExprDecl>(VD))
          VisitValueDecl(VD);
        IsForCombinedParallelRegion = SavedIsForCombinedParallelRegion;
      }
    }
  }

  void buildRecordForGlobalizedVars(bool IsInTTDRegion) {
    assert(!GlobalizedRD &&
           "Record for globalized variables is built already.");
    ArrayRef<const ValueDecl *> EscapedDeclsForParallel, EscapedDeclsForTeams;
    unsigned WarpSize = CGF.getTarget().getGridValue(llvm::omp::GV_Warp_Size);
    if (IsInTTDRegion)
      EscapedDeclsForTeams = EscapedDecls.getArrayRef();
    else
      EscapedDeclsForParallel = EscapedDecls.getArrayRef();
    GlobalizedRD = ::buildRecordForGlobalizedVars(
        CGF.getContext(), EscapedDeclsForParallel, EscapedDeclsForTeams,
        MappedDeclsFields, WarpSize);
  }
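  // Note that the warp size is passed as BufSize, so each parallel-region
  // escapee gets one globalized slot per warp lane.
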
public:
  CheckVarsEscapingDeclContext(CodeGenFunction &CGF,
                               ArrayRef<const ValueDecl *> TeamsReductions)
      : CGF(CGF), EscapedDecls(TeamsReductions.begin(), TeamsReductions.end()) {
  }
  virtual ~CheckVarsEscapingDeclContext() = default;
  void VisitDeclStmt(const DeclStmt *S) {
    if (!S)
      return;
    for (const Decl *D : S->decls())
      if (const auto *VD = dyn_cast_or_null<ValueDecl>(D))
        VisitValueDecl(VD);
  }
  void VisitOMPExecutableDirective(const OMPExecutableDirective *D) {
    if (!D)
      return;
    if (!D->hasAssociatedStmt())
      return;
    if (const auto *S =
            dyn_cast_or_null<CapturedStmt>(D->getAssociatedStmt())) {
      // Do not analyze directives that do not actually require capturing,
      // like `omp for` or `omp simd` directives.
      llvm::SmallVector<OpenMPDirectiveKind, 4> CaptureRegions;
      getOpenMPCaptureRegions(CaptureRegions, D->getDirectiveKind());
      if (CaptureRegions.size() == 1 && CaptureRegions.back() == OMPD_unknown) {
        VisitStmt(S->getCapturedStmt());
        return;
      }
      VisitOpenMPCapturedStmt(
          S, D->clauses(),
          CaptureRegions.back() == OMPD_parallel &&
              isOpenMPDistributeDirective(D->getDirectiveKind()));
    }
  }
  void VisitCapturedStmt(const CapturedStmt *S) {
    if (!S)
      return;
    for (const CapturedStmt::Capture &C : S->captures()) {
      if (C.capturesVariable() && !C.capturesVariableByCopy()) {
        const ValueDecl *VD = C.getCapturedVar();
        markAsEscaped(VD);
        if (isa<OMPCapturedExprDecl>(VD))
          VisitValueDecl(VD);
      }
    }
  }
  void VisitLambdaExpr(const LambdaExpr *E) {
    if (!E)
      return;
    for (const LambdaCapture &C : E->captures()) {
      if (C.capturesVariable()) {
        if (C.getCaptureKind() == LCK_ByRef) {
          const ValueDecl *VD = C.getCapturedVar();
          markAsEscaped(VD);
          if (E->isInitCapture(&C) || isa<OMPCapturedExprDecl>(VD))
            VisitValueDecl(VD);
        }
      }
    }
  }
  void VisitBlockExpr(const BlockExpr *E) {
    if (!E)
      return;
    for (const BlockDecl::Capture &C : E->getBlockDecl()->captures()) {
      if (C.isByRef()) {
        const VarDecl *VD = C.getVariable();
        markAsEscaped(VD);
        if (isa<OMPCapturedExprDecl>(VD) || VD->isInitCapture())
          VisitValueDecl(VD);
      }
    }
  }
  void VisitCallExpr(const CallExpr *E) {
    if (!E)
      return;
    for (const Expr *Arg : E->arguments()) {
      if (!Arg)
        continue;
      if (Arg->isLValue()) {
        const bool SavedAllEscaped = AllEscaped;
        AllEscaped = true;
        Visit(Arg);
        AllEscaped = SavedAllEscaped;
      } else {
        Visit(Arg);
      }
    }
    Visit(E->getCallee());
  }
  void VisitDeclRefExpr(const DeclRefExpr *E) {
    if (!E)
      return;
    const ValueDecl *VD = E->getDecl();
    if (AllEscaped)
      markAsEscaped(VD);
    if (isa<OMPCapturedExprDecl>(VD))
      VisitValueDecl(VD);
    else if (const auto *VarD = dyn_cast<VarDecl>(VD))
      if (VarD->isInitCapture())
        VisitValueDecl(VD);
  }
  void VisitUnaryOperator(const UnaryOperator *E) {
    if (!E)
      return;
    if (E->getOpcode() == UO_AddrOf) {
      const bool SavedAllEscaped = AllEscaped;
      AllEscaped = true;
      Visit(E->getSubExpr());
      AllEscaped = SavedAllEscaped;
    } else {
      Visit(E->getSubExpr());
    }
  }
  void VisitImplicitCastExpr(const ImplicitCastExpr *E) {
    if (!E)
      return;
    if (E->getCastKind() == CK_ArrayToPointerDecay) {
      const bool SavedAllEscaped = AllEscaped;
      AllEscaped = true;
      Visit(E->getSubExpr());
      AllEscaped = SavedAllEscaped;
    } else {
      Visit(E->getSubExpr());
    }
  }
  void VisitExpr(const Expr *E) {
    if (!E)
      return;
    bool SavedAllEscaped = AllEscaped;
    if (!E->isLValue())
      AllEscaped = false;
    for (const Stmt *Child : E->children())
      if (Child)
        Visit(Child);
    AllEscaped = SavedAllEscaped;
  }
  void VisitStmt(const Stmt *S) {
    if (!S)
      return;
    for (const Stmt *Child : S->children())
      if (Child)
        Visit(Child);
  }

  /// Returns the record that handles all the escaped local variables and is
  /// used instead of their original storage.
  const RecordDecl *getGlobalizedRecord(bool IsInTTDRegion) {
    if (!GlobalizedRD)
      buildRecordForGlobalizedVars(IsInTTDRegion);
    return GlobalizedRD;
  }

  /// Returns the field in the globalized record for the escaped variable.
  const FieldDecl *getFieldForGlobalizedVar(const ValueDecl *VD) const {
    assert(GlobalizedRD &&
           "Record for globalized variables must be generated already.");
    auto I = MappedDeclsFields.find(VD);
    if (I == MappedDeclsFields.end())
      return nullptr;
    return I->getSecond();
  }

  /// Returns the list of the escaped local variables/parameters.
  ArrayRef<const ValueDecl *> getEscapedDecls() const {
    return EscapedDecls.getArrayRef();
  }

  /// Checks if the escaped local variable is actually a parameter passed by
  /// value.
  const llvm::SmallPtrSetImpl<const Decl *> &getEscapedParameters() const {
    return EscapedParameters;
  }

  /// Returns the list of the escaped variables with the variably modified
  /// types.
  ArrayRef<const ValueDecl *> getEscapedVariableLengthDecls() const {
    return EscapedVariableLengthDecls.getArrayRef();
  }
};
} // anonymous namespace

/// Get the id of the warp in the block.
/// We assume that the warp size is 32, which is always the case
/// on the NVPTX device, to generate more efficient code.
static llvm::Value *getNVPTXWarpID(CodeGenFunction &CGF) {
  CGBuilderTy &Bld = CGF.Builder;
  unsigned LaneIDBits =
      CGF.getTarget().getGridValue(llvm::omp::GV_Warp_Size_Log2);
  auto &RT = static_cast<CGOpenMPRuntimeGPU &>(CGF.CGM.getOpenMPRuntime());
  return Bld.CreateAShr(RT.getGPUThreadID(CGF), LaneIDBits, "nvptx_warp_id");
}
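// E.g., with warp size 32 (GV_Warp_Size_Log2 == 5), thread id 70 is in warp
// 70 >> 5 == 2.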

/// Get the id of the current lane in the Warp.
/// We assume that the warp size is 32, which is always the case
/// on the NVPTX device, to generate more efficient code.
static llvm::Value *getNVPTXLaneID(CodeGenFunction &CGF) {
  CGBuilderTy &Bld = CGF.Builder;
  unsigned LaneIDMask = CGF.getContext().getTargetInfo().getGridValue(
      llvm::omp::GV_Warp_Size_Log2_Mask);
  auto &RT = static_cast<CGOpenMPRuntimeGPU &>(CGF.CGM.getOpenMPRuntime());
  return Bld.CreateAnd(RT.getGPUThreadID(CGF), Bld.getInt32(LaneIDMask),
                       "nvptx_lane_id");
}
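// E.g., with warp size 32 the mask is 31, so thread id 70 is lane 70 & 31 == 6
// of its warp.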

/// Get the value of the thread_limit clause in the teams directive.
/// For the 'generic' execution mode, the runtime encodes thread_limit in
/// the launch parameters, always starting thread_limit+warpSize threads per
/// CTA. The threads in the last warp are reserved for master execution.
/// For the 'spmd' execution mode, all threads in a CTA are part of the team.
static llvm::Value *getThreadLimit(CodeGenFunction &CGF,
                                   bool IsInSPMDExecutionMode = false) {
  CGBuilderTy &Bld = CGF.Builder;
  auto &RT = static_cast<CGOpenMPRuntimeGPU &>(CGF.CGM.getOpenMPRuntime());
  return IsInSPMDExecutionMode
             ? RT.getGPUNumThreads(CGF)
             : Bld.CreateNUWSub(RT.getGPUNumThreads(CGF),
                                RT.getGPUWarpSize(CGF), "thread_limit");
}
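// E.g., a generic-mode CTA launched with 128 threads and a warp size of 32
// reports a thread_limit of 128 - 32 == 96; the last warp is kept for the
// master.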

/// Get the thread id of the OMP master thread.
/// The master thread id is the first thread (lane) of the last warp in the
/// GPU block.  Warp size is assumed to be some power of 2.
/// Thread id is 0 indexed.
/// E.g: If NumThreads is 33, master id is 32.
///      If NumThreads is 64, master id is 32.
///      If NumThreads is 1024, master id is 992.
static llvm::Value *getMasterThreadID(CodeGenFunction &CGF) {
  CGBuilderTy &Bld = CGF.Builder;
  auto &RT = static_cast<CGOpenMPRuntimeGPU &>(CGF.CGM.getOpenMPRuntime());
  llvm::Value *NumThreads = RT.getGPUNumThreads(CGF);
  // We assume that the warp size is a power of 2.
  llvm::Value *Mask = Bld.CreateNUWSub(RT.getGPUWarpSize(CGF), Bld.getInt32(1));

  return Bld.CreateAnd(Bld.CreateNUWSub(NumThreads, Bld.getInt32(1)),
                       Bld.CreateNot(Mask), "master_tid");
}
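// The emitted value is (NumThreads - 1) & ~(WarpSize - 1), i.e. NumThreads - 1
// rounded down to a warp boundary, which matches the examples above.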

CGOpenMPRuntimeGPU::WorkerFunctionState::WorkerFunctionState(
    CodeGenModule &CGM, SourceLocation Loc)
    : WorkerFn(nullptr), CGFI(CGM.getTypes().arrangeNullaryFunction()),
      Loc(Loc) {
  createWorkerFunction(CGM);
}

void CGOpenMPRuntimeGPU::WorkerFunctionState::createWorkerFunction(
    CodeGenModule &CGM) {
  // Create a worker function with no arguments.

  WorkerFn = llvm::Function::Create(
      CGM.getTypes().GetFunctionType(CGFI), llvm::GlobalValue::InternalLinkage,
      /*placeholder=*/"_worker", &CGM.getModule());
  CGM.SetInternalFunctionAttributes(GlobalDecl(), WorkerFn, CGFI);
  WorkerFn->setDoesNotRecurse();
}

CGOpenMPRuntimeGPU::ExecutionMode
CGOpenMPRuntimeGPU::getExecutionMode() const {
  return CurrentExecutionMode;
}

static CGOpenMPRuntimeGPU::DataSharingMode
getDataSharingMode(CodeGenModule &CGM) {
  return CGM.getLangOpts().OpenMPCUDAMode ? CGOpenMPRuntimeGPU::CUDA
                                          : CGOpenMPRuntimeGPU::Generic;
}

/// Check for inner (nested) SPMD construct, if any
static bool hasNestedSPMDDirective(ASTContext &Ctx,
                                   const OMPExecutableDirective &D) {
  const auto *CS = D.getInnermostCapturedStmt();
  const auto *Body =
      CS->getCapturedStmt()->IgnoreContainers(/*IgnoreCaptured=*/true);
  const Stmt *ChildStmt = CGOpenMPRuntime::getSingleCompoundChild(Ctx, Body);

  if (const auto *NestedDir =
          dyn_cast_or_null<OMPExecutableDirective>(ChildStmt)) {
    OpenMPDirectiveKind DKind = NestedDir->getDirectiveKind();
    switch (D.getDirectiveKind()) {
    case OMPD_target:
      if (isOpenMPParallelDirective(DKind))
        return true;
      if (DKind == OMPD_teams) {
        Body = NestedDir->getInnermostCapturedStmt()->IgnoreContainers(
            /*IgnoreCaptured=*/true);
        if (!Body)
          return false;
        ChildStmt = CGOpenMPRuntime::getSingleCompoundChild(Ctx, Body);
        if (const auto *NND =
                dyn_cast_or_null<OMPExecutableDirective>(ChildStmt)) {
          DKind = NND->getDirectiveKind();
          if (isOpenMPParallelDirective(DKind))
            return true;
        }
      }
      return false;
    case OMPD_target_teams:
      return isOpenMPParallelDirective(DKind);
    case OMPD_target_simd:
    case OMPD_target_parallel:
    case OMPD_target_parallel_for:
    case OMPD_target_parallel_for_simd:
    case OMPD_target_teams_distribute:
    case OMPD_target_teams_distribute_simd:
    case OMPD_target_teams_distribute_parallel_for:
    case OMPD_target_teams_distribute_parallel_for_simd:
    case OMPD_parallel:
    case OMPD_for:
    case OMPD_parallel_for:
    case OMPD_parallel_master:
    case OMPD_parallel_sections:
    case OMPD_for_simd:
    case OMPD_parallel_for_simd:
    case OMPD_cancel:
    case OMPD_cancellation_point:
    case OMPD_ordered:
    case OMPD_threadprivate:
    case OMPD_allocate:
    case OMPD_task:
    case OMPD_simd:
    case OMPD_sections:
    case OMPD_section:
    case OMPD_single:
    case OMPD_master:
    case OMPD_critical:
    case OMPD_taskyield:
    case OMPD_barrier:
    case OMPD_taskwait:
    case OMPD_taskgroup:
    case OMPD_atomic:
    case OMPD_flush:
    case OMPD_depobj:
    case OMPD_scan:
    case OMPD_teams:
    case OMPD_target_data:
    case OMPD_target_exit_data:
    case OMPD_target_enter_data:
    case OMPD_distribute:
    case OMPD_distribute_simd:
    case OMPD_distribute_parallel_for:
    case OMPD_distribute_parallel_for_simd:
    case OMPD_teams_distribute:
    case OMPD_teams_distribute_simd:
    case OMPD_teams_distribute_parallel_for:
    case OMPD_teams_distribute_parallel_for_simd:
    case OMPD_target_update:
    case OMPD_declare_simd:
    case OMPD_declare_variant:
    case OMPD_begin_declare_variant:
    case OMPD_end_declare_variant:
    case OMPD_declare_target:
    case OMPD_end_declare_target:
    case OMPD_declare_reduction:
    case OMPD_declare_mapper:
    case OMPD_taskloop:
    case OMPD_taskloop_simd:
    case OMPD_master_taskloop:
    case OMPD_master_taskloop_simd:
    case OMPD_parallel_master_taskloop:
    case OMPD_parallel_master_taskloop_simd:
    case OMPD_requires:
    case OMPD_unknown:
    default:
      llvm_unreachable("Unexpected directive.");
    }
  }

  return false;
}
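// E.g., the following nest is detected as SPMD because the teams region's
// single child is a parallel construct:
//   #pragma omp target
//   #pragma omp teams
//   #pragma omp parallel for
//   for (int i = 0; i < N; ++i) ...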

static bool supportsSPMDExecutionMode(ASTContext &Ctx,
                                      const OMPExecutableDirective &D) {
  OpenMPDirectiveKind DirectiveKind = D.getDirectiveKind();
  switch (DirectiveKind) {
  case OMPD_target:
  case OMPD_target_teams:
    return hasNestedSPMDDirective(Ctx, D);
  case OMPD_target_parallel:
  case OMPD_target_parallel_for:
  case OMPD_target_parallel_for_simd:
  case OMPD_target_teams_distribute_parallel_for:
  case OMPD_target_teams_distribute_parallel_for_simd:
  case OMPD_target_simd:
  case OMPD_target_teams_distribute_simd:
    return true;
  case OMPD_target_teams_distribute:
    return false;
  case OMPD_parallel:
  case OMPD_for:
  case OMPD_parallel_for:
  case OMPD_parallel_master:
  case OMPD_parallel_sections:
  case OMPD_for_simd:
  case OMPD_parallel_for_simd:
  case OMPD_cancel:
  case OMPD_cancellation_point:
  case OMPD_ordered:
  case OMPD_threadprivate:
  case OMPD_allocate:
  case OMPD_task:
  case OMPD_simd:
  case OMPD_sections:
  case OMPD_section:
  case OMPD_single:
  case OMPD_master:
  case OMPD_critical:
  case OMPD_taskyield:
  case OMPD_barrier:
  case OMPD_taskwait:
  case OMPD_taskgroup:
  case OMPD_atomic:
  case OMPD_flush:
  case OMPD_depobj:
  case OMPD_scan:
  case OMPD_teams:
  case OMPD_target_data:
  case OMPD_target_exit_data:
  case OMPD_target_enter_data:
  case OMPD_distribute:
  case OMPD_distribute_simd:
  case OMPD_distribute_parallel_for:
  case OMPD_distribute_parallel_for_simd:
  case OMPD_teams_distribute:
  case OMPD_teams_distribute_simd:
  case OMPD_teams_distribute_parallel_for:
  case OMPD_teams_distribute_parallel_for_simd:
  case OMPD_target_update:
  case OMPD_declare_simd:
  case OMPD_declare_variant:
  case OMPD_begin_declare_variant:
  case OMPD_end_declare_variant:
  case OMPD_declare_target:
  case OMPD_end_declare_target:
  case OMPD_declare_reduction:
  case OMPD_declare_mapper:
  case OMPD_taskloop:
  case OMPD_taskloop_simd:
  case OMPD_master_taskloop:
  case OMPD_master_taskloop_simd:
  case OMPD_parallel_master_taskloop:
  case OMPD_parallel_master_taskloop_simd:
  case OMPD_requires:
  case OMPD_unknown:
  default:
    break;
  }
  llvm_unreachable(
      "Unknown programming model for OpenMP directive on NVPTX target.");
}

/// Check if the directive is a loop-based directive that either has no
/// schedule clause or uses static scheduling (and has no ordered clause).
static bool hasStaticScheduling(const OMPExecutableDirective &D) {
  assert(isOpenMPWorksharingDirective(D.getDirectiveKind()) &&
         isOpenMPLoopDirective(D.getDirectiveKind()) &&
         "Expected loop-based directive.");
  return !D.hasClausesOfKind<OMPOrderedClause>() &&
         (!D.hasClausesOfKind<OMPScheduleClause>() ||
          llvm::any_of(D.getClausesOfKind<OMPScheduleClause>(),
                       [](const OMPScheduleClause *C) {
                         return C->getScheduleKind() == OMPC_SCHEDULE_static;
                       }));
}
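// E.g., both of these qualify as statically scheduled:
//   #pragma omp target parallel for
//   #pragma omp target parallel for schedule(static)
// whereas schedule(dynamic) or an ordered clause disqualifies the loop.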

/// Check for inner (nested) lightweight runtime construct, if any
static bool hasNestedLightweightDirective(ASTContext &Ctx,
                                          const OMPExecutableDirective &D) {
  assert(supportsSPMDExecutionMode(Ctx, D) && "Expected SPMD mode directive.");
  const auto *CS = D.getInnermostCapturedStmt();
  const auto *Body =
      CS->getCapturedStmt()->IgnoreContainers(/*IgnoreCaptured=*/true);
  const Stmt *ChildStmt = CGOpenMPRuntime::getSingleCompoundChild(Ctx, Body);

  if (const auto *NestedDir =
          dyn_cast_or_null<OMPExecutableDirective>(ChildStmt)) {
    OpenMPDirectiveKind DKind = NestedDir->getDirectiveKind();
    switch (D.getDirectiveKind()) {
    case OMPD_target:
      if (isOpenMPParallelDirective(DKind) &&
          isOpenMPWorksharingDirective(DKind) && isOpenMPLoopDirective(DKind) &&
          hasStaticScheduling(*NestedDir))
        return true;
      if (DKind == OMPD_teams_distribute_simd || DKind == OMPD_simd)
        return true;
      if (DKind == OMPD_parallel) {
        Body = NestedDir->getInnermostCapturedStmt()->IgnoreContainers(
            /*IgnoreCaptured=*/true);
        if (!Body)
          return false;
        ChildStmt = CGOpenMPRuntime::getSingleCompoundChild(Ctx, Body);
        if (const auto *NND =
                dyn_cast_or_null<OMPExecutableDirective>(ChildStmt)) {
          DKind = NND->getDirectiveKind();
          if (isOpenMPWorksharingDirective(DKind) &&
              isOpenMPLoopDirective(DKind) && hasStaticScheduling(*NND))
            return true;
        }
      } else if (DKind == OMPD_teams) {
        Body = NestedDir->getInnermostCapturedStmt()->IgnoreContainers(
            /*IgnoreCaptured=*/true);
        if (!Body)
          return false;
        ChildStmt = CGOpenMPRuntime::getSingleCompoundChild(Ctx, Body);
        if (const auto *NND =
                dyn_cast_or_null<OMPExecutableDirective>(ChildStmt)) {
          DKind = NND->getDirectiveKind();
          if (isOpenMPParallelDirective(DKind) &&
              isOpenMPWorksharingDirective(DKind) &&
              isOpenMPLoopDirective(DKind) && hasStaticScheduling(*NND))
            return true;
          if (DKind == OMPD_parallel) {
            Body = NND->getInnermostCapturedStmt()->IgnoreContainers(
                /*IgnoreCaptured=*/true);
            if (!Body)
              return false;
            ChildStmt = CGOpenMPRuntime::getSingleCompoundChild(Ctx, Body);
            if (const auto *NND =
                    dyn_cast_or_null<OMPExecutableDirective>(ChildStmt)) {
              DKind = NND->getDirectiveKind();
              if (isOpenMPWorksharingDirective(DKind) &&
                  isOpenMPLoopDirective(DKind) && hasStaticScheduling(*NND))
                return true;
            }
          }
        }
      }
      return false;
    case OMPD_target_teams:
      if (isOpenMPParallelDirective(DKind) &&
          isOpenMPWorksharingDirective(DKind) && isOpenMPLoopDirective(DKind) &&
          hasStaticScheduling(*NestedDir))
        return true;
      if (DKind == OMPD_distribute_simd || DKind == OMPD_simd)
        return true;
      if (DKind == OMPD_parallel) {
        Body = NestedDir->getInnermostCapturedStmt()->IgnoreContainers(
            /*IgnoreCaptured=*/true);
        if (!Body)
          return false;
        ChildStmt = CGOpenMPRuntime::getSingleCompoundChild(Ctx, Body);
        if (const auto *NND =
                dyn_cast_or_null<OMPExecutableDirective>(ChildStmt)) {
          DKind = NND->getDirectiveKind();
          if (isOpenMPWorksharingDirective(DKind) &&
              isOpenMPLoopDirective(DKind) && hasStaticScheduling(*NND))
            return true;
        }
      }
      return false;
    case OMPD_target_parallel:
      if (DKind == OMPD_simd)
        return true;
      return isOpenMPWorksharingDirective(DKind) &&
             isOpenMPLoopDirective(DKind) && hasStaticScheduling(*NestedDir);
    case OMPD_target_teams_distribute:
    case OMPD_target_simd:
    case OMPD_target_parallel_for:
    case OMPD_target_parallel_for_simd:
    case OMPD_target_teams_distribute_simd:
    case OMPD_target_teams_distribute_parallel_for:
    case OMPD_target_teams_distribute_parallel_for_simd:
    case OMPD_parallel:
    case OMPD_for:
    case OMPD_parallel_for:
    case OMPD_parallel_master:
    case OMPD_parallel_sections:
    case OMPD_for_simd:
    case OMPD_parallel_for_simd:
    case OMPD_cancel:
    case OMPD_cancellation_point:
    case OMPD_ordered:
    case OMPD_threadprivate:
    case OMPD_allocate:
    case OMPD_task:
    case OMPD_simd:
    case OMPD_sections:
    case OMPD_section:
    case OMPD_single:
    case OMPD_master:
    case OMPD_critical:
    case OMPD_taskyield:
    case OMPD_barrier:
    case OMPD_taskwait:
    case OMPD_taskgroup:
    case OMPD_atomic:
    case OMPD_flush:
    case OMPD_depobj:
    case OMPD_scan:
    case OMPD_teams:
    case OMPD_target_data:
    case OMPD_target_exit_data:
    case OMPD_target_enter_data:
    case OMPD_distribute:
    case OMPD_distribute_simd:
    case OMPD_distribute_parallel_for:
    case OMPD_distribute_parallel_for_simd:
    case OMPD_teams_distribute:
    case OMPD_teams_distribute_simd:
    case OMPD_teams_distribute_parallel_for:
    case OMPD_teams_distribute_parallel_for_simd:
    case OMPD_target_update:
    case OMPD_declare_simd:
    case OMPD_declare_variant:
    case OMPD_begin_declare_variant:
    case OMPD_end_declare_variant:
    case OMPD_declare_target:
    case OMPD_end_declare_target:
    case OMPD_declare_reduction:
    case OMPD_declare_mapper:
    case OMPD_taskloop:
    case OMPD_taskloop_simd:
    case OMPD_master_taskloop:
    case OMPD_master_taskloop_simd:
    case OMPD_parallel_master_taskloop:
    case OMPD_parallel_master_taskloop_simd:
    case OMPD_requires:
    case OMPD_unknown:
    default:
      llvm_unreachable("Unexpected directive.");
    }
  }

  return false;
}

/// Checks if the construct supports lightweight runtime. It must be an SPMD
/// construct with an inner loop-based construct that uses static scheduling.
static bool supportsLightweightRuntime(ASTContext &Ctx,
                                       const OMPExecutableDirective &D) {
  if (!supportsSPMDExecutionMode(Ctx, D))
    return false;
  OpenMPDirectiveKind DirectiveKind = D.getDirectiveKind();
  switch (DirectiveKind) {
  case OMPD_target:
  case OMPD_target_teams:
  case OMPD_target_parallel:
    return hasNestedLightweightDirective(Ctx, D);
  case OMPD_target_parallel_for:
  case OMPD_target_parallel_for_simd:
  case OMPD_target_teams_distribute_parallel_for:
  case OMPD_target_teams_distribute_parallel_for_simd:
    // (Last|First)-privates must be shared in parallel region.
    return hasStaticScheduling(D);
  case OMPD_target_simd:
  case OMPD_target_teams_distribute_simd:
    return true;
  case OMPD_target_teams_distribute:
    return false;
  case OMPD_parallel:
  case OMPD_for:
  case OMPD_parallel_for:
  case OMPD_parallel_master:
  case OMPD_parallel_sections:
  case OMPD_for_simd:
  case OMPD_parallel_for_simd:
  case OMPD_cancel:
  case OMPD_cancellation_point:
  case OMPD_ordered:
  case OMPD_threadprivate:
  case OMPD_allocate:
  case OMPD_task:
  case OMPD_simd:
  case OMPD_sections:
  case OMPD_section:
  case OMPD_single:
  case OMPD_master:
  case OMPD_critical:
  case OMPD_taskyield:
  case OMPD_barrier:
  case OMPD_taskwait:
  case OMPD_taskgroup:
  case OMPD_atomic:
  case OMPD_flush:
  case OMPD_depobj:
  case OMPD_scan:
  case OMPD_teams:
  case OMPD_target_data:
  case OMPD_target_exit_data:
  case OMPD_target_enter_data:
  case OMPD_distribute:
  case OMPD_distribute_simd:
  case OMPD_distribute_parallel_for:
  case OMPD_distribute_parallel_for_simd:
  case OMPD_teams_distribute:
  case OMPD_teams_distribute_simd:
  case OMPD_teams_distribute_parallel_for:
  case OMPD_teams_distribute_parallel_for_simd:
  case OMPD_target_update:
  case OMPD_declare_simd:
  case OMPD_declare_variant:
  case OMPD_begin_declare_variant:
  case OMPD_end_declare_variant:
  case OMPD_declare_target:
  case OMPD_end_declare_target:
  case OMPD_declare_reduction:
  case OMPD_declare_mapper:
  case OMPD_taskloop:
  case OMPD_taskloop_simd:
  case OMPD_master_taskloop:
  case OMPD_master_taskloop_simd:
  case OMPD_parallel_master_taskloop:
  case OMPD_parallel_master_taskloop_simd:
  case OMPD_requires:
  case OMPD_unknown:
  default:
    break;
  }
  llvm_unreachable(
      "Unknown programming model for OpenMP directive on NVPTX target.");
}
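// In short, the lightweight runtime is chosen only when the SPMD kernel body
// is a statically scheduled worksharing loop (or a simd construct), so no
// support for dynamic scheduling or data sharing is required at run time.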

void CGOpenMPRuntimeGPU::emitNonSPMDKernel(const OMPExecutableDirective &D,
                                             StringRef ParentName,
                                             llvm::Function *&OutlinedFn,
                                             llvm::Constant *&OutlinedFnID,
                                             bool IsOffloadEntry,
                                             const RegionCodeGenTy &CodeGen) {
  ExecutionRuntimeModesRAII ModeRAII(CurrentExecutionMode);
  EntryFunctionState EST;
  WorkerFunctionState WST(CGM, D.getBeginLoc());
  Work.clear();
  WrapperFunctionsMap.clear();

  // Emit target region as a standalone region.
  class NVPTXPrePostActionTy : public PrePostActionTy {
    CGOpenMPRuntimeGPU::EntryFunctionState &EST;
    CGOpenMPRuntimeGPU::WorkerFunctionState &WST;

  public:
    NVPTXPrePostActionTy(CGOpenMPRuntimeGPU::EntryFunctionState &EST,
                         CGOpenMPRuntimeGPU::WorkerFunctionState &WST)
        : EST(EST), WST(WST) {}
    void Enter(CodeGenFunction &CGF) override {
      auto &RT =
          static_cast<CGOpenMPRuntimeGPU &>(CGF.CGM.getOpenMPRuntime());
      RT.emitNonSPMDEntryHeader(CGF, EST, WST);
      // Skip target region initialization.
      RT.setLocThreadIdInsertPt(CGF, /*AtCurrentPoint=*/true);
    }
    void Exit(CodeGenFunction &CGF) override {
      auto &RT =
          static_cast<CGOpenMPRuntimeGPU &>(CGF.CGM.getOpenMPRuntime());
      RT.clearLocThreadIdInsertPt(CGF);
      RT.emitNonSPMDEntryFooter(CGF, EST);
    }
  } Action(EST, WST);
  CodeGen.setAction(Action);
  IsInTTDRegion = true;
  // Reserve place for the globalized memory.
  GlobalizedRecords.emplace_back();
  if (!KernelStaticGlobalized) {
    KernelStaticGlobalized = new llvm::GlobalVariable(
        CGM.getModule(), CGM.VoidPtrTy, /*isConstant=*/false,
        llvm::GlobalValue::InternalLinkage,
        llvm::ConstantPointerNull::get(CGM.VoidPtrTy),
        "_openmp_kernel_static_glob_rd$ptr", /*InsertBefore=*/nullptr,
        llvm::GlobalValue::NotThreadLocal,
        CGM.getContext().getTargetAddressSpace(LangAS::cuda_shared));
  }
  emitTargetOutlinedFunctionHelper(D, ParentName, OutlinedFn, OutlinedFnID,
                                   IsOffloadEntry, CodeGen);
  IsInTTDRegion = false;

  // Now change the name of the worker function to correspond to this target
  // region's entry function.
  WST.WorkerFn->setName(Twine(OutlinedFn->getName(), "_worker"));

  // Create the worker function
  emitWorkerFunction(WST);
}

// Setup NVPTX threads for master-worker OpenMP scheme.
void CGOpenMPRuntimeGPU::emitNonSPMDEntryHeader(CodeGenFunction &CGF,
                                                  EntryFunctionState &EST,
                                                  WorkerFunctionState &WST) {
  CGBuilderTy &Bld = CGF.Builder;

  llvm::BasicBlock *WorkerBB = CGF.createBasicBlock(".worker");
  llvm::BasicBlock *MasterCheckBB = CGF.createBasicBlock(".mastercheck");
  llvm::BasicBlock *MasterBB = CGF.createBasicBlock(".master");
  EST.ExitBB = CGF.createBasicBlock(".exit");

  auto &RT = static_cast<CGOpenMPRuntimeGPU &>(CGF.CGM.getOpenMPRuntime());
  llvm::Value *IsWorker =
      Bld.CreateICmpULT(RT.getGPUThreadID(CGF), getThreadLimit(CGF));
  Bld.CreateCondBr(IsWorker, WorkerBB, MasterCheckBB);

  CGF.EmitBlock(WorkerBB);
  emitCall(CGF, WST.Loc, WST.WorkerFn);
  CGF.EmitBranch(EST.ExitBB);

  CGF.EmitBlock(MasterCheckBB);
  llvm::Value *IsMaster =
      Bld.CreateICmpEQ(RT.getGPUThreadID(CGF), getMasterThreadID(CGF));
  Bld.CreateCondBr(IsMaster, MasterBB, EST.ExitBB);

  CGF.EmitBlock(MasterBB);
  IsInTargetMasterThreadRegion = true;
  // SEQUENTIAL (MASTER) REGION START
  // First action in sequential region:
  // Initialize the state of the OpenMP runtime library on the GPU.
  // TODO: Optimize runtime initialization and pass in correct value.
  llvm::Value *Args[] = {getThreadLimit(CGF),
                         Bld.getInt16(/*RequiresOMPRuntime=*/1)};
  CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
                          CGM.getModule(), OMPRTL___kmpc_kernel_init),
                      Args);

  // For data sharing, we need to initialize the stack.
  CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
      CGM.getModule(), OMPRTL___kmpc_data_sharing_init_stack));

  emitGenericVarsProlog(CGF, WST.Loc);
}
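// The generated entry code therefore branches on the thread id: threads below
// thread_limit jump to .worker and park in the worker loop, the master thread
// falls through .mastercheck into .master to run the sequential region, and
// all remaining threads exit.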
1165
1166
void CGOpenMPRuntimeGPU::emitNonSPMDEntryFooter(CodeGenFunction &CGF,
1167
189
                                                  EntryFunctionState &EST) {
1168
189
  IsInTargetMasterThreadRegion = false;
1169
189
  if (!CGF.HaveInsertPoint())
1170
0
    return;
1171
1172
189
  emitGenericVarsEpilog(CGF);
1173
1174
189
  if (!EST.ExitBB)
1175
0
    EST.ExitBB = CGF.createBasicBlock(".exit");
1176
1177
189
  llvm::BasicBlock *TerminateBB = CGF.createBasicBlock(".termination.notifier");
1178
189
  CGF.EmitBranch(TerminateBB);
1179
1180
189
  CGF.EmitBlock(TerminateBB);
1181
  // Signal termination condition.
1182
  // TODO: Optimize runtime initialization and pass in correct value.
1183
189
  llvm::Value *Args[] = {CGF.Builder.getInt16(/*IsOMPRuntimeInitialized=*/1)};
1184
189
  CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
1185
189
                          CGM.getModule(), OMPRTL___kmpc_kernel_deinit),
1186
189
                      Args);
1187
  // Barrier to terminate worker threads.
1188
189
  syncCTAThreads(CGF);
1189
  // Master thread jumps to exit point.
1190
189
  CGF.EmitBranch(EST.ExitBB);
1191
1192
189
  CGF.EmitBlock(EST.ExitBB);
1193
189
  EST.ExitBB = nullptr;
1194
189
}
1195
1196
void CGOpenMPRuntimeGPU::emitSPMDKernel(const OMPExecutableDirective &D,
1197
                                          StringRef ParentName,
1198
                                          llvm::Function *&OutlinedFn,
1199
                                          llvm::Constant *&OutlinedFnID,
1200
                                          bool IsOffloadEntry,
1201
676
                                          const RegionCodeGenTy &CodeGen) {
1202
676
  ExecutionRuntimeModesRAII ModeRAII(
1203
676
      CurrentExecutionMode, RequiresFullRuntime,
1204
676
      CGM.getLangOpts().OpenMPCUDAForceFullRuntime ||
1205
487
          !supportsLightweightRuntime(CGM.getContext(), D));
1206
676
  EntryFunctionState EST;
1207
1208
  // Emit target region as a standalone region.
1209
676
  class NVPTXPrePostActionTy : public PrePostActionTy {
1210
676
    CGOpenMPRuntimeGPU &RT;
1211
676
    CGOpenMPRuntimeGPU::EntryFunctionState &EST;
1212
676
    const OMPExecutableDirective &D;
1213
1214
676
  public:
1215
676
    NVPTXPrePostActionTy(CGOpenMPRuntimeGPU &RT,
1216
676
                         CGOpenMPRuntimeGPU::EntryFunctionState &EST,
1217
676
                         const OMPExecutableDirective &D)
1218
676
        : RT(RT), EST(EST), D(D) {}
1219
676
    void Enter(CodeGenFunction &CGF) override {
1220
676
      RT.emitSPMDEntryHeader(CGF, EST, D);
1221
      // Skip target region initialization.
1222
676
      RT.setLocThreadIdInsertPt(CGF, /*AtCurrentPoint=*/true);
1223
676
    }
1224
676
    void Exit(CodeGenFunction &CGF) override {
1225
676
      RT.clearLocThreadIdInsertPt(CGF);
1226
676
      RT.emitSPMDEntryFooter(CGF, EST);
1227
676
    }
1228
676
  } Action(*this, EST, D);
1229
676
  CodeGen.setAction(Action);
1230
676
  IsInTTDRegion = true;
1231
  // Reserve place for the globalized memory.
1232
676
  GlobalizedRecords.emplace_back();
1233
676
  if (!KernelStaticGlobalized) {
1234
91
    KernelStaticGlobalized = new llvm::GlobalVariable(
1235
91
        CGM.getModule(), CGM.VoidPtrTy, /*isConstant=*/false,
1236
91
        llvm::GlobalValue::InternalLinkage,
1237
91
        llvm::ConstantPointerNull::get(CGM.VoidPtrTy),
1238
91
        "_openmp_kernel_static_glob_rd$ptr", /*InsertBefore=*/nullptr,
1239
91
        llvm::GlobalValue::NotThreadLocal,
1240
91
        CGM.getContext().getTargetAddressSpace(LangAS::cuda_shared));
1241
91
  }
1242
676
  emitTargetOutlinedFunctionHelper(D, ParentName, OutlinedFn, OutlinedFnID,
1243
676
                                   IsOffloadEntry, CodeGen);
1244
676
  IsInTTDRegion = false;
1245
676
}
1246
1247
void CGOpenMPRuntimeGPU::emitSPMDEntryHeader(
1248
    CodeGenFunction &CGF, EntryFunctionState &EST,
1249
676
    const OMPExecutableDirective &D) {
1250
676
  CGBuilderTy &Bld = CGF.Builder;
1251
1252
  // Setup BBs in entry function.
1253
676
  llvm::BasicBlock *ExecuteBB = CGF.createBasicBlock(".execute");
1254
676
  EST.ExitBB = CGF.createBasicBlock(".exit");
1255
1256
676
  llvm::Value *Args[] = {getThreadLimit(CGF, /*IsInSPMDExecutionMode=*/true),
1257
                         /*RequiresOMPRuntime=*/
1258
380
                         Bld.getInt16(RequiresFullRuntime ? 1 : 
0296
)};
1259
676
  CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
1260
676
                          CGM.getModule(), OMPRTL___kmpc_spmd_kernel_init),
1261
676
                      Args);
1262
1263
676
  if (RequiresFullRuntime) {
1264
    // For data sharing, we need to initialize the stack.
1265
380
    CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
1266
380
        CGM.getModule(), OMPRTL___kmpc_data_sharing_init_stack_spmd));
1267
380
  }
1268
1269
676
  CGF.EmitBranch(ExecuteBB);
1270
1271
676
  CGF.EmitBlock(ExecuteBB);
1272
1273
676
  IsInTargetMasterThreadRegion = true;
1274
676
}
1275
1276
void CGOpenMPRuntimeGPU::emitSPMDEntryFooter(CodeGenFunction &CGF,
1277
676
                                               EntryFunctionState &EST) {
1278
676
  IsInTargetMasterThreadRegion = false;
1279
676
  if (!CGF.HaveInsertPoint())
1280
0
    return;
1281
1282
676
  if (!EST.ExitBB)
1283
0
    EST.ExitBB = CGF.createBasicBlock(".exit");
1284
1285
676
  llvm::BasicBlock *OMPDeInitBB = CGF.createBasicBlock(".omp.deinit");
1286
676
  CGF.EmitBranch(OMPDeInitBB);
1287
1288
676
  CGF.EmitBlock(OMPDeInitBB);
1289
  // DeInitialize the OMP state in the runtime; called by all active threads.
1290
676
  llvm::Value *Args[] = {/*RequiresOMPRuntime=*/
1291
380
                         CGF.Builder.getInt16(RequiresFullRuntime ? 1 : 
0296
)};
1292
676
  CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
1293
676
                          CGM.getModule(), OMPRTL___kmpc_spmd_kernel_deinit_v2),
1294
676
                      Args);
1295
676
  CGF.EmitBranch(EST.ExitBB);
1296
1297
676
  CGF.EmitBlock(EST.ExitBB);
1298
676
  EST.ExitBB = nullptr;
1299
676
}
1300
1301
// Create a unique global variable to indicate the execution mode of this target
1302
// region. The execution mode is either 'generic', or 'spmd' depending on the
1303
// target directive. This variable is picked up by the offload library to setup
1304
// the device appropriately before kernel launch. If the execution mode is
1305
// 'generic', the runtime reserves one warp for the master, otherwise, all
1306
// warps participate in parallel work.
1307
static void setPropertyExecutionMode(CodeGenModule &CGM, StringRef Name,
1308
865
                                     bool Mode) {
1309
865
  auto *GVMode =
1310
865
      new llvm::GlobalVariable(CGM.getModule(), CGM.Int8Ty, /*isConstant=*/true,
1311
865
                               llvm::GlobalValue::WeakAnyLinkage,
1312
676
                               llvm::ConstantInt::get(CGM.Int8Ty, Mode ? 0 : 1),
1313
865
                               Twine(Name, "_exec_mode"));
1314
865
  CGM.addCompilerUsedGlobal(GVMode);
1315
865
}
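
Since the stored constant is Mode ? 0 : 1, an SPMD kernel records 0 and a generic kernel records 1. For a kernel named, hypothetically, __omp_offloading_foo_l10, the artifact is a weak i8 global kept alive through the compiler-used list; roughly:

  // @__omp_offloading_foo_l10_exec_mode = weak constant i8 0   (SPMD)
  // @__omp_offloading_foo_l10_exec_mode = weak constant i8 1   (generic)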
1316
1317
189
void CGOpenMPRuntimeGPU::emitWorkerFunction(WorkerFunctionState &WST) {
1318
189
  ASTContext &Ctx = CGM.getContext();
1319
1320
189
  CodeGenFunction CGF(CGM, /*suppressNewContext=*/true);
1321
189
  CGF.StartFunction(GlobalDecl(), Ctx.VoidTy, WST.WorkerFn, WST.CGFI, {},
1322
189
                    WST.Loc, WST.Loc);
1323
189
  emitWorkerLoop(CGF, WST);
1324
189
  CGF.FinishFunction();
1325
189
}
1326
1327
void CGOpenMPRuntimeGPU::emitWorkerLoop(CodeGenFunction &CGF,
1328
189
                                        WorkerFunctionState &WST) {
1329
  //
1330
  // The workers enter this loop and wait for parallel work from the master.
1331
  // When the master encounters a parallel region, it sets up the work + variable
1332
  // arguments, and wakes up the workers.  The workers first check to see if
1333
  // they are required for the parallel region, i.e., within the # of requested
1334
  // parallel threads.  The activated workers load the variable arguments and
1335
  // execute the parallel work.
1336
  //
1337
1338
189
  CGBuilderTy &Bld = CGF.Builder;
1339
1340
189
  llvm::BasicBlock *AwaitBB = CGF.createBasicBlock(".await.work");
1341
189
  llvm::BasicBlock *SelectWorkersBB = CGF.createBasicBlock(".select.workers");
1342
189
  llvm::BasicBlock *ExecuteBB = CGF.createBasicBlock(".execute.parallel");
1343
189
  llvm::BasicBlock *TerminateBB = CGF.createBasicBlock(".terminate.parallel");
1344
189
  llvm::BasicBlock *BarrierBB = CGF.createBasicBlock(".barrier.parallel");
1345
189
  llvm::BasicBlock *ExitBB = CGF.createBasicBlock(".exit");
1346
1347
189
  CGF.EmitBranch(AwaitBB);
1348
1349
  // Workers wait for work from master.
1350
189
  CGF.EmitBlock(AwaitBB);
1351
  // Wait for parallel work
1352
189
  syncCTAThreads(CGF);
1353
1354
189
  Address WorkFn =
1355
189
      CGF.CreateDefaultAlignTempAlloca(CGF.Int8PtrTy, /*Name=*/"work_fn");
1356
189
  Address ExecStatus =
1357
189
      CGF.CreateDefaultAlignTempAlloca(CGF.Int8Ty, /*Name=*/"exec_status");
1358
189
  CGF.InitTempAlloca(ExecStatus, Bld.getInt8(/*C=*/0));
1359
189
  CGF.InitTempAlloca(WorkFn, llvm::Constant::getNullValue(CGF.Int8PtrTy));
1360
1361
  // TODO: Optimize runtime initialization and pass in correct value.
1362
189
  llvm::Value *Args[] = {WorkFn.getPointer()};
1363
189
  llvm::Value *Ret =
1364
189
      CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
1365
189
                              CGM.getModule(), OMPRTL___kmpc_kernel_parallel),
1366
189
                          Args);
1367
189
  Bld.CreateStore(Bld.CreateZExt(Ret, CGF.Int8Ty), ExecStatus);
1368
1369
  // On termination condition (workid == 0), exit loop.
1370
189
  llvm::Value *WorkID = Bld.CreateLoad(WorkFn);
1371
189
  llvm::Value *ShouldTerminate = Bld.CreateIsNull(WorkID, "should_terminate");
1372
189
  Bld.CreateCondBr(ShouldTerminate, ExitBB, SelectWorkersBB);
1373
1374
  // Activate requested workers.
1375
189
  CGF.EmitBlock(SelectWorkersBB);
1376
189
  llvm::Value *IsActive =
1377
189
      Bld.CreateIsNotNull(Bld.CreateLoad(ExecStatus), "is_active");
1378
189
  Bld.CreateCondBr(IsActive, ExecuteBB, BarrierBB);
1379
1380
  // Signal start of parallel region.
1381
189
  CGF.EmitBlock(ExecuteBB);
1382
  // Skip initialization.
1383
189
  setLocThreadIdInsertPt(CGF, /*AtCurrentPoint=*/true);
1384
1385
  // Process work items: outlined parallel functions.
1386
64
  for (llvm::Function *W : Work) {
1387
    // Try to match this outlined function.
1388
64
    llvm::Value *ID = Bld.CreatePointerBitCastOrAddrSpaceCast(W, CGM.Int8PtrTy);
1389
1390
64
    llvm::Value *WorkFnMatch =
1391
64
        Bld.CreateICmpEQ(Bld.CreateLoad(WorkFn), ID, "work_match");
1392
1393
64
    llvm::BasicBlock *ExecuteFNBB = CGF.createBasicBlock(".execute.fn");
1394
64
    llvm::BasicBlock *CheckNextBB = CGF.createBasicBlock(".check.next");
1395
64
    Bld.CreateCondBr(WorkFnMatch, ExecuteFNBB, CheckNextBB);
1396
1397
    // Execute this outlined function.
1398
64
    CGF.EmitBlock(ExecuteFNBB);
1399
1400
    // Insert call to work function via shared wrapper. The shared
1401
    // wrapper takes two arguments:
1402
    //   - the parallelism level;
1403
    //   - the thread ID;
1404
64
    emitCall(CGF, WST.Loc, W,
1405
64
             {Bld.getInt16(/*ParallelLevel=*/0), getThreadID(CGF, WST.Loc)});
1406
1407
    // Go to end of parallel region.
1408
64
    CGF.EmitBranch(TerminateBB);
1409
1410
64
    CGF.EmitBlock(CheckNextBB);
1411
64
  }
1412
  // Default case: call to outlined function through pointer if the target
1413
  // region makes a declare target call that may contain an orphaned parallel
1414
  // directive.
1415
189
  auto *ParallelFnTy =
1416
189
      llvm::FunctionType::get(CGM.VoidTy, {CGM.Int16Ty, CGM.Int32Ty},
1417
189
                              /*isVarArg=*/false);
1418
189
  llvm::Value *WorkFnCast =
1419
189
      Bld.CreateBitCast(WorkID, ParallelFnTy->getPointerTo());
1420
  // Insert call to work function via shared wrapper. The shared
1421
  // wrapper takes two arguments:
1422
  //   - the parallelism level;
1423
  //   - the thread ID;
1424
189
  emitCall(CGF, WST.Loc, {ParallelFnTy, WorkFnCast},
1425
189
           {Bld.getInt16(/*ParallelLevel=*/0), getThreadID(CGF, WST.Loc)});
1426
  // Go to end of parallel region.
1427
189
  CGF.EmitBranch(TerminateBB);
1428
1429
  // Signal end of parallel region.
1430
189
  CGF.EmitBlock(TerminateBB);
1431
189
  CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
1432
189
                          CGM.getModule(), OMPRTL___kmpc_kernel_end_parallel),
1433
189
                      llvm::None);
1434
189
  CGF.EmitBranch(BarrierBB);
1435
1436
  // All active and inactive workers wait at a barrier after parallel region.
1437
189
  CGF.EmitBlock(BarrierBB);
1438
  // Barrier after parallel region.
1439
189
  syncCTAThreads(CGF);
1440
189
  CGF.EmitBranch(AwaitBB);
1441
1442
  // Exit target region.
1443
189
  CGF.EmitBlock(ExitBB);
1444
  // Skip initialization.
1445
189
  clearLocThreadIdInsertPt(CGF);
1446
189
}
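
The control flow emitted above behaves like the following per-worker loop (a simplified C++ sketch over the runtime calls visible in the function; wrapper() stands for the data-sharing wrapper looked up per work item):

  for (;;) {
    syncCTAThreads();                        // .await.work: wait on the master
    void *work_fn = nullptr;
    bool is_active = __kmpc_kernel_parallel(&work_fn);
    if (!work_fn)                            // null work function:
      break;                                 //   terminate (.exit)
    if (is_active) {                         // .select.workers
      wrapper(work_fn)(/*ParallelLevel=*/0, thread_id);
      __kmpc_kernel_end_parallel();          // .terminate.parallel
    }
    syncCTAThreads();                        // .barrier.parallel
  }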
1447
1448
void CGOpenMPRuntimeGPU::createOffloadEntry(llvm::Constant *ID,
1449
                                              llvm::Constant *Addr,
1450
                                              uint64_t Size, int32_t,
1451
970
                                              llvm::GlobalValue::LinkageTypes) {
1452
  // TODO: Add support for global variables on the device after declare target
1453
  // support.
1454
970
  if (!isa<llvm::Function>(Addr))
1455
43
    return;
1456
927
  llvm::Module &M = CGM.getModule();
1457
927
  llvm::LLVMContext &Ctx = CGM.getLLVMContext();
1458
1459
  // Get "nvvm.annotations" metadata node
1460
927
  llvm::NamedMDNode *MD = M.getOrInsertNamedMetadata("nvvm.annotations");
1461
1462
927
  llvm::Metadata *MDVals[] = {
1463
927
      llvm::ConstantAsMetadata::get(Addr), llvm::MDString::get(Ctx, "kernel"),
1464
927
      llvm::ConstantAsMetadata::get(
1465
927
          llvm::ConstantInt::get(llvm::Type::getInt32Ty(Ctx), 1))};
1466
  // Append metadata to nvvm.annotations
1467
927
  MD->addOperand(llvm::MDNode::get(Ctx, MDVals));
1468
927
}
1469
1470
void CGOpenMPRuntimeGPU::emitTargetOutlinedFunction(
1471
    const OMPExecutableDirective &D, StringRef ParentName,
1472
    llvm::Function *&OutlinedFn, llvm::Constant *&OutlinedFnID,
1473
865
    bool IsOffloadEntry, const RegionCodeGenTy &CodeGen) {
1474
865
  if (!IsOffloadEntry) // Nothing to do.
1475
0
    return;
1476
1477
865
  assert(!ParentName.empty() && "Invalid target region parent name!");
1478
1479
865
  bool Mode = supportsSPMDExecutionMode(CGM.getContext(), D);
1480
865
  if (Mode)
1481
676
    emitSPMDKernel(D, ParentName, OutlinedFn, OutlinedFnID, IsOffloadEntry,
1482
676
                   CodeGen);
1483
189
  else
1484
189
    emitNonSPMDKernel(D, ParentName, OutlinedFn, OutlinedFnID, IsOffloadEntry,
1485
189
                      CodeGen);
1486
1487
865
  setPropertyExecutionMode(CGM, OutlinedFn->getName(), Mode);
1488
865
}
1489
1490
namespace {
1491
LLVM_ENABLE_BITMASK_ENUMS_IN_NAMESPACE();
1492
/// Enum for accessing the reserved_2 field of the ident_t struct.
1493
enum ModeFlagsTy : unsigned {
1494
  /// Bit set to 1 when in SPMD mode.
1495
  KMP_IDENT_SPMD_MODE = 0x01,
1496
  /// Bit set to 1 when a simplified runtime is used.
1497
  KMP_IDENT_SIMPLE_RT_MODE = 0x02,
1498
  LLVM_MARK_AS_BITMASK_ENUM(/*LargestValue=*/KMP_IDENT_SIMPLE_RT_MODE)
1499
};
1500
1501
/// Special mode Undefined: the combination of non-SPMD mode and the simple runtime.
1502
static const ModeFlagsTy UndefinedMode =
1503
    (~KMP_IDENT_SPMD_MODE) & KMP_IDENT_SIMPLE_RT_MODE;
1504
} // anonymous namespace
1505
1506
3.20k
unsigned CGOpenMPRuntimeGPU::getDefaultLocationReserved2Flags() const {
1507
3.20k
  switch (getExecutionMode()) {
1508
2.77k
  case EM_SPMD:
1509
2.77k
    if (requiresFullRuntime())
1510
1.50k
      return KMP_IDENT_SPMD_MODE & (~KMP_IDENT_SIMPLE_RT_MODE);
1511
1.26k
    return KMP_IDENT_SPMD_MODE | KMP_IDENT_SIMPLE_RT_MODE;
1512
407
  case EM_NonSPMD:
1513
407
    assert(requiresFullRuntime() && "Expected full runtime.");
1514
407
    return (~KMP_IDENT_SPMD_MODE) & (~KMP_IDENT_SIMPLE_RT_MODE);
1515
29
  case EM_Unknown:
1516
29
    return UndefinedMode;
1517
0
  }
1518
0
  llvm_unreachable("Unknown flags are requested.");
1519
0
}
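
Spelled out, the switch above yields four distinct reserved_2 encodings:

  //   EM_SPMD    + full runtime    -> 0x01  (SPMD bit set, simple-RT bit clear)
  //   EM_SPMD    + simple runtime  -> 0x03  (both bits set)
  //   EM_NonSPMD (full runtime)    -> 0x00  (both bits clear)
  //   EM_Unknown                   -> 0x02  (UndefinedMode)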
1520
1521
CGOpenMPRuntimeGPU::CGOpenMPRuntimeGPU(CodeGenModule &CGM)
1522
213
    : CGOpenMPRuntime(CGM, "_", "$") {
1523
213
  if (!CGM.getLangOpts().OpenMPIsDevice)
1524
0
    llvm_unreachable("OpenMP NVPTX can only handle device code.");
1525
213
}
1526
1527
void CGOpenMPRuntimeGPU::emitProcBindClause(CodeGenFunction &CGF,
1528
                                              ProcBindKind ProcBind,
1529
46
                                              SourceLocation Loc) {
1530
  // Do nothing in case of SPMD mode and L0 parallel.
1531
46
  if (getExecutionMode() == CGOpenMPRuntimeGPU::EM_SPMD)
1532
46
    return;
1533
1534
0
  CGOpenMPRuntime::emitProcBindClause(CGF, ProcBind, Loc);
1535
0
}
1536
1537
void CGOpenMPRuntimeGPU::emitNumThreadsClause(CodeGenFunction &CGF,
1538
                                                llvm::Value *NumThreads,
1539
15
                                                SourceLocation Loc) {
1540
  // Do nothing in case of SPMD mode and L0 parallel.
1541
15
  if (getExecutionMode() == CGOpenMPRuntimeGPU::EM_SPMD)
1542
15
    return;
1543
1544
0
  CGOpenMPRuntime::emitNumThreadsClause(CGF, NumThreads, Loc);
1545
0
}
1546
1547
void CGOpenMPRuntimeGPU::emitNumTeamsClause(CodeGenFunction &CGF,
1548
                                              const Expr *NumTeams,
1549
                                              const Expr *ThreadLimit,
1550
42
                                              SourceLocation Loc) {}
1551
1552
llvm::Function *CGOpenMPRuntimeGPU::emitParallelOutlinedFunction(
1553
    const OMPExecutableDirective &D, const VarDecl *ThreadIDVar,
1554
707
    OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen) {
1555
  // Emit target region as a standalone region.
1556
707
  class NVPTXPrePostActionTy : public PrePostActionTy {
1557
707
    bool &IsInParallelRegion;
1558
707
    bool PrevIsInParallelRegion;
1559
1560
707
  public:
1561
707
    NVPTXPrePostActionTy(bool &IsInParallelRegion)
1562
707
        : IsInParallelRegion(IsInParallelRegion) {}
1563
707
    void Enter(CodeGenFunction &CGF) override {
1564
707
      PrevIsInParallelRegion = IsInParallelRegion;
1565
707
      IsInParallelRegion = true;
1566
707
    }
1567
707
    void Exit(CodeGenFunction &CGF) override {
1568
707
      IsInParallelRegion = PrevIsInParallelRegion;
1569
707
    }
1570
707
  } Action(IsInParallelRegion);
1571
707
  CodeGen.setAction(Action);
1572
707
  bool PrevIsInTTDRegion = IsInTTDRegion;
1573
707
  IsInTTDRegion = false;
1574
707
  bool PrevIsInTargetMasterThreadRegion = IsInTargetMasterThreadRegion;
1575
707
  IsInTargetMasterThreadRegion = false;
1576
707
  auto *OutlinedFun =
1577
707
      cast<llvm::Function>(CGOpenMPRuntime::emitParallelOutlinedFunction(
1578
707
          D, ThreadIDVar, InnermostKind, CodeGen));
1579
707
  if (CGM.getLangOpts().Optimize) {
1580
36
    OutlinedFun->removeFnAttr(llvm::Attribute::NoInline);
1581
36
    OutlinedFun->removeFnAttr(llvm::Attribute::OptimizeNone);
1582
36
    OutlinedFun->addFnAttr(llvm::Attribute::AlwaysInline);
1583
36
  }
1584
707
  IsInTargetMasterThreadRegion = PrevIsInTargetMasterThreadRegion;
1585
707
  IsInTTDRegion = PrevIsInTTDRegion;
1586
707
  if (getExecutionMode() != CGOpenMPRuntimeGPU::EM_SPMD &&
1587
74
      !IsInParallelRegion) {
1588
74
    llvm::Function *WrapperFun =
1589
74
        createParallelDataSharingWrapper(OutlinedFun, D);
1590
74
    WrapperFunctionsMap[OutlinedFun] = WrapperFun;
1591
74
  }
1592
1593
707
  return OutlinedFun;
1594
707
}
1595
1596
/// Get list of lastprivate variables from the teams distribute ... or
1597
/// teams {distribute ...} directives.
1598
static void
1599
getDistributeLastprivateVars(ASTContext &Ctx, const OMPExecutableDirective &D,
1600
413
                             llvm::SmallVectorImpl<const ValueDecl *> &Vars) {
1601
413
  assert(isOpenMPTeamsDirective(D.getDirectiveKind()) &&
1602
413
         "expected teams directive.");
1603
413
  const OMPExecutableDirective *Dir = &D;
1604
413
  if (!isOpenMPDistributeDirective(D.getDirectiveKind())) {
1605
151
    if (const Stmt *S = CGOpenMPRuntime::getSingleCompoundChild(
1606
151
            Ctx,
1607
151
            D.getInnermostCapturedStmt()->getCapturedStmt()->IgnoreContainers(
1608
151
                /*IgnoreCaptured=*/true))) {
1609
151
      Dir = dyn_cast_or_null<OMPExecutableDirective>(S);
1610
151
      if (Dir && !isOpenMPDistributeDirective(Dir->getDirectiveKind()))
1611
13
        Dir = nullptr;
1612
151
    }
1613
151
  }
1614
413
  if (!Dir)
1615
13
    return;
1616
400
  for (const auto *C : Dir->getClausesOfKind<OMPLastprivateClause>()) {
1617
52
    for (const Expr *E : C->getVarRefs())
1618
52
      Vars.push_back(getPrivateItem(E));
1619
52
  }
1620
400
}
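
The two source shapes this helper recognizes are, sketched with an illustrative variable x and loop:

  // Combined form: D itself is a distribute directive.
  #pragma omp target teams distribute lastprivate(x)
  for (int i = 0; i < n; ++i) x = i;

  // Split form: the getSingleCompoundChild walk recovers the inner
  // distribute nested in a bare 'teams' region.
  #pragma omp target teams
  #pragma omp distribute lastprivate(x)
  for (int i = 0; i < n; ++i) x = i;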
1621
1622
/// Get list of reduction variables from the teams ... directives.
1623
static void
1624
getTeamsReductionVars(ASTContext &Ctx, const OMPExecutableDirective &D,
1625
54
                      llvm::SmallVectorImpl<const ValueDecl *> &Vars) {
1626
54
  assert(isOpenMPTeamsDirective(D.getDirectiveKind()) &&
1627
54
         "expected teams directive.");
1628
18
  for (const auto *C : D.getClausesOfKind<OMPReductionClause>()) {
1629
18
    for (const Expr *E : C->privates())
1630
18
      Vars.push_back(getPrivateItem(E));
1631
18
  }
1632
54
}
1633
1634
llvm::Function *CGOpenMPRuntimeGPU::emitTeamsOutlinedFunction(
1635
    const OMPExecutableDirective &D, const VarDecl *ThreadIDVar,
1636
467
    OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen) {
1637
467
  SourceLocation Loc = D.getBeginLoc();
1638
1639
467
  const RecordDecl *GlobalizedRD = nullptr;
1640
467
  llvm::SmallVector<const ValueDecl *, 4> LastPrivatesReductions;
1641
467
  llvm::SmallDenseMap<const ValueDecl *, const FieldDecl *> MappedDeclsFields;
1642
467
  unsigned WarpSize = CGM.getTarget().getGridValue(llvm::omp::GV_Warp_Size);
1643
  // Globalize team reductions variable unconditionally in all modes.
1644
467
  if (getExecutionMode() != CGOpenMPRuntimeGPU::EM_SPMD)
1645
54
    getTeamsReductionVars(CGM.getContext(), D, LastPrivatesReductions);
1646
467
  if (getExecutionMode() == CGOpenMPRuntimeGPU::EM_SPMD) {
1647
413
    getDistributeLastprivateVars(CGM.getContext(), D, LastPrivatesReductions);
1648
413
    if (!LastPrivatesReductions.empty()) {
1649
52
      GlobalizedRD = ::buildRecordForGlobalizedVars(
1650
52
          CGM.getContext(), llvm::None, LastPrivatesReductions,
1651
52
          MappedDeclsFields, WarpSize);
1652
52
    }
1653
54
  } else if (!LastPrivatesReductions.empty()) {
1654
12
    assert(!TeamAndReductions.first &&
1655
12
           "Previous team declaration is not expected.");
1656
12
    TeamAndReductions.first = D.getCapturedStmt(OMPD_teams)->getCapturedDecl();
1657
12
    std::swap(TeamAndReductions.second, LastPrivatesReductions);
1658
12
  }
1659
1660
  // Emit target region as a standalone region.
1661
467
  class NVPTXPrePostActionTy : public PrePostActionTy {
1662
467
    SourceLocation &Loc;
1663
467
    const RecordDecl *GlobalizedRD;
1664
467
    llvm::SmallDenseMap<const ValueDecl *, const FieldDecl *>
1665
467
        &MappedDeclsFields;
1666
1667
467
  public:
1668
467
    NVPTXPrePostActionTy(
1669
467
        SourceLocation &Loc, const RecordDecl *GlobalizedRD,
1670
467
        llvm::SmallDenseMap<const ValueDecl *, const FieldDecl *>
1671
467
            &MappedDeclsFields)
1672
467
        : Loc(Loc), GlobalizedRD(GlobalizedRD),
1673
467
          MappedDeclsFields(MappedDeclsFields) {}
1674
467
    void Enter(CodeGenFunction &CGF) override {
1675
467
      auto &Rt =
1676
467
          static_cast<CGOpenMPRuntimeGPU &>(CGF.CGM.getOpenMPRuntime());
1677
467
      if (GlobalizedRD) {
1678
52
        auto I = Rt.FunctionGlobalizedDecls.try_emplace(CGF.CurFn).first;
1679
52
        I->getSecond().GlobalRecord = GlobalizedRD;
1680
52
        I->getSecond().MappedParams =
1681
52
            std::make_unique<CodeGenFunction::OMPMapVars>();
1682
52
        DeclToAddrMapTy &Data = I->getSecond().LocalVarData;
1683
52
        for (const auto &Pair : MappedDeclsFields) {
1684
52
          assert(Pair.getFirst()->isCanonicalDecl() &&
1685
52
                 "Expected canonical declaration");
1686
52
          Data.insert(std::make_pair(Pair.getFirst(),
1687
52
                                     MappedVarData(Pair.getSecond(),
1688
52
                                                   /*IsOnePerTeam=*/true)));
1689
52
        }
1690
52
      }
1691
467
      Rt.emitGenericVarsProlog(CGF, Loc);
1692
467
    }
1693
467
    void Exit(CodeGenFunction &CGF) override {
1694
467
      static_cast<CGOpenMPRuntimeGPU &>(CGF.CGM.getOpenMPRuntime())
1695
467
          .emitGenericVarsEpilog(CGF);
1696
467
    }
1697
467
  } Action(Loc, GlobalizedRD, MappedDeclsFields);
1698
467
  CodeGen.setAction(Action);
1699
467
  llvm::Function *OutlinedFun = CGOpenMPRuntime::emitTeamsOutlinedFunction(
1700
467
      D, ThreadIDVar, InnermostKind, CodeGen);
1701
467
  if (CGM.getLangOpts().Optimize) {
1702
22
    OutlinedFun->removeFnAttr(llvm::Attribute::NoInline);
1703
22
    OutlinedFun->removeFnAttr(llvm::Attribute::OptimizeNone);
1704
22
    OutlinedFun->addFnAttr(llvm::Attribute::AlwaysInline);
1705
22
  }
1706
1707
467
  return OutlinedFun;
1708
467
}
1709
1710
void CGOpenMPRuntimeGPU::emitGenericVarsProlog(CodeGenFunction &CGF,
1711
                                                 SourceLocation Loc,
1712
667
                                                 bool WithSPMDCheck) {
1713
667
  if (getDataSharingMode(CGM) != CGOpenMPRuntimeGPU::Generic &&
1714
170
      getExecutionMode() != CGOpenMPRuntimeGPU::EM_SPMD)
1715
2
    return;
1716
1717
665
  CGBuilderTy &Bld = CGF.Builder;
1718
1719
665
  const auto I = FunctionGlobalizedDecls.find(CGF.CurFn);
1720
665
  if (I == FunctionGlobalizedDecls.end())
1721
529
    return;
1722
136
  if (const RecordDecl *GlobalizedVarsRecord = I->getSecond().GlobalRecord) {
1723
136
    QualType GlobalRecTy = CGM.getContext().getRecordType(GlobalizedVarsRecord);
1724
136
    QualType SecGlobalRecTy;
1725
1726
    // Recover pointer to this function's global record. The runtime will
1727
    // handle the specifics of the allocation of the memory.
1728
    // Use actual memory size of the record including the padding
1729
    // for alignment purposes.
1730
136
    unsigned Alignment =
1731
136
        CGM.getContext().getTypeAlignInChars(GlobalRecTy).getQuantity();
1732
136
    unsigned GlobalRecordSize =
1733
136
        CGM.getContext().getTypeSizeInChars(GlobalRecTy).getQuantity();
1734
136
    GlobalRecordSize = llvm::alignTo(GlobalRecordSize, Alignment);
1735
1736
136
    llvm::PointerType *GlobalRecPtrTy =
1737
136
        CGF.ConvertTypeForMem(GlobalRecTy)->getPointerTo();
1738
136
    llvm::Value *GlobalRecCastAddr;
1739
136
    llvm::Value *IsTTD = nullptr;
1740
136
    if (!IsInTTDRegion &&
1741
11
        (WithSPMDCheck ||
1742
11
         getExecutionMode() == CGOpenMPRuntimeGPU::EM_Unknown)) {
1743
11
      llvm::BasicBlock *ExitBB = CGF.createBasicBlock(".exit");
1744
11
      llvm::BasicBlock *SPMDBB = CGF.createBasicBlock(".spmd");
1745
11
      llvm::BasicBlock *NonSPMDBB = CGF.createBasicBlock(".non-spmd");
1746
11
      if (I->getSecond().SecondaryGlobalRecord.hasValue()) {
1747
10
        llvm::Value *RTLoc = emitUpdateLocation(CGF, Loc);
1748
10
        llvm::Value *ThreadID = getThreadID(CGF, Loc);
1749
10
        llvm::Value *PL = CGF.EmitRuntimeCall(
1750
10
            OMPBuilder.getOrCreateRuntimeFunction(CGM.getModule(),
1751
10
                                                  OMPRTL___kmpc_parallel_level),
1752
10
            {RTLoc, ThreadID});
1753
10
        IsTTD = Bld.CreateIsNull(PL);
1754
10
      }
1755
11
      llvm::Value *IsSPMD = Bld.CreateIsNotNull(
1756
11
          CGF.EmitNounwindRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
1757
11
              CGM.getModule(), OMPRTL___kmpc_is_spmd_exec_mode)));
1758
11
      Bld.CreateCondBr(IsSPMD, SPMDBB, NonSPMDBB);
1759
      // There is no need to emit line number for unconditional branch.
1760
11
      (void)ApplyDebugLocation::CreateEmpty(CGF);
1761
11
      CGF.EmitBlock(SPMDBB);
1762
11
      Address RecPtr = Address(llvm::ConstantPointerNull::get(GlobalRecPtrTy),
1763
11
                               CharUnits::fromQuantity(Alignment));
1764
11
      CGF.EmitBranch(ExitBB);
1765
      // There is no need to emit line number for unconditional branch.
1766
11
      (void)ApplyDebugLocation::CreateEmpty(CGF);
1767
11
      CGF.EmitBlock(NonSPMDBB);
1768
11
      llvm::Value *Size = llvm::ConstantInt::get(CGM.SizeTy, GlobalRecordSize);
1769
11
      if (const RecordDecl *SecGlobalizedVarsRecord =
1770
10
              I->getSecond().SecondaryGlobalRecord.getValueOr(nullptr)) {
1771
10
        SecGlobalRecTy =
1772
10
            CGM.getContext().getRecordType(SecGlobalizedVarsRecord);
1773
1774
        // Recover pointer to this function's global record. The runtime will
1775
        // handle the specifics of the allocation of the memory.
1776
        // Use actual memory size of the record including the padding
1777
        // for alignment purposes.
1778
10
        unsigned Alignment =
1779
10
            CGM.getContext().getTypeAlignInChars(SecGlobalRecTy).getQuantity();
1780
10
        unsigned GlobalRecordSize =
1781
10
            CGM.getContext().getTypeSizeInChars(SecGlobalRecTy).getQuantity();
1782
10
        GlobalRecordSize = llvm::alignTo(GlobalRecordSize, Alignment);
1783
10
        Size = Bld.CreateSelect(
1784
10
            IsTTD, llvm::ConstantInt::get(CGM.SizeTy, GlobalRecordSize), Size);
1785
10
      }
1786
      // TODO: allow the usage of shared memory to be controlled by
1787
      // the user, for now, default to global.
1788
11
      llvm::Value *GlobalRecordSizeArg[] = {
1789
11
          Size, CGF.Builder.getInt16(/*UseSharedMemory=*/0)};
1790
11
      llvm::Value *GlobalRecValue = CGF.EmitRuntimeCall(
1791
11
          OMPBuilder.getOrCreateRuntimeFunction(
1792
11
              CGM.getModule(), OMPRTL___kmpc_data_sharing_coalesced_push_stack),
1793
11
          GlobalRecordSizeArg);
1794
11
      GlobalRecCastAddr = Bld.CreatePointerBitCastOrAddrSpaceCast(
1795
11
          GlobalRecValue, GlobalRecPtrTy);
1796
11
      CGF.EmitBlock(ExitBB);
1797
11
      auto *Phi = Bld.CreatePHI(GlobalRecPtrTy,
1798
11
                                /*NumReservedValues=*/2, "_select_stack");
1799
11
      Phi->addIncoming(RecPtr.getPointer(), SPMDBB);
1800
11
      Phi->addIncoming(GlobalRecCastAddr, NonSPMDBB);
1801
11
      GlobalRecCastAddr = Phi;
1802
11
      I->getSecond().GlobalRecordAddr = Phi;
1803
11
      I->getSecond().IsInSPMDModeFlag = IsSPMD;
1804
125
    } else if (!CGM.getLangOpts().OpenMPCUDATargetParallel && 
IsInTTDRegion79
) {
1805
79
      assert(GlobalizedRecords.back().Records.size() < 2 &&
1806
79
             "Expected less than 2 globalized records: one for target and one "
1807
79
             "for teams.");
1808
79
      unsigned Offset = 0;
1809
4
      for (const RecordDecl *RD : GlobalizedRecords.back().Records) {
1810
4
        QualType RDTy = CGM.getContext().getRecordType(RD);
1811
4
        unsigned Alignment =
1812
4
            CGM.getContext().getTypeAlignInChars(RDTy).getQuantity();
1813
4
        unsigned Size = CGM.getContext().getTypeSizeInChars(RDTy).getQuantity();
1814
4
        Offset =
1815
4
            llvm::alignTo(llvm::alignTo(Offset, Alignment) + Size, Alignment);
1816
4
      }
1817
79
      unsigned Alignment =
1818
79
          CGM.getContext().getTypeAlignInChars(GlobalRecTy).getQuantity();
1819
79
      Offset = llvm::alignTo(Offset, Alignment);
1820
79
      GlobalizedRecords.back().Records.push_back(GlobalizedVarsRecord);
1821
79
      ++GlobalizedRecords.back().RegionCounter;
1822
79
      if (GlobalizedRecords.back().Records.size() == 1) {
1823
75
        assert(KernelStaticGlobalized &&
1824
75
               "Kernel static pointer must be initialized already.");
1825
75
        auto *UseSharedMemory = new llvm::GlobalVariable(
1826
75
            CGM.getModule(), CGM.Int16Ty, /*isConstant=*/true,
1827
75
            llvm::GlobalValue::InternalLinkage, nullptr,
1828
75
            "_openmp_static_kernel$is_shared");
1829
75
        UseSharedMemory->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
1830
75
        QualType Int16Ty = CGM.getContext().getIntTypeForBitwidth(
1831
75
            /*DestWidth=*/16, /*Signed=*/0);
1832
75
        llvm::Value *IsInSharedMemory = CGF.EmitLoadOfScalar(
1833
75
            Address(UseSharedMemory,
1834
75
                    CGM.getContext().getTypeAlignInChars(Int16Ty)),
1835
75
            /*Volatile=*/false, Int16Ty, Loc);
1836
75
        auto *StaticGlobalized = new llvm::GlobalVariable(
1837
75
            CGM.getModule(), CGM.Int8Ty, /*isConstant=*/false,
1838
75
            llvm::GlobalValue::CommonLinkage, nullptr);
1839
75
        auto *RecSize = new llvm::GlobalVariable(
1840
75
            CGM.getModule(), CGM.SizeTy, /*isConstant=*/true,
1841
75
            llvm::GlobalValue::InternalLinkage, nullptr,
1842
75
            "_openmp_static_kernel$size");
1843
75
        RecSize->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
1844
75
        llvm::Value *Ld = CGF.EmitLoadOfScalar(
1845
75
            Address(RecSize, CGM.getSizeAlign()), /*Volatile=*/false,
1846
75
            CGM.getContext().getSizeType(), Loc);
1847
75
        llvm::Value *ResAddr = Bld.CreatePointerBitCastOrAddrSpaceCast(
1848
75
            KernelStaticGlobalized, CGM.VoidPtrPtrTy);
1849
75
        llvm::Value *GlobalRecordSizeArg[] = {
1850
75
            llvm::ConstantInt::get(
1851
75
                CGM.Int16Ty,
1852
43
                getExecutionMode() == CGOpenMPRuntimeGPU::EM_SPMD ? 1 : 0),
1853
75
            StaticGlobalized, Ld, IsInSharedMemory, ResAddr};
1854
75
        CGF.EmitRuntimeCall(
1855
75
            OMPBuilder.getOrCreateRuntimeFunction(
1856
75
                CGM.getModule(), OMPRTL___kmpc_get_team_static_memory),
1857
75
            GlobalRecordSizeArg);
1858
75
        GlobalizedRecords.back().Buffer = StaticGlobalized;
1859
75
        GlobalizedRecords.back().RecSize = RecSize;
1860
75
        GlobalizedRecords.back().UseSharedMemory = UseSharedMemory;
1861
75
        GlobalizedRecords.back().Loc = Loc;
1862
75
      }
1863
79
      assert(KernelStaticGlobalized && "Global address must be set already.");
1864
79
      Address FrameAddr = CGF.EmitLoadOfPointer(
1865
79
          Address(KernelStaticGlobalized, CGM.getPointerAlign()),
1866
79
          CGM.getContext()
1867
79
              .getPointerType(CGM.getContext().VoidPtrTy)
1868
79
              .castAs<PointerType>());
1869
79
      llvm::Value *GlobalRecValue =
1870
79
          Bld.CreateConstInBoundsGEP(FrameAddr, Offset).getPointer();
1871
79
      I->getSecond().GlobalRecordAddr = GlobalRecValue;
1872
79
      I->getSecond().IsInSPMDModeFlag = nullptr;
1873
79
      GlobalRecCastAddr = Bld.CreatePointerBitCastOrAddrSpaceCast(
1874
79
          GlobalRecValue, CGF.ConvertTypeForMem(GlobalRecTy)->getPointerTo());
1875
46
    } else {
1876
      // TODO: allow the usage of shared memory to be controlled by
1877
      // the user, for now, default to global.
1878
46
      bool UseSharedMemory =
1879
46
          IsInTTDRegion && GlobalRecordSize <= SharedMemorySize;
1880
46
      llvm::Value *GlobalRecordSizeArg[] = {
1881
46
          llvm::ConstantInt::get(CGM.SizeTy, GlobalRecordSize),
1882
46
          CGF.Builder.getInt16(UseSharedMemory ? 1 : 0)};
1883
46
      llvm::Value *GlobalRecValue = CGF.EmitRuntimeCall(
1884
46
          OMPBuilder.getOrCreateRuntimeFunction(
1885
46
              CGM.getModule(),
1886
46
              IsInTTDRegion ? OMPRTL___kmpc_data_sharing_push_stack
1887
0
                            : OMPRTL___kmpc_data_sharing_coalesced_push_stack),
1888
46
          GlobalRecordSizeArg);
1889
46
      GlobalRecCastAddr = Bld.CreatePointerBitCastOrAddrSpaceCast(
1890
46
          GlobalRecValue, GlobalRecPtrTy);
1891
46
      I->getSecond().GlobalRecordAddr = GlobalRecValue;
1892
46
      I->getSecond().IsInSPMDModeFlag = nullptr;
1893
46
    }
1894
136
    LValue Base =
1895
136
        CGF.MakeNaturalAlignPointeeAddrLValue(GlobalRecCastAddr, GlobalRecTy);
1896
1897
    // Emit the "global alloca" which is a GEP from the global declaration
1898
    // record using the pointer returned by the runtime.
1899
136
    LValue SecBase;
1900
136
    decltype(I->getSecond().LocalVarData)::const_iterator SecIt;
1901
136
    if (IsTTD) {
1902
10
      SecIt = I->getSecond().SecondaryLocalVarData->begin();
1903
10
      llvm::PointerType *SecGlobalRecPtrTy =
1904
10
          CGF.ConvertTypeForMem(SecGlobalRecTy)->getPointerTo();
1905
10
      SecBase = CGF.MakeNaturalAlignPointeeAddrLValue(
1906
10
          Bld.CreatePointerBitCastOrAddrSpaceCast(
1907
10
              I->getSecond().GlobalRecordAddr, SecGlobalRecPtrTy),
1908
10
          SecGlobalRecTy);
1909
10
    }
1910
166
    for (auto &Rec : I->getSecond().LocalVarData) {
1911
166
      bool EscapedParam = I->getSecond().EscapedParameters.count(Rec.first);
1912
166
      llvm::Value *ParValue;
1913
166
      if (EscapedParam) {
1914
43
        const auto *VD = cast<VarDecl>(Rec.first);
1915
43
        LValue ParLVal =
1916
43
            CGF.MakeAddrLValue(CGF.GetAddrOfLocalVar(VD), VD->getType());
1917
43
        ParValue = CGF.EmitLoadOfScalar(ParLVal, Loc);
1918
43
      }
1919
166
      LValue VarAddr = CGF.EmitLValueForField(Base, Rec.second.FD);
1920
      // Emit VarAddr basing on lane-id if required.
1921
166
      QualType VarTy;
1922
166
      if (Rec.second.IsOnePerTeam) {
1923
155
        VarTy = Rec.second.FD->getType();
1924
11
      } else {
1925
11
        llvm::Value *Ptr = CGF.Builder.CreateInBoundsGEP(
1926
11
            VarAddr.getAddress(CGF).getPointer(),
1927
11
            {Bld.getInt32(0), getNVPTXLaneID(CGF)});
1928
11
        VarTy =
1929
11
            Rec.second.FD->getType()->castAsArrayTypeUnsafe()->getElementType();
1930
11
        VarAddr = CGF.MakeAddrLValue(
1931
11
            Address(Ptr, CGM.getContext().getDeclAlign(Rec.first)), VarTy,
1932
11
            AlignmentSource::Decl);
1933
11
      }
1934
166
      Rec.second.PrivateAddr = VarAddr.getAddress(CGF);
1935
166
      if (!IsInTTDRegion &&
1936
11
          (WithSPMDCheck ||
1937
11
           getExecutionMode() == CGOpenMPRuntimeGPU::EM_Unknown)) {
1938
11
        assert(I->getSecond().IsInSPMDModeFlag &&
1939
11
               "Expected unknown execution mode or required SPMD check.");
1940
11
        if (IsTTD) {
1941
10
          assert(SecIt->second.IsOnePerTeam &&
1942
10
                 "Secondary glob data must be one per team.");
1943
10
          LValue SecVarAddr = CGF.EmitLValueForField(SecBase, SecIt->second.FD);
1944
10
          VarAddr.setAddress(
1945
10
              Address(Bld.CreateSelect(IsTTD, SecVarAddr.getPointer(CGF),
1946
10
                                       VarAddr.getPointer(CGF)),
1947
10
                      VarAddr.getAlignment()));
1948
10
          Rec.second.PrivateAddr = VarAddr.getAddress(CGF);
1949
10
        }
1950
11
        Address GlobalPtr = Rec.second.PrivateAddr;
1951
11
        Address LocalAddr = CGF.CreateMemTemp(VarTy, Rec.second.FD->getName());
1952
11
        Rec.second.PrivateAddr = Address(
1953
11
            Bld.CreateSelect(I->getSecond().IsInSPMDModeFlag,
1954
11
                             LocalAddr.getPointer(), GlobalPtr.getPointer()),
1955
11
            LocalAddr.getAlignment());
1956
11
      }
1957
166
      if (EscapedParam) {
1958
43
        const auto *VD = cast<VarDecl>(Rec.first);
1959
43
        CGF.EmitStoreOfScalar(ParValue, VarAddr);
1960
43
        I->getSecond().MappedParams->setVarAddr(CGF, VD,
1961
43
                                                VarAddr.getAddress(CGF));
1962
43
      }
1963
166
      if (IsTTD)
1964
10
        ++SecIt;
1965
166
    }
1966
136
  }
1967
0
  for (const ValueDecl *VD : I->getSecond().EscapedVariableLengthDecls) {
1968
    // Recover pointer to this function's global record. The runtime will
1969
    // handle the specifics of the allocation of the memory.
1970
    // Use actual memory size of the record including the padding
1971
    // for alignment purposes.
1972
0
    CGBuilderTy &Bld = CGF.Builder;
1973
0
    llvm::Value *Size = CGF.getTypeSize(VD->getType());
1974
0
    CharUnits Align = CGM.getContext().getDeclAlign(VD);
1975
0
    Size = Bld.CreateNUWAdd(
1976
0
        Size, llvm::ConstantInt::get(CGF.SizeTy, Align.getQuantity() - 1));
1977
0
    llvm::Value *AlignVal =
1978
0
        llvm::ConstantInt::get(CGF.SizeTy, Align.getQuantity());
1979
0
    Size = Bld.CreateUDiv(Size, AlignVal);
1980
0
    Size = Bld.CreateNUWMul(Size, AlignVal);
1981
    // TODO: allow the usage of shared memory to be controlled by
1982
    // the user, for now, default to global.
1983
0
    llvm::Value *GlobalRecordSizeArg[] = {
1984
0
        Size, CGF.Builder.getInt16(/*UseSharedMemory=*/0)};
1985
0
    llvm::Value *GlobalRecValue = CGF.EmitRuntimeCall(
1986
0
        OMPBuilder.getOrCreateRuntimeFunction(
1987
0
            CGM.getModule(), OMPRTL___kmpc_data_sharing_coalesced_push_stack),
1988
0
        GlobalRecordSizeArg);
1989
0
    llvm::Value *GlobalRecCastAddr = Bld.CreatePointerBitCastOrAddrSpaceCast(
1990
0
        GlobalRecValue, CGF.ConvertTypeForMem(VD->getType())->getPointerTo());
1991
0
    LValue Base = CGF.MakeAddrLValue(GlobalRecCastAddr, VD->getType(),
1992
0
                                     CGM.getContext().getDeclAlign(VD),
1993
0
                                     AlignmentSource::Decl);
1994
0
    I->getSecond().MappedParams->setVarAddr(CGF, cast<VarDecl>(VD),
1995
0
                                            Base.getAddress(CGF));
1996
0
    I->getSecond().EscapedVariableLengthDeclsAddrs.emplace_back(GlobalRecValue);
1997
0
  }
1998
136
  I->getSecond().MappedParams->apply(CGF);
1999
136
}
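
For orientation, emitGenericVarsProlog chooses among three allocation strategies for the globalized record:

  // 1. Outside a target/teams (TTD) region with an SPMD check required:
  //    branch on __kmpc_is_spmd_exec_mode(); SPMD threads use a null
  //    record, the rest call __kmpc_data_sharing_coalesced_push_stack,
  //    and both paths merge through the "_select_stack" PHI.
  // 2. Inside a TTD region (without -fopenmp-cuda-target-parallel):
  //    carve the record out of the per-team static buffer fetched once
  //    via __kmpc_get_team_static_memory, at a running Offset.
  // 3. Otherwise: push a fresh record with
  //    __kmpc_data_sharing_push_stack (or the coalesced variant).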
2000
2001
void CGOpenMPRuntimeGPU::emitGenericVarsEpilog(CodeGenFunction &CGF,
2002
667
                                                 bool WithSPMDCheck) {
2003
667
  if (getDataSharingMode(CGM) != CGOpenMPRuntimeGPU::Generic &&
2004
170
      getExecutionMode() != CGOpenMPRuntimeGPU::EM_SPMD)
2005
2
    return;
2006
2007
665
  const auto I = FunctionGlobalizedDecls.find(CGF.CurFn);
2008
665
  if (I != FunctionGlobalizedDecls.end()) {
2009
136
    I->getSecond().MappedParams->restore(CGF);
2010
136
    if (!CGF.HaveInsertPoint())
2011
0
      return;
2012
136
    for (llvm::Value *Addr :
2013
0
         llvm::reverse(I->getSecond().EscapedVariableLengthDeclsAddrs)) {
2014
0
      CGF.EmitRuntimeCall(
2015
0
          OMPBuilder.getOrCreateRuntimeFunction(
2016
0
              CGM.getModule(), OMPRTL___kmpc_data_sharing_pop_stack),
2017
0
          Addr);
2018
0
    }
2019
136
    if (I->getSecond().GlobalRecordAddr) {
2020
136
      if (!IsInTTDRegion &&
2021
11
          (WithSPMDCheck ||
2022
11
           getExecutionMode() == CGOpenMPRuntimeGPU::EM_Unknown)) {
2023
11
        CGBuilderTy &Bld = CGF.Builder;
2024
11
        llvm::BasicBlock *ExitBB = CGF.createBasicBlock(".exit");
2025
11
        llvm::BasicBlock *NonSPMDBB = CGF.createBasicBlock(".non-spmd");
2026
11
        Bld.CreateCondBr(I->getSecond().IsInSPMDModeFlag, ExitBB, NonSPMDBB);
2027
        // There is no need to emit line number for unconditional branch.
2028
11
        (void)ApplyDebugLocation::CreateEmpty(CGF);
2029
11
        CGF.EmitBlock(NonSPMDBB);
2030
11
        CGF.EmitRuntimeCall(
2031
11
            OMPBuilder.getOrCreateRuntimeFunction(
2032
11
                CGM.getModule(), OMPRTL___kmpc_data_sharing_pop_stack),
2033
11
            CGF.EmitCastToVoidPtr(I->getSecond().GlobalRecordAddr));
2034
11
        CGF.EmitBlock(ExitBB);
2035
125
      } else if (!CGM.getLangOpts().OpenMPCUDATargetParallel && IsInTTDRegion) {
2036
79
        assert(GlobalizedRecords.back().RegionCounter > 0 &&
2037
79
               "region counter must be > 0.");
2038
79
        --GlobalizedRecords.back().RegionCounter;
2039
        // Emit the restore function only in the target region.
2040
79
        if (GlobalizedRecords.back().RegionCounter == 0) {
2041
75
          QualType Int16Ty = CGM.getContext().getIntTypeForBitwidth(
2042
75
              /*DestWidth=*/16, /*Signed=*/0);
2043
75
          llvm::Value *IsInSharedMemory = CGF.EmitLoadOfScalar(
2044
75
              Address(GlobalizedRecords.back().UseSharedMemory,
2045
75
                      CGM.getContext().getTypeAlignInChars(Int16Ty)),
2046
75
              /*Volatile=*/false, Int16Ty, GlobalizedRecords.back().Loc);
2047
75
          llvm::Value *Args[] = {
2048
75
              llvm::ConstantInt::get(
2049
75
                  CGM.Int16Ty,
2050
43
                  getExecutionMode() == CGOpenMPRuntimeGPU::EM_SPMD ? 1 : 0),
2051
75
              IsInSharedMemory};
2052
75
          CGF.EmitRuntimeCall(
2053
75
              OMPBuilder.getOrCreateRuntimeFunction(
2054
75
                  CGM.getModule(), OMPRTL___kmpc_restore_team_static_memory),
2055
75
              Args);
2056
75
        }
2057
46
      } else {
2058
46
        CGF.EmitRuntimeCall(
2059
46
            OMPBuilder.getOrCreateRuntimeFunction(
2060
46
                CGM.getModule(), OMPRTL___kmpc_data_sharing_pop_stack),
2061
46
            I->getSecond().GlobalRecordAddr);
2062
46
      }
2063
136
    }
2064
136
  }
2065
665
}
2066
2067
void CGOpenMPRuntimeGPU::emitTeamsCall(CodeGenFunction &CGF,
2068
                                         const OMPExecutableDirective &D,
2069
                                         SourceLocation Loc,
2070
                                         llvm::Function *OutlinedFn,
2071
467
                                         ArrayRef<llvm::Value *> CapturedVars) {
2072
467
  if (!CGF.HaveInsertPoint())
2073
0
    return;
2074
2075
467
  Address ZeroAddr = CGF.CreateDefaultAlignTempAlloca(CGF.Int32Ty,
2076
467
                                                      /*Name=*/".zero.addr");
2077
467
  CGF.InitTempAlloca(ZeroAddr, CGF.Builder.getInt32(/*C*/ 0));
2078
467
  llvm::SmallVector<llvm::Value *, 16> OutlinedFnArgs;
2079
467
  OutlinedFnArgs.push_back(emitThreadIDAddress(CGF, Loc).getPointer());
2080
467
  OutlinedFnArgs.push_back(ZeroAddr.getPointer());
2081
467
  OutlinedFnArgs.append(CapturedVars.begin(), CapturedVars.end());
2082
467
  emitOutlinedFunctionCall(CGF, Loc, OutlinedFn, OutlinedFnArgs);
2083
467
}
2084
2085
void CGOpenMPRuntimeGPU::emitParallelCall(
2086
    CodeGenFunction &CGF, SourceLocation Loc, llvm::Function *OutlinedFn,
2087
707
    ArrayRef<llvm::Value *> CapturedVars, const Expr *IfCond) {
2088
707
  if (!CGF.HaveInsertPoint())
2089
0
    return;
2090
2091
707
  if (getExecutionMode() == CGOpenMPRuntimeGPU::EM_SPMD)
2092
633
    emitSPMDParallelCall(CGF, Loc, OutlinedFn, CapturedVars, IfCond);
2093
74
  else
2094
74
    emitNonSPMDParallelCall(CGF, Loc, OutlinedFn, CapturedVars, IfCond);
2095
707
}
2096
2097
void CGOpenMPRuntimeGPU::emitNonSPMDParallelCall(
2098
    CodeGenFunction &CGF, SourceLocation Loc, llvm::Value *OutlinedFn,
2099
74
    ArrayRef<llvm::Value *> CapturedVars, const Expr *IfCond) {
2100
74
  llvm::Function *Fn = cast<llvm::Function>(OutlinedFn);
2101
2102
  // Force inline this outlined function at its call site.
2103
74
  Fn->setLinkage(llvm::GlobalValue::InternalLinkage);
2104
2105
74
  Address ZeroAddr = CGF.CreateDefaultAlignTempAlloca(CGF.Int32Ty,
2106
74
                                                      /*Name=*/".zero.addr");
2107
74
  CGF.InitTempAlloca(ZeroAddr, CGF.Builder.getInt32(/*C*/ 0));
2108
  // ThreadId for serialized parallels is 0.
2109
74
  Address ThreadIDAddr = ZeroAddr;
2110
74
  auto &&CodeGen = [this, Fn, CapturedVars, Loc, &ThreadIDAddr](
2111
16
                       CodeGenFunction &CGF, PrePostActionTy &Action) {
2112
16
    Action.Enter(CGF);
2113
2114
16
    Address ZeroAddr =
2115
16
        CGF.CreateDefaultAlignTempAlloca(CGF.Int32Ty,
2116
16
                                         /*Name=*/".bound.zero.addr");
2117
16
    CGF.InitTempAlloca(ZeroAddr, CGF.Builder.getInt32(/*C*/ 0));
2118
16
    llvm::SmallVector<llvm::Value *, 16> OutlinedFnArgs;
2119
16
    OutlinedFnArgs.push_back(ThreadIDAddr.getPointer());
2120
16
    OutlinedFnArgs.push_back(ZeroAddr.getPointer());
2121
16
    OutlinedFnArgs.append(CapturedVars.begin(), CapturedVars.end());
2122
16
    emitOutlinedFunctionCall(CGF, Loc, Fn, OutlinedFnArgs);
2123
16
  };
2124
74
  auto &&SeqGen = [this, &CodeGen, Loc](CodeGenFunction &CGF,
2125
16
                                        PrePostActionTy &) {
2126
2127
16
    RegionCodeGenTy RCG(CodeGen);
2128
16
    llvm::Value *RTLoc = emitUpdateLocation(CGF, Loc);
2129
16
    llvm::Value *ThreadID = getThreadID(CGF, Loc);
2130
16
    llvm::Value *Args[] = {RTLoc, ThreadID};
2131
2132
16
    NVPTXActionTy Action(
2133
16
        OMPBuilder.getOrCreateRuntimeFunction(
2134
16
            CGM.getModule(), OMPRTL___kmpc_serialized_parallel),
2135
16
        Args,
2136
16
        OMPBuilder.getOrCreateRuntimeFunction(
2137
16
            CGM.getModule(), OMPRTL___kmpc_end_serialized_parallel),
2138
16
        Args);
2139
16
    RCG.setAction(Action);
2140
16
    RCG(CGF);
2141
16
  };
2142
2143
74
  auto &&L0ParallelGen = [this, CapturedVars, Fn](CodeGenFunction &CGF,
2144
68
                                                  PrePostActionTy &Action) {
2145
68
    CGBuilderTy &Bld = CGF.Builder;
2146
68
    llvm::Function *WFn = WrapperFunctionsMap[Fn];
2147
68
    assert(WFn && "Wrapper function does not exist!");
2148
68
    llvm::Value *ID = Bld.CreateBitOrPointerCast(WFn, CGM.Int8PtrTy);
2149
2150
    // Prepare for parallel region. Indicate the outlined function.
2151
68
    llvm::Value *Args[] = {ID};
2152
68
    CGF.EmitRuntimeCall(
2153
68
        OMPBuilder.getOrCreateRuntimeFunction(
2154
68
            CGM.getModule(), OMPRTL___kmpc_kernel_prepare_parallel),
2155
68
        Args);
2156
2157
    // Create a private scope that will globalize the arguments
2158
    // passed from the outside of the target region.
2159
68
    CodeGenFunction::OMPPrivateScope PrivateArgScope(CGF);
2160
2161
    // There's something to share.
2162
68
    if (!CapturedVars.empty()) {
2163
      // Prepare for parallel region. Indicate the outlined function.
2164
27
      Address SharedArgs =
2165
27
          CGF.CreateDefaultAlignTempAlloca(CGF.VoidPtrPtrTy, "shared_arg_refs");
2166
27
      llvm::Value *SharedArgsPtr = SharedArgs.getPointer();
2167
2168
27
      llvm::Value *DataSharingArgs[] = {
2169
27
          SharedArgsPtr,
2170
27
          llvm::ConstantInt::get(CGM.SizeTy, CapturedVars.size())};
2171
27
      CGF.EmitRuntimeCall(
2172
27
          OMPBuilder.getOrCreateRuntimeFunction(
2173
27
              CGM.getModule(), OMPRTL___kmpc_begin_sharing_variables),
2174
27
          DataSharingArgs);
2175
2176
      // Store variable address in a list of references to pass to workers.
2177
27
      unsigned Idx = 0;
2178
27
      ASTContext &Ctx = CGF.getContext();
2179
27
      Address SharedArgListAddress = CGF.EmitLoadOfPointer(
2180
27
          SharedArgs, Ctx.getPointerType(Ctx.getPointerType(Ctx.VoidPtrTy))
2181
27
                          .castAs<PointerType>());
2182
46
      for (llvm::Value *V : CapturedVars) {
2183
46
        Address Dst = Bld.CreateConstInBoundsGEP(SharedArgListAddress, Idx);
2184
46
        llvm::Value *PtrV;
2185
46
        if (V->getType()->isIntegerTy())
2186
0
          PtrV = Bld.CreateIntToPtr(V, CGF.VoidPtrTy);
2187
46
        else
2188
46
          PtrV = Bld.CreatePointerBitCastOrAddrSpaceCast(V, CGF.VoidPtrTy);
2189
46
        CGF.EmitStoreOfScalar(PtrV, Dst, /*Volatile=*/false,
2190
46
                              Ctx.getPointerType(Ctx.VoidPtrTy));
2191
46
        ++Idx;
2192
46
      }
2193
27
    }
2194
2195
    // Activate workers. This barrier is used by the master to signal
2196
    // work for the workers.
2197
68
    syncCTAThreads(CGF);
2198
2199
    // OpenMP [2.5, Parallel Construct, p.49]
2200
    // There is an implied barrier at the end of a parallel region. After the
2201
    // end of a parallel region, only the master thread of the team resumes
2202
    // execution of the enclosing task region.
2203
    //
2204
    // The master waits at this barrier until all workers are done.
2205
68
    syncCTAThreads(CGF);
2206
2207
68
    if (!CapturedVars.empty())
2208
27
      CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
2209
27
          CGM.getModule(), OMPRTL___kmpc_end_sharing_variables));
2210
2211
    // Remember for post-processing in worker loop.
2212
68
    Work.emplace_back(WFn);
2213
68
  };
2214
2215
74
  auto &&LNParallelGen = [this, Loc, &SeqGen, &L0ParallelGen](
2216
68
                             CodeGenFunction &CGF, PrePostActionTy &Action) {
2217
68
    if (IsInParallelRegion) {
2218
0
      SeqGen(CGF, Action);
2219
68
    } else if (IsInTargetMasterThreadRegion) {
2220
64
      L0ParallelGen(CGF, Action);
2221
4
    } else {
2222
      // Check for master and then parallelism:
2223
      // if (__kmpc_is_spmd_exec_mode() || __kmpc_parallel_level(loc, gtid)) {
2224
      //   Serialized execution.
2225
      // } else {
2226
      //   Worker call.
2227
      // }
2228
4
      CGBuilderTy &Bld = CGF.Builder;
2229
4
      llvm::BasicBlock *ExitBB = CGF.createBasicBlock(".exit");
2230
4
      llvm::BasicBlock *SeqBB = CGF.createBasicBlock(".sequential");
2231
4
      llvm::BasicBlock *ParallelCheckBB = CGF.createBasicBlock(".parcheck");
2232
4
      llvm::BasicBlock *MasterBB = CGF.createBasicBlock(".master");
2233
4
      llvm::Value *IsSPMD = Bld.CreateIsNotNull(
2234
4
          CGF.EmitNounwindRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
2235
4
              CGM.getModule(), OMPRTL___kmpc_is_spmd_exec_mode)));
2236
4
      Bld.CreateCondBr(IsSPMD, SeqBB, ParallelCheckBB);
2237
      // There is no need to emit line number for unconditional branch.
2238
4
      (void)ApplyDebugLocation::CreateEmpty(CGF);
2239
4
      CGF.EmitBlock(ParallelCheckBB);
2240
4
      llvm::Value *RTLoc = emitUpdateLocation(CGF, Loc);
2241
4
      llvm::Value *ThreadID = getThreadID(CGF, Loc);
2242
4
      llvm::Value *PL = CGF.EmitRuntimeCall(
2243
4
          OMPBuilder.getOrCreateRuntimeFunction(CGM.getModule(),
2244
4
                                                OMPRTL___kmpc_parallel_level),
2245
4
          {RTLoc, ThreadID});
2246
4
      llvm::Value *Res = Bld.CreateIsNotNull(PL);
2247
4
      Bld.CreateCondBr(Res, SeqBB, MasterBB);
2248
4
      CGF.EmitBlock(SeqBB);
2249
4
      SeqGen(CGF, Action);
2250
4
      CGF.EmitBranch(ExitBB);
2251
      // There is no need to emit line number for unconditional branch.
2252
4
      (void)ApplyDebugLocation::CreateEmpty(CGF);
2253
4
      CGF.EmitBlock(MasterBB);
2254
4
      L0ParallelGen(CGF, Action);
2255
4
      CGF.EmitBranch(ExitBB);
2256
      // There is no need to emit line number for unconditional branch.
2257
4
      (void)ApplyDebugLocation::CreateEmpty(CGF);
2258
      // Emit the continuation block for code after the if.
2259
4
      CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
2260
4
    }
2261
68
  };
2262
2263
74
  if (IfCond) {
2264
18
    emitIfClause(CGF, IfCond, LNParallelGen, SeqGen);
2265
56
  } else {
2266
56
    CodeGenFunction::RunCleanupsScope Scope(CGF);
2267
56
    RegionCodeGenTy ThenRCG(LNParallelGen);
2268
56
    ThenRCG(CGF);
2269
56
  }
2270
74
}
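
The L0ParallelGen path above emits roughly this master-side sequence (simplified sketch; the second barrier doubles as the implied barrier at the end of the parallel region, and the wrapper is recorded in Work for the worker loop):

  __kmpc_kernel_prepare_parallel(wrapper_id);
  if (has_captures) {
    __kmpc_begin_sharing_variables(&arg_list, num_captures);
    /* store the address of each captured variable into arg_list[i] */
  }
  syncCTAThreads();                 // wake the workers
  syncCTAThreads();                 // wait until the workers are done
  if (has_captures)
    __kmpc_end_sharing_variables();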
2271
2272
void CGOpenMPRuntimeGPU::emitSPMDParallelCall(
2273
    CodeGenFunction &CGF, SourceLocation Loc, llvm::Function *OutlinedFn,
2274
633
    ArrayRef<llvm::Value *> CapturedVars, const Expr *IfCond) {
2275
  // Just call the outlined function to execute the parallel region.
2276
  // OutlinedFn(&GTid, &zero, CapturedStruct);
2277
  //
2278
633
  llvm::SmallVector<llvm::Value *, 16> OutlinedFnArgs;
2279
2280
633
  Address ZeroAddr = CGF.CreateDefaultAlignTempAlloca(CGF.Int32Ty,
2281
633
                                                      /*Name=*/".zero.addr");
2282
633
  CGF.InitTempAlloca(ZeroAddr, CGF.Builder.getInt32(/*C*/ 0));
2283
  // ThreadId for serialized parallels is 0.
2284
633
  Address ThreadIDAddr = ZeroAddr;
2285
633
  auto &&CodeGen = [this, OutlinedFn, CapturedVars, Loc, &ThreadIDAddr](
2286
633
                       CodeGenFunction &CGF, PrePostActionTy &Action) {
2287
633
    Action.Enter(CGF);
2288
2289
633
    Address ZeroAddr =
2290
633
        CGF.CreateDefaultAlignTempAlloca(CGF.Int32Ty,
2291
633
                                         /*Name=*/".bound.zero.addr");
2292
633
    CGF.InitTempAlloca(ZeroAddr, CGF.Builder.getInt32(/*C*/ 0));
2293
633
    llvm::SmallVector<llvm::Value *, 16> OutlinedFnArgs;
2294
633
    OutlinedFnArgs.push_back(ThreadIDAddr.getPointer());
2295
633
    OutlinedFnArgs.push_back(ZeroAddr.getPointer());
2296
633
    OutlinedFnArgs.append(CapturedVars.begin(), CapturedVars.end());
2297
633
    emitOutlinedFunctionCall(CGF, Loc, OutlinedFn, OutlinedFnArgs);
2298
633
  };
2299
633
  auto &&SeqGen = [this, &CodeGen, Loc](CodeGenFunction &CGF,
2300
3
                                        PrePostActionTy &) {
2301
2302
3
    RegionCodeGenTy RCG(CodeGen);
2303
3
    llvm::Value *RTLoc = emitUpdateLocation(CGF, Loc);
2304
3
    llvm::Value *ThreadID = getThreadID(CGF, Loc);
2305
3
    llvm::Value *Args[] = {RTLoc, ThreadID};
2306
2307
3
    NVPTXActionTy Action(
2308
3
        OMPBuilder.getOrCreateRuntimeFunction(
2309
3
            CGM.getModule(), OMPRTL___kmpc_serialized_parallel),
2310
3
        Args,
2311
3
        OMPBuilder.getOrCreateRuntimeFunction(
2312
3
            CGM.getModule(), OMPRTL___kmpc_end_serialized_parallel),
2313
3
        Args);
2314
3
    RCG.setAction(Action);
2315
3
    RCG(CGF);
2316
3
  };
2317
2318
633
  if (IsInTargetMasterThreadRegion) {
2319
    // In the worker we need to use the real thread id.
2320
630
    ThreadIDAddr = emitThreadIDAddress(CGF, Loc);
2321
630
    RegionCodeGenTy RCG(CodeGen);
2322
630
    RCG(CGF);
2323
3
  } else {
2324
    // If we are not in the target region, it is definitely L2 parallelism or
2325
    // more, because for SPMD mode we always have an L1 parallel level, so we don't
2326
    // need to check for orphaned directives.
2327
3
    RegionCodeGenTy RCG(SeqGen);
2328
3
    RCG(CGF);
2329
3
  }
2330
633
}
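
The SeqGen fallback used here (and in emitNonSPMDParallelCall above) degrades a nested parallel to a single-thread call wrapped in the serialized-parallel pair; roughly:

  __kmpc_serialized_parallel(loc, gtid);
  outlined_fn(&zero, &bound_zero, captured_vars...);  // thread id forced to 0
  __kmpc_end_serialized_parallel(loc, gtid);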
2331
2332
703
void CGOpenMPRuntimeGPU::syncCTAThreads(CodeGenFunction &CGF) {
2333
  // Always emit simple barriers!
2334
703
  if (!CGF.HaveInsertPoint())
2335
0
    return;
2336
  // Build call __kmpc_barrier_simple_spmd(nullptr, 0);
2337
  // This function does not use parameters, so we can emit just default values.
2338
703
  llvm::Value *Args[] = {
2339
703
      llvm::ConstantPointerNull::get(
2340
703
          cast<llvm::PointerType>(getIdentTyPointerTy())),
2341
703
      llvm::ConstantInt::get(CGF.Int32Ty, /*V=*/0, /*isSigned=*/true)};
2342
703
  CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
2343
703
                          CGM.getModule(), OMPRTL___kmpc_barrier_simple_spmd),
2344
703
                      Args);
2345
703
}
2346
2347
void CGOpenMPRuntimeGPU::emitBarrierCall(CodeGenFunction &CGF,
2348
                                           SourceLocation Loc,
2349
                                           OpenMPDirectiveKind Kind, bool,
2350
216
                                           bool) {
2351
  // Always emit simple barriers!
2352
216
  if (!CGF.HaveInsertPoint())
2353
0
    return;
2354
  // Build call __kmpc_cancel_barrier(loc, thread_id);
2355
216
  unsigned Flags = getDefaultFlagsForBarriers(Kind);
2356
216
  llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc, Flags),
2357
216
                         getThreadID(CGF, Loc)};
2358
2359
216
  CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
2360
216
                          CGM.getModule(), OMPRTL___kmpc_barrier),
2361
216
                      Args);
2362
216
}
2363
2364
void CGOpenMPRuntimeGPU::emitCriticalRegion(
2365
    CodeGenFunction &CGF, StringRef CriticalName,
2366
    const RegionCodeGenTy &CriticalOpGen, SourceLocation Loc,
2367
6
    const Expr *Hint) {
2368
6
  llvm::BasicBlock *LoopBB = CGF.createBasicBlock("omp.critical.loop");
2369
6
  llvm::BasicBlock *TestBB = CGF.createBasicBlock("omp.critical.test");
2370
6
  llvm::BasicBlock *SyncBB = CGF.createBasicBlock("omp.critical.sync");
2371
6
  llvm::BasicBlock *BodyBB = CGF.createBasicBlock("omp.critical.body");
2372
6
  llvm::BasicBlock *ExitBB = CGF.createBasicBlock("omp.critical.exit");
2373
2374
6
  auto &RT = static_cast<CGOpenMPRuntimeGPU &>(CGF.CGM.getOpenMPRuntime());
2375
2376
  // Get the mask of active threads in the warp.
2377
6
  llvm::Value *Mask = CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
2378
6
      CGM.getModule(), OMPRTL___kmpc_warp_active_thread_mask));
2379
  // Fetch team-local id of the thread.
2380
6
  llvm::Value *ThreadID = RT.getGPUThreadID(CGF);
2381
2382
  // Get the width of the team.
2383
6
  llvm::Value *TeamWidth = RT.getGPUNumThreads(CGF);
2384
2385
  // Initialize the counter variable for the loop.
2386
6
  QualType Int32Ty =
2387
6
      CGF.getContext().getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/0);
2388
6
  Address Counter = CGF.CreateMemTemp(Int32Ty, "critical_counter");
2389
6
  LValue CounterLVal = CGF.MakeAddrLValue(Counter, Int32Ty);
2390
6
  CGF.EmitStoreOfScalar(llvm::Constant::getNullValue(CGM.Int32Ty), CounterLVal,
2391
6
                        /*isInit=*/true);
2392
2393
  // Block checks if loop counter exceeds upper bound.
2394
6
  CGF.EmitBlock(LoopBB);
2395
6
  llvm::Value *CounterVal = CGF.EmitLoadOfScalar(CounterLVal, Loc);
2396
6
  llvm::Value *CmpLoopBound = CGF.Builder.CreateICmpSLT(CounterVal, TeamWidth);
2397
6
  CGF.Builder.CreateCondBr(CmpLoopBound, TestBB, ExitBB);
2398
2399
  // Block tests which single thread should execute region, and which threads
2400
  // should go straight to synchronisation point.
2401
6
  CGF.EmitBlock(TestBB);
2402
6
  CounterVal = CGF.EmitLoadOfScalar(CounterLVal, Loc);
2403
6
  llvm::Value *CmpThreadToCounter =
2404
6
      CGF.Builder.CreateICmpEQ(ThreadID, CounterVal);
2405
6
  CGF.Builder.CreateCondBr(CmpThreadToCounter, BodyBB, SyncBB);
2406
2407
  // Block emits the body of the critical region.
2408
6
  CGF.EmitBlock(BodyBB);
2409
2410
  // Output the critical statement.
2411
6
  CGOpenMPRuntime::emitCriticalRegion(CGF, CriticalName, CriticalOpGen, Loc,
2412
6
                                      Hint);
2413
2414
  // After the body surrounded by the critical region, the single executing
2415
  // thread will jump to the synchronisation point.
2416
  // Block waits for all threads in the current team to finish, then increments the
2417
  // counter variable and returns to the loop.
2418
6
  CGF.EmitBlock(SyncBB);
2419
  // Reconverge active threads in the warp.
2420
6
  (void)CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
2421
6
                                CGM.getModule(), OMPRTL___kmpc_syncwarp),
2422
6
                            Mask);
2423
2424
6
  llvm::Value *IncCounterVal =
2425
6
      CGF.Builder.CreateNSWAdd(CounterVal, CGF.Builder.getInt32(1));
2426
6
  CGF.EmitStoreOfScalar(IncCounterVal, CounterLVal);
2427
6
  CGF.EmitBranch(LoopBB);
2428
2429
  // Block that is reached when all threads in the team complete the region.
2430
6
  CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
2431
6
}
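
The block structure above serializes the critical region across the team: every thread walks a private counter from 0 to the team width, only the thread whose id equals the counter runs the body, and all threads reconverge before the next iteration. A minimal host-side C++ model of that lowering (the names and the barrier callback are illustrative, not runtime entry points):

#include <functional>

// Model of the omp.critical.{loop,test,body,sync,exit} blocks emitted above.
// ThreadID/TeamWidth stand in for getGPUThreadID()/getGPUNumThreads().
void criticalRegionModel(int ThreadID, int TeamWidth,
                         const std::function<void()> &Body,
                         const std::function<void()> &TeamBarrier) {
  for (int Counter = 0; Counter < TeamWidth; ++Counter) { // omp.critical.loop
    if (ThreadID == Counter)                              // omp.critical.test
      Body();                                             // omp.critical.body
    TeamBarrier(); // omp.critical.sync: __kmpc_syncwarp, then increment
  }
  // omp.critical.exit: every thread has taken its turn.
}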
2432
2433
/// Cast value to the specified type.
2434
static llvm::Value *castValueToType(CodeGenFunction &CGF, llvm::Value *Val,
2435
                                    QualType ValTy, QualType CastTy,
2436
126
                                    SourceLocation Loc) {
2437
126
  assert(!CGF.getContext().getTypeSizeInChars(CastTy).isZero() &&
2438
126
         "Cast type must sized.");
2439
126
  assert(!CGF.getContext().getTypeSizeInChars(ValTy).isZero() &&
2440
126
         "Val type must sized.");
2441
126
  llvm::Type *LLVMCastTy = CGF.ConvertTypeForMem(CastTy);
2442
126
  if (ValTy == CastTy)
2443
78
    return Val;
2444
48
  if (CGF.getContext().getTypeSizeInChars(ValTy) ==
2445
48
      CGF.getContext().getTypeSizeInChars(CastTy))
2446
0
    return CGF.Builder.CreateBitCast(Val, LLVMCastTy);
2447
48
  if (CastTy->isIntegerType() && ValTy->isIntegerType())
2448
48
    return CGF.Builder.CreateIntCast(Val, LLVMCastTy,
2449
48
                                     CastTy->hasSignedIntegerRepresentation());
2450
0
  Address CastItem = CGF.CreateMemTemp(CastTy);
2451
0
  Address ValCastItem = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
2452
0
      CastItem, Val->getType()->getPointerTo(CastItem.getAddressSpace()));
2453
0
  CGF.EmitStoreOfScalar(Val, ValCastItem, /*Volatile=*/false, ValTy,
2454
0
                        LValueBaseInfo(AlignmentSource::Type),
2455
0
                        TBAAAccessInfo());
2456
0
  return CGF.EmitLoadOfScalar(CastItem, /*Volatile=*/false, CastTy, Loc,
2457
0
                              LValueBaseInfo(AlignmentSource::Type),
2458
0
                              TBAAAccessInfo());
2459
0
}
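
In scalar C++ terms, the ladder above is: return the value unchanged when the types match, reinterpret the bits when only the representation differs, use an integer cast honoring the destination's signedness, and otherwise round-trip through a memory temporary. A rough analogue of the two middle rungs (illustrative only):

#include <cstdint>
#include <cstring>

// Same-size case: reinterpret the bits, as CreateBitCast does.
float bitsToFloat(uint32_t Bits) {
  float F;
  std::memcpy(&F, &Bits, sizeof F);
  return F;
}

// Integer-to-integer case: widen or truncate with the cast type's
// signedness, as CreateIntCast does; anything else is stored and reloaded
// through a temporary, mirroring the CastItem path above.
int64_t widenSigned(int32_t V) { return static_cast<int64_t>(V); }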
2460
2461
/// This function creates calls to one of two shuffle functions to copy
2462
/// variables between lanes in a warp.
2463
static llvm::Value *createRuntimeShuffleFunction(CodeGenFunction &CGF,
2464
                                                 llvm::Value *Elem,
2465
                                                 QualType ElemType,
2466
                                                 llvm::Value *Offset,
2467
63
                                                 SourceLocation Loc) {
2468
63
  CodeGenModule &CGM = CGF.CGM;
2469
63
  CGBuilderTy &Bld = CGF.Builder;
2470
63
  CGOpenMPRuntimeGPU &RT =
2471
63
      *(static_cast<CGOpenMPRuntimeGPU *>(&CGM.getOpenMPRuntime()));
2472
63
  llvm::OpenMPIRBuilder &OMPBuilder = RT.getOMPBuilder();
2473
2474
63
  CharUnits Size = CGF.getContext().getTypeSizeInChars(ElemType);
2475
63
  assert(Size.getQuantity() <= 8 &&
2476
63
         "Unsupported bitwidth in shuffle instruction.");
2477
2478
63
  RuntimeFunction ShuffleFn = Size.getQuantity() <= 4
2479
48
                                  ? OMPRTL___kmpc_shuffle_int32
2480
15
                                  : OMPRTL___kmpc_shuffle_int64;
2481
2482
  // Cast all types to 32- or 64-bit values before calling shuffle routines.
2483
63
  QualType CastTy = CGF.getContext().getIntTypeForBitwidth(
2484
48
      Size.getQuantity() <= 4 ? 32 : 64, /*Signed=*/1);
2485
63
  llvm::Value *ElemCast = castValueToType(CGF, Elem, ElemType, CastTy, Loc);
2486
63
  llvm::Value *WarpSize =
2487
63
      Bld.CreateIntCast(RT.getGPUWarpSize(CGF), CGM.Int16Ty, /*isSigned=*/true);
2488
2489
63
  llvm::Value *ShuffledVal = CGF.EmitRuntimeCall(
2490
63
      OMPBuilder.getOrCreateRuntimeFunction(CGM.getModule(), ShuffleFn),
2491
63
      {ElemCast, Offset, WarpSize});
2492
2493
63
  return castValueToType(CGF, ShuffledVal, CastTy, ElemType, Loc);
2494
63
}
2495
2496
static void shuffleAndStore(CodeGenFunction &CGF, Address SrcAddr,
2497
                            Address DestAddr, QualType ElemType,
2498
63
                            llvm::Value *Offset, SourceLocation Loc) {
2499
63
  CGBuilderTy &Bld = CGF.Builder;
2500
2501
63
  CharUnits Size = CGF.getContext().getTypeSizeInChars(ElemType);
2502
  // Create the loop over the big sized data.
2503
  // ptr = (void*)Elem;
2504
  // ptrEnd = (void*) Elem + 1;
2505
  // Step = 8;
2506
  // while (ptr + Step < ptrEnd)
2507
  //   shuffle((int64_t)*ptr);
2508
  // Step = 4;
2509
  // while (ptr + Step < ptrEnd)
2510
  //   shuffle((int32_t)*ptr);
2511
  // ...
2512
63
  Address ElemPtr = DestAddr;
2513
63
  Address Ptr = SrcAddr;
2514
63
  Address PtrEnd = Bld.CreatePointerBitCastOrAddrSpaceCast(
2515
63
      Bld.CreateConstGEP(SrcAddr, 1), CGF.VoidPtrTy);
2516
315
  for (int IntSize = 8; IntSize >= 1; IntSize /= 2) {
2517
252
    if (Size < CharUnits::fromQuantity(IntSize))
2518
189
      continue;
2519
63
    QualType IntType = CGF.getContext().getIntTypeForBitwidth(
2520
63
        CGF.getContext().toBits(CharUnits::fromQuantity(IntSize)),
2521
63
        /*Signed=*/1);
2522
63
    llvm::Type *IntTy = CGF.ConvertTypeForMem(IntType);
2523
63
    Ptr = Bld.CreatePointerBitCastOrAddrSpaceCast(Ptr, IntTy->getPointerTo());
2524
63
    ElemPtr =
2525
63
        Bld.CreatePointerBitCastOrAddrSpaceCast(ElemPtr, IntTy->getPointerTo());
2526
63
    if (Size.getQuantity() / IntSize > 1) {
2527
3
      llvm::BasicBlock *PreCondBB = CGF.createBasicBlock(".shuffle.pre_cond");
2528
3
      llvm::BasicBlock *ThenBB = CGF.createBasicBlock(".shuffle.then");
2529
3
      llvm::BasicBlock *ExitBB = CGF.createBasicBlock(".shuffle.exit");
2530
3
      llvm::BasicBlock *CurrentBB = Bld.GetInsertBlock();
2531
3
      CGF.EmitBlock(PreCondBB);
2532
3
      llvm::PHINode *PhiSrc =
2533
3
          Bld.CreatePHI(Ptr.getType(), /*NumReservedValues=*/2);
2534
3
      PhiSrc->addIncoming(Ptr.getPointer(), CurrentBB);
2535
3
      llvm::PHINode *PhiDest =
2536
3
          Bld.CreatePHI(ElemPtr.getType(), /*NumReservedValues=*/2);
2537
3
      PhiDest->addIncoming(ElemPtr.getPointer(), CurrentBB);
2538
3
      Ptr = Address(PhiSrc, Ptr.getAlignment());
2539
3
      ElemPtr = Address(PhiDest, ElemPtr.getAlignment());
2540
3
      llvm::Value *PtrDiff = Bld.CreatePtrDiff(
2541
3
          PtrEnd.getPointer(), Bld.CreatePointerBitCastOrAddrSpaceCast(
2542
3
                                   Ptr.getPointer(), CGF.VoidPtrTy));
2543
3
      Bld.CreateCondBr(Bld.CreateICmpSGT(PtrDiff, Bld.getInt64(IntSize - 1)),
2544
3
                       ThenBB, ExitBB);
2545
3
      CGF.EmitBlock(ThenBB);
2546
3
      llvm::Value *Res = createRuntimeShuffleFunction(
2547
3
          CGF,
2548
3
          CGF.EmitLoadOfScalar(Ptr, /*Volatile=*/false, IntType, Loc,
2549
3
                               LValueBaseInfo(AlignmentSource::Type),
2550
3
                               TBAAAccessInfo()),
2551
3
          IntType, Offset, Loc);
2552
3
      CGF.EmitStoreOfScalar(Res, ElemPtr, /*Volatile=*/false, IntType,
2553
3
                            LValueBaseInfo(AlignmentSource::Type),
2554
3
                            TBAAAccessInfo());
2555
3
      Address LocalPtr = Bld.CreateConstGEP(Ptr, 1);
2556
3
      Address LocalElemPtr = Bld.CreateConstGEP(ElemPtr, 1);
2557
3
      PhiSrc->addIncoming(LocalPtr.getPointer(), ThenBB);
2558
3
      PhiDest->addIncoming(LocalElemPtr.getPointer(), ThenBB);
2559
3
      CGF.EmitBranch(PreCondBB);
2560
3
      CGF.EmitBlock(ExitBB);
2561
60
    } else {
2562
60
      llvm::Value *Res = createRuntimeShuffleFunction(
2563
60
          CGF,
2564
60
          CGF.EmitLoadOfScalar(Ptr, /*Volatile=*/false, IntType, Loc,
2565
60
                               LValueBaseInfo(AlignmentSource::Type),
2566
60
                               TBAAAccessInfo()),
2567
60
          IntType, Offset, Loc);
2568
60
      CGF.EmitStoreOfScalar(Res, ElemPtr, /*Volatile=*/false, IntType,
2569
60
                            LValueBaseInfo(AlignmentSource::Type),
2570
60
                            TBAAAccessInfo());
2571
60
      Ptr = Bld.CreateConstGEP(Ptr, 1);
2572
60
      ElemPtr = Bld.CreateConstGEP(ElemPtr, 1);
2573
60
    }
2574
63
    Size = Size % IntSize;
2575
63
  }
2576
63
}
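
Conceptually, shuffleAndStore decomposes an element of arbitrary size into 8-, 4-, 2-, and 1-byte chunks and routes each chunk through the 64- or 32-bit shuffle. A sequential sketch of that chunking; the identity stubs stand in for __kmpc_shuffle_int64/__kmpc_shuffle_int32 and are assumptions, not the real lane-exchange semantics:

#include <cstdint>
#include <cstring>

static uint64_t shuffle64(uint64_t V) { return V; } // stub for __kmpc_shuffle_int64
static uint32_t shuffle32(uint32_t V) { return V; } // stub for __kmpc_shuffle_int32

// Shuffle Size bytes from Src into Dest, largest chunks first, mirroring
// the IntSize loop above (8, then 4, 2, 1).
void shuffleAndStoreModel(const char *Src, char *Dest, size_t Size) {
  for (size_t IntSize = 8; IntSize >= 1; IntSize /= 2) {
    while (Size >= IntSize) {
      if (IntSize == 8) {
        uint64_t V;
        std::memcpy(&V, Src, 8);
        V = shuffle64(V);
        std::memcpy(Dest, &V, 8);
      } else {
        uint32_t V = 0;
        std::memcpy(&V, Src, IntSize); // smaller chunks ride a 32-bit value
        V = shuffle32(V);
        std::memcpy(Dest, &V, IntSize);
      }
      Src += IntSize;
      Dest += IntSize;
      Size -= IntSize;
    }
  }
}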
2577
2578
namespace {
2579
enum CopyAction : unsigned {
2580
  // RemoteLaneToThread: Copy over a Reduce list from a remote lane in
2581
  // the warp using shuffle instructions.
2582
  RemoteLaneToThread,
2583
  // ThreadCopy: Make a copy of a Reduce list on the thread's stack.
2584
  ThreadCopy,
2585
  // ThreadToScratchpad: Copy a team-reduced array to the scratchpad.
2586
  ThreadToScratchpad,
2587
  // ScratchpadToThread: Copy from a scratchpad array in global memory
2588
  // containing team-reduced data to a thread's stack.
2589
  ScratchpadToThread,
2590
};
2591
} // namespace
2592
2593
struct CopyOptionsTy {
2594
  llvm::Value *RemoteLaneOffset;
2595
  llvm::Value *ScratchpadIndex;
2596
  llvm::Value *ScratchpadWidth;
2597
};
2598
2599
/// Emit instructions to copy a Reduce list, which contains partially
2600
/// aggregated values, in the specified direction.
2601
static void emitReductionListCopy(
2602
    CopyAction Action, CodeGenFunction &CGF, QualType ReductionArrayTy,
2603
    ArrayRef<const Expr *> Privates, Address SrcBase, Address DestBase,
2604
78
    CopyOptionsTy CopyOptions = {nullptr, nullptr, nullptr}) {
2605
2606
78
  CodeGenModule &CGM = CGF.CGM;
2607
78
  ASTContext &C = CGM.getContext();
2608
78
  CGBuilderTy &Bld = CGF.Builder;
2609
2610
78
  llvm::Value *RemoteLaneOffset = CopyOptions.RemoteLaneOffset;
2611
78
  llvm::Value *ScratchpadIndex = CopyOptions.ScratchpadIndex;
2612
78
  llvm::Value *ScratchpadWidth = CopyOptions.ScratchpadWidth;
2613
2614
  // Iterates, element-by-element, through the source Reduce list and
2615
  // makes a copy.
2616
78
  unsigned Idx = 0;
2617
78
  unsigned Size = Privates.size();
2618
126
  for (const Expr *Private : Privates) {
2619
126
    Address SrcElementAddr = Address::invalid();
2620
126
    Address DestElementAddr = Address::invalid();
2621
126
    Address DestElementPtrAddr = Address::invalid();
2622
    // Should we shuffle in an element from a remote lane?
2623
126
    bool ShuffleInElement = false;
2624
    // Set to true to update the pointer in the dest Reduce list to a
2625
    // newly created element.
2626
126
    bool UpdateDestListPtr = false;
2627
    // Increment the src or dest pointer to the scratchpad, for each
2628
    // new element.
2629
126
    bool IncrScratchpadSrc = false;
2630
126
    bool IncrScratchpadDest = false;
2631
2632
126
    switch (Action) {
2633
63
    case RemoteLaneToThread: {
2634
      // Step 1.1: Get the address for the src element in the Reduce list.
2635
63
      Address SrcElementPtrAddr = Bld.CreateConstArrayGEP(SrcBase, Idx);
2636
63
      SrcElementAddr = CGF.EmitLoadOfPointer(
2637
63
          SrcElementPtrAddr,
2638
63
          C.getPointerType(Private->getType())->castAs<PointerType>());
2639
2640
      // Step 1.2: Create a temporary to store the element in the destination
2641
      // Reduce list.
2642
63
      DestElementPtrAddr = Bld.CreateConstArrayGEP(DestBase, Idx);
2643
63
      DestElementAddr =
2644
63
          CGF.CreateMemTemp(Private->getType(), ".omp.reduction.element");
2645
63
      ShuffleInElement = true;
2646
63
      UpdateDestListPtr = true;
2647
63
      break;
2648
0
    }
2649
63
    case ThreadCopy: {
2650
      // Step 1.1: Get the address for the src element in the Reduce list.
2651
63
      Address SrcElementPtrAddr = Bld.CreateConstArrayGEP(SrcBase, Idx);
2652
63
      SrcElementAddr = CGF.EmitLoadOfPointer(
2653
63
          SrcElementPtrAddr,
2654
63
          C.getPointerType(Private->getType())->castAs<PointerType>());
2655
2656
      // Step 1.2: Get the address for dest element.  The destination
2657
      // element has already been created on the thread's stack.
2658
63
      DestElementPtrAddr = Bld.CreateConstArrayGEP(DestBase, Idx);
2659
63
      DestElementAddr = CGF.EmitLoadOfPointer(
2660
63
          DestElementPtrAddr,
2661
63
          C.getPointerType(Private->getType())->castAs<PointerType>());
2662
63
      break;
2663
0
    }
2664
0
    case ThreadToScratchpad: {
2665
      // Step 1.1: Get the address for the src element in the Reduce list.
2666
0
      Address SrcElementPtrAddr = Bld.CreateConstArrayGEP(SrcBase, Idx);
2667
0
      SrcElementAddr = CGF.EmitLoadOfPointer(
2668
0
          SrcElementPtrAddr,
2669
0
          C.getPointerType(Private->getType())->castAs<PointerType>());
2670
2671
      // Step 1.2: Get the address for dest element:
2672
      // address = base + index * ElementSizeInChars.
2673
0
      llvm::Value *ElementSizeInChars = CGF.getTypeSize(Private->getType());
2674
0
      llvm::Value *CurrentOffset =
2675
0
          Bld.CreateNUWMul(ElementSizeInChars, ScratchpadIndex);
2676
0
      llvm::Value *ScratchPadElemAbsolutePtrVal =
2677
0
          Bld.CreateNUWAdd(DestBase.getPointer(), CurrentOffset);
2678
0
      ScratchPadElemAbsolutePtrVal =
2679
0
          Bld.CreateIntToPtr(ScratchPadElemAbsolutePtrVal, CGF.VoidPtrTy);
2680
0
      DestElementAddr = Address(ScratchPadElemAbsolutePtrVal,
2681
0
                                C.getTypeAlignInChars(Private->getType()));
2682
0
      IncrScratchpadDest = true;
2683
0
      break;
2684
0
    }
2685
0
    case ScratchpadToThread: {
2686
      // Step 1.1: Get the address for the src element in the scratchpad.
2687
      // address = base + index * ElementSizeInChars.
2688
0
      llvm::Value *ElementSizeInChars = CGF.getTypeSize(Private->getType());
2689
0
      llvm::Value *CurrentOffset =
2690
0
          Bld.CreateNUWMul(ElementSizeInChars, ScratchpadIndex);
2691
0
      llvm::Value *ScratchPadElemAbsolutePtrVal =
2692
0
          Bld.CreateNUWAdd(SrcBase.getPointer(), CurrentOffset);
2693
0
      ScratchPadElemAbsolutePtrVal =
2694
0
          Bld.CreateIntToPtr(ScratchPadElemAbsolutePtrVal, CGF.VoidPtrTy);
2695
0
      SrcElementAddr = Address(ScratchPadElemAbsolutePtrVal,
2696
0
                               C.getTypeAlignInChars(Private->getType()));
2697
0
      IncrScratchpadSrc = true;
2698
2699
      // Step 1.2: Create a temporary to store the element in the destination
2700
      // Reduce list.
2701
0
      DestElementPtrAddr = Bld.CreateConstArrayGEP(DestBase, Idx);
2702
0
      DestElementAddr =
2703
0
          CGF.CreateMemTemp(Private->getType(), ".omp.reduction.element");
2704
0
      UpdateDestListPtr = true;
2705
0
      break;
2706
126
    }
2707
126
    }
2708
2709
    // Regardless of the src and dest of the copy, we emit the load of the src
2710
    // element, as this is required in all directions.
2711
126
    SrcElementAddr = Bld.CreateElementBitCast(
2712
126
        SrcElementAddr, CGF.ConvertTypeForMem(Private->getType()));
2713
126
    DestElementAddr = Bld.CreateElementBitCast(DestElementAddr,
2714
126
                                               SrcElementAddr.getElementType());
2715
2716
    // Now that all active lanes have read the element in the
2717
    // Reduce list, shuffle over the value from the remote lane.
2718
126
    if (ShuffleInElement) {
2719
63
      shuffleAndStore(CGF, SrcElementAddr, DestElementAddr, Private->getType(),
2720
63
                      RemoteLaneOffset, Private->getExprLoc());
2721
63
    } else {
2722
63
      switch (CGF.getEvaluationKind(Private->getType())) {
2723
57
      case TEK_Scalar: {
2724
57
        llvm::Value *Elem = CGF.EmitLoadOfScalar(
2725
57
            SrcElementAddr, /*Volatile=*/false, Private->getType(),
2726
57
            Private->getExprLoc(), LValueBaseInfo(AlignmentSource::Type),
2727
57
            TBAAAccessInfo());
2728
        // Store the source element value to the dest element address.
2729
57
        CGF.EmitStoreOfScalar(
2730
57
            Elem, DestElementAddr, /*Volatile=*/false, Private->getType(),
2731
57
            LValueBaseInfo(AlignmentSource::Type), TBAAAccessInfo());
2732
57
        break;
2733
0
      }
2734
0
      case TEK_Complex: {
2735
0
        CodeGenFunction::ComplexPairTy Elem = CGF.EmitLoadOfComplex(
2736
0
            CGF.MakeAddrLValue(SrcElementAddr, Private->getType()),
2737
0
            Private->getExprLoc());
2738
0
        CGF.EmitStoreOfComplex(
2739
0
            Elem, CGF.MakeAddrLValue(DestElementAddr, Private->getType()),
2740
0
            /*isInit=*/false);
2741
0
        break;
2742
0
      }
2743
6
      case TEK_Aggregate:
2744
6
        CGF.EmitAggregateCopy(
2745
6
            CGF.MakeAddrLValue(DestElementAddr, Private->getType()),
2746
6
            CGF.MakeAddrLValue(SrcElementAddr, Private->getType()),
2747
6
            Private->getType(), AggValueSlot::DoesNotOverlap);
2748
6
        break;
2749
126
      }
2750
126
    }
2751
2752
    // Step 3.1: Modify reference in dest Reduce list as needed.
2753
    // Modifying the reference in Reduce list to point to the newly
2754
    // created element.  The element is live in the current function
2755
    // scope and that of functions it invokes (i.e., reduce_function).
2756
    // RemoteReduceData[i] = (void*)&RemoteElem
2757
126
    if (UpdateDestListPtr) {
2758
63
      CGF.EmitStoreOfScalar(Bld.CreatePointerBitCastOrAddrSpaceCast(
2759
63
                                DestElementAddr.getPointer(), CGF.VoidPtrTy),
2760
63
                            DestElementPtrAddr, /*Volatile=*/false,
2761
63
                            C.VoidPtrTy);
2762
63
    }
2763
2764
    // Step 4.1: Increment SrcBase/DestBase so that it points to the starting
2765
    // address of the next element in scratchpad memory, unless we're currently
2766
    // processing the last one.  Memory alignment is also taken care of here.
2767
126
    if ((IncrScratchpadDest || IncrScratchpadSrc) && (Idx + 1 < Size)) {
2768
0
      llvm::Value *ScratchpadBasePtr =
2769
0
          IncrScratchpadDest ? DestBase.getPointer() : SrcBase.getPointer();
2770
0
      llvm::Value *ElementSizeInChars = CGF.getTypeSize(Private->getType());
2771
0
      ScratchpadBasePtr = Bld.CreateNUWAdd(
2772
0
          ScratchpadBasePtr,
2773
0
          Bld.CreateNUWMul(ScratchpadWidth, ElementSizeInChars));
2774
2775
      // Take care of global memory alignment for performance
2776
0
      ScratchpadBasePtr = Bld.CreateNUWSub(
2777
0
          ScratchpadBasePtr, llvm::ConstantInt::get(CGM.SizeTy, 1));
2778
0
      ScratchpadBasePtr = Bld.CreateUDiv(
2779
0
          ScratchpadBasePtr,
2780
0
          llvm::ConstantInt::get(CGM.SizeTy, GlobalMemoryAlignment));
2781
0
      ScratchpadBasePtr = Bld.CreateNUWAdd(
2782
0
          ScratchpadBasePtr, llvm::ConstantInt::get(CGM.SizeTy, 1));
2783
0
      ScratchpadBasePtr = Bld.CreateNUWMul(
2784
0
          ScratchpadBasePtr,
2785
0
          llvm::ConstantInt::get(CGM.SizeTy, GlobalMemoryAlignment));
2786
2787
0
      if (IncrScratchpadDest)
2788
0
        DestBase = Address(ScratchpadBasePtr, CGF.getPointerAlign());
2789
0
      else /* IncrScratchpadSrc = true */
2790
0
        SrcBase = Address(ScratchpadBasePtr, CGF.getPointerAlign());
2791
0
    }
2792
2793
126
    ++Idx;
2794
126
  }
2795
78
}
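
A reduce list here is just an array of void* slots, one per private variable. ThreadCopy copies each pointee in place, while RemoteLaneToThread materializes a shuffled temporary and repoints the destination slot at it. A minimal model of the ThreadCopy direction (the Sizes array and names are hypothetical):

#include <cstddef>
#include <cstring>

// ThreadCopy action: copy each pointee of SrcList into the corresponding
// pointee of DestList; N is the number of reduce elements.
void threadCopyModel(void **DestList, void **SrcList, const size_t *Sizes,
                     unsigned N) {
  for (unsigned I = 0; I < N; ++I)
    std::memcpy(DestList[I], SrcList[I], Sizes[I]);
}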
2796
2797
/// This function emits a helper that gathers Reduce lists from the first
2798
/// lane of every active warp to lanes in the first warp.
2799
///
2800
/// void inter_warp_copy_func(void* reduce_data, num_warps)
2801
///   shared smem[warp_size];
2802
///   For all data entries D in reduce_data:
2803
///     sync
2804
///     If (I am the first lane in each warp)
2805
///       Copy my local D to smem[warp_id]
2806
///     sync
2807
///     if (I am the first warp)
2808
///       Copy smem[thread_id] to my local D
2809
static llvm::Value *emitInterWarpCopyFunction(CodeGenModule &CGM,
2810
                                              ArrayRef<const Expr *> Privates,
2811
                                              QualType ReductionArrayTy,
2812
39
                                              SourceLocation Loc) {
2813
39
  ASTContext &C = CGM.getContext();
2814
39
  llvm::Module &M = CGM.getModule();
2815
2816
  // ReduceList: thread local Reduce list.
2817
  // At the stage of the computation when this function is called, partially
2818
  // aggregated values reside in the first lane of every active warp.
2819
39
  ImplicitParamDecl ReduceListArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
2820
39
                                  C.VoidPtrTy, ImplicitParamDecl::Other);
2821
  // NumWarps: number of warps active in the parallel region.  This could
2822
  // be smaller than 32 (max warps in a CTA) for partial block reduction.
2823
39
  ImplicitParamDecl NumWarpsArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
2824
39
                                C.getIntTypeForBitwidth(32, /* Signed */ true),
2825
39
                                ImplicitParamDecl::Other);
2826
39
  FunctionArgList Args;
2827
39
  Args.push_back(&ReduceListArg);
2828
39
  Args.push_back(&NumWarpsArg);
2829
2830
39
  const CGFunctionInfo &CGFI =
2831
39
      CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
2832
39
  auto *Fn = llvm::Function::Create(CGM.getTypes().GetFunctionType(CGFI),
2833
39
                                    llvm::GlobalValue::InternalLinkage,
2834
39
                                    "_omp_reduction_inter_warp_copy_func", &M);
2835
39
  CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, CGFI);
2836
39
  Fn->setDoesNotRecurse();
2837
39
  CodeGenFunction CGF(CGM);
2838
39
  CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, CGFI, Args, Loc, Loc);
2839
2840
39
  CGBuilderTy &Bld = CGF.Builder;
2841
2842
  // This array is used as a medium to transfer, one reduce element at a time,
2843
  // the data from the first lane of every warp to lanes in the first warp
2844
  // in order to perform the final step of a reduction in a parallel region
2845
  // (reduction across warps).  The array is placed in NVPTX __shared__ memory
2846
  // for reduced latency, as well as to have a distinct copy for concurrently
2847
  // executing target regions.  The array is declared with common linkage so
2848
  // as to be shared across compilation units.
2849
39
  StringRef TransferMediumName =
2850
39
      "__openmp_nvptx_data_transfer_temporary_storage";
2851
39
  llvm::GlobalVariable *TransferMedium =
2852
39
      M.getGlobalVariable(TransferMediumName);
2853
39
  unsigned WarpSize = CGF.getTarget().getGridValue(llvm::omp::GV_Warp_Size);
2854
39
  if (!TransferMedium) {
2855
12
    auto *Ty = llvm::ArrayType::get(CGM.Int32Ty, WarpSize);
2856
12
    unsigned SharedAddressSpace = C.getTargetAddressSpace(LangAS::cuda_shared);
2857
12
    TransferMedium = new llvm::GlobalVariable(
2858
12
        M, Ty, /*isConstant=*/false, llvm::GlobalVariable::CommonLinkage,
2859
12
        llvm::Constant::getNullValue(Ty), TransferMediumName,
2860
12
        /*InsertBefore=*/nullptr, llvm::GlobalVariable::NotThreadLocal,
2861
12
        SharedAddressSpace);
2862
12
    CGM.addCompilerUsedGlobal(TransferMedium);
2863
12
  }
2864
2865
39
  auto &RT = static_cast<CGOpenMPRuntimeGPU &>(CGF.CGM.getOpenMPRuntime());
2866
  // Get the CUDA thread id of the current OpenMP thread on the GPU.
2867
39
  llvm::Value *ThreadID = RT.getGPUThreadID(CGF);
2868
  // nvptx_lane_id = nvptx_id % warpsize
2869
39
  llvm::Value *LaneID = getNVPTXLaneID(CGF);
2870
  // nvptx_warp_id = nvptx_id / warpsize
2871
39
  llvm::Value *WarpID = getNVPTXWarpID(CGF);
2872
2873
39
  Address AddrReduceListArg = CGF.GetAddrOfLocalVar(&ReduceListArg);
2874
39
  Address LocalReduceList(
2875
39
      Bld.CreatePointerBitCastOrAddrSpaceCast(
2876
39
          CGF.EmitLoadOfScalar(
2877
39
              AddrReduceListArg, /*Volatile=*/false, C.VoidPtrTy, Loc,
2878
39
              LValueBaseInfo(AlignmentSource::Type), TBAAAccessInfo()),
2879
39
          CGF.ConvertTypeForMem(ReductionArrayTy)->getPointerTo()),
2880
39
      CGF.getPointerAlign());
2881
2882
39
  unsigned Idx = 0;
2883
63
  for (const Expr *Private : Privates) {
2884
    //
2885
    // Warp master copies reduce element to transfer medium in __shared__
2886
    // memory.
2887
    //
2888
63
    unsigned RealTySize =
2889
63
        C.getTypeSizeInChars(Private->getType())
2890
63
            .alignTo(C.getTypeAlignInChars(Private->getType()))
2891
63
            .getQuantity();
2892
159
    for (unsigned TySize = 4; TySize > 0 && RealTySize > 0; TySize /= 2) {
2893
96
      unsigned NumIters = RealTySize / TySize;
2894
96
      if (NumIters == 0)
2895
33
        continue;
2896
63
      QualType CType = C.getIntTypeForBitwidth(
2897
63
          C.toBits(CharUnits::fromQuantity(TySize)), /*Signed=*/1);
2898
63
      llvm::Type *CopyType = CGF.ConvertTypeForMem(CType);
2899
63
      CharUnits Align = CharUnits::fromQuantity(TySize);
2900
63
      llvm::Value *Cnt = nullptr;
2901
63
      Address CntAddr = Address::invalid();
2902
63
      llvm::BasicBlock *PrecondBB = nullptr;
2903
63
      llvm::BasicBlock *ExitBB = nullptr;
2904
63
      if (NumIters > 1) {
2905
15
        CntAddr = CGF.CreateMemTemp(C.IntTy, ".cnt.addr");
2906
15
        CGF.EmitStoreOfScalar(llvm::Constant::getNullValue(CGM.IntTy), CntAddr,
2907
15
                              /*Volatile=*/false, C.IntTy);
2908
15
        PrecondBB = CGF.createBasicBlock("precond");
2909
15
        ExitBB = CGF.createBasicBlock("exit");
2910
15
        llvm::BasicBlock *BodyBB = CGF.createBasicBlock("body");
2911
        // There is no need to emit line number for unconditional branch.
2912
15
        (void)ApplyDebugLocation::CreateEmpty(CGF);
2913
15
        CGF.EmitBlock(PrecondBB);
2914
15
        Cnt = CGF.EmitLoadOfScalar(CntAddr, /*Volatile=*/false, C.IntTy, Loc);
2915
15
        llvm::Value *Cmp =
2916
15
            Bld.CreateICmpULT(Cnt, llvm::ConstantInt::get(CGM.IntTy, NumIters));
2917
15
        Bld.CreateCondBr(Cmp, BodyBB, ExitBB);
2918
15
        CGF.EmitBlock(BodyBB);
2919
15
      }
2920
      // kmpc_barrier.
2921
63
      CGM.getOpenMPRuntime().emitBarrierCall(CGF, Loc, OMPD_unknown,
2922
63
                                             /*EmitChecks=*/false,
2923
63
                                             /*ForceSimpleCall=*/true);
2924
63
      llvm::BasicBlock *ThenBB = CGF.createBasicBlock("then");
2925
63
      llvm::BasicBlock *ElseBB = CGF.createBasicBlock("else");
2926
63
      llvm::BasicBlock *MergeBB = CGF.createBasicBlock("ifcont");
2927
2928
      // if (lane_id == 0)
2929
63
      llvm::Value *IsWarpMaster = Bld.CreateIsNull(LaneID, "warp_master");
2930
63
      Bld.CreateCondBr(IsWarpMaster, ThenBB, ElseBB);
2931
63
      CGF.EmitBlock(ThenBB);
2932
2933
      // Reduce element = LocalReduceList[i]
2934
63
      Address ElemPtrPtrAddr = Bld.CreateConstArrayGEP(LocalReduceList, Idx);
2935
63
      llvm::Value *ElemPtrPtr = CGF.EmitLoadOfScalar(
2936
63
          ElemPtrPtrAddr, /*Volatile=*/false, C.VoidPtrTy, SourceLocation());
2937
      // elemptr = ((CopyType*)(elemptrptr)) + I
2938
63
      Address ElemPtr = Address(ElemPtrPtr, Align);
2939
63
      ElemPtr = Bld.CreateElementBitCast(ElemPtr, CopyType);
2940
63
      if (NumIters > 1) {
2941
15
        ElemPtr = Address(Bld.CreateGEP(ElemPtr.getPointer(), Cnt),
2942
15
                          ElemPtr.getAlignment());
2943
15
      }
2944
2945
      // Get pointer to location in transfer medium.
2946
      // MediumPtr = &medium[warp_id]
2947
63
      llvm::Value *MediumPtrVal = Bld.CreateInBoundsGEP(
2948
63
          TransferMedium, {llvm::Constant::getNullValue(CGM.Int64Ty), WarpID});
2949
63
      Address MediumPtr(MediumPtrVal, Align);
2950
      // Casting to actual data type.
2951
      // MediumPtr = (CopyType*)MediumPtrAddr;
2952
63
      MediumPtr = Bld.CreateElementBitCast(MediumPtr, CopyType);
2953
2954
      // elem = *elemptr
2955
      //*MediumPtr = elem
2956
63
      llvm::Value *Elem = CGF.EmitLoadOfScalar(
2957
63
          ElemPtr, /*Volatile=*/false, CType, Loc,
2958
63
          LValueBaseInfo(AlignmentSource::Type), TBAAAccessInfo());
2959
      // Store the source element value to the dest element address.
2960
63
      CGF.EmitStoreOfScalar(Elem, MediumPtr, /*Volatile=*/true, CType,
2961
63
                            LValueBaseInfo(AlignmentSource::Type),
2962
63
                            TBAAAccessInfo());
2963
2964
63
      Bld.CreateBr(MergeBB);
2965
2966
63
      CGF.EmitBlock(ElseBB);
2967
63
      Bld.CreateBr(MergeBB);
2968
2969
63
      CGF.EmitBlock(MergeBB);
2970
2971
      // kmpc_barrier.
2972
63
      CGM.getOpenMPRuntime().emitBarrierCall(CGF, Loc, OMPD_unknown,
2973
63
                                             /*EmitChecks=*/false,
2974
63
                                             /*ForceSimpleCall=*/true);
2975
2976
      //
2977
      // Warp 0 copies reduce element from transfer medium.
2978
      //
2979
63
      llvm::BasicBlock *W0ThenBB = CGF.createBasicBlock("then");
2980
63
      llvm::BasicBlock *W0ElseBB = CGF.createBasicBlock("else");
2981
63
      llvm::BasicBlock *W0MergeBB = CGF.createBasicBlock("ifcont");
2982
2983
63
      Address AddrNumWarpsArg = CGF.GetAddrOfLocalVar(&NumWarpsArg);
2984
63
      llvm::Value *NumWarpsVal = CGF.EmitLoadOfScalar(
2985
63
          AddrNumWarpsArg, /*Volatile=*/false, C.IntTy, Loc);
2986
2987
      // Up to 32 threads in warp 0 are active.
2988
63
      llvm::Value *IsActiveThread =
2989
63
          Bld.CreateICmpULT(ThreadID, NumWarpsVal, "is_active_thread");
2990
63
      Bld.CreateCondBr(IsActiveThread, W0ThenBB, W0ElseBB);
2991
2992
63
      CGF.EmitBlock(W0ThenBB);
2993
2994
      // SrcMediumPtr = &medium[tid]
2995
63
      llvm::Value *SrcMediumPtrVal = Bld.CreateInBoundsGEP(
2996
63
          TransferMedium,
2997
63
          {llvm::Constant::getNullValue(CGM.Int64Ty), ThreadID});
2998
63
      Address SrcMediumPtr(SrcMediumPtrVal, Align);
2999
      // SrcMediumVal = *SrcMediumPtr;
3000
63
      SrcMediumPtr = Bld.CreateElementBitCast(SrcMediumPtr, CopyType);
3001
3002
      // TargetElemPtr = (CopyType*)(SrcDataAddr[i]) + I
3003
63
      Address TargetElemPtrPtr = Bld.CreateConstArrayGEP(LocalReduceList, Idx);
3004
63
      llvm::Value *TargetElemPtrVal = CGF.EmitLoadOfScalar(
3005
63
          TargetElemPtrPtr, /*Volatile=*/false, C.VoidPtrTy, Loc);
3006
63
      Address TargetElemPtr = Address(TargetElemPtrVal, Align);
3007
63
      TargetElemPtr = Bld.CreateElementBitCast(TargetElemPtr, CopyType);
3008
63
      if (NumIters > 1) {
3009
15
        TargetElemPtr = Address(Bld.CreateGEP(TargetElemPtr.getPointer(), Cnt),
3010
15
                                TargetElemPtr.getAlignment());
3011
15
      }
3012
3013
      // *TargetElemPtr = SrcMediumVal;
3014
63
      llvm::Value *SrcMediumValue =
3015
63
          CGF.EmitLoadOfScalar(SrcMediumPtr, /*Volatile=*/true, CType, Loc);
3016
63
      CGF.EmitStoreOfScalar(SrcMediumValue, TargetElemPtr, /*Volatile=*/false,
3017
63
                            CType);
3018
63
      Bld.CreateBr(W0MergeBB);
3019
3020
63
      CGF.EmitBlock(W0ElseBB);
3021
63
      Bld.CreateBr(W0MergeBB);
3022
3023
63
      CGF.EmitBlock(W0MergeBB);
3024
3025
63
      if (NumIters > 1) {
3026
15
        Cnt = Bld.CreateNSWAdd(Cnt, llvm::ConstantInt::get(CGM.IntTy, /*V=*/1));
3027
15
        CGF.EmitStoreOfScalar(Cnt, CntAddr, /*Volatile=*/false, C.IntTy);
3028
15
        CGF.EmitBranch(PrecondBB);
3029
15
        (void)ApplyDebugLocation::CreateEmpty(CGF);
3030
15
        CGF.EmitBlock(ExitBB);
3031
15
      }
3032
63
      RealTySize %= TySize;
3033
63
    }
3034
63
    ++Idx;
3035
63
  }
3036
3037
39
  CGF.FinishFunction();
3038
39
  return Fn;
3039
39
}
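
For a single 32-bit reduce element the emitted helper behaves like the following sequential model: warp masters publish into the shared transfer medium, a barrier intervenes, and the first num_warps threads of warp 0 pull the published values into their own lists (container and names are illustrative):

#include <vector>

// Sequential model of inter_warp_copy_func for one 32-bit element.
// LaneData[t] holds thread t's local value; WarpSize * NumWarps threads total.
void interWarpCopyModel(std::vector<int> &LaneData, int WarpSize, int NumWarps) {
  std::vector<int> Smem(WarpSize); // the __shared__ transfer medium
  // Phase 1: lane 0 of each warp copies its element to smem[warp_id].
  for (int Tid = 0; Tid < WarpSize * NumWarps; ++Tid)
    if (Tid % WarpSize == 0)
      Smem[Tid / WarpSize] = LaneData[Tid];
  // __kmpc_barrier separates the two phases.
  // Phase 2: threads 0..NumWarps-1 of warp 0 read smem[thread_id].
  for (int Tid = 0; Tid < NumWarps; ++Tid)
    LaneData[Tid] = Smem[Tid];
}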
3040
3041
/// Emit a helper that reduces data across two OpenMP threads (lanes)
3042
/// in the same warp.  It uses shuffle instructions to copy over data from
3043
/// a remote lane's stack.  The reduction algorithm performed is specified
3044
/// by the fourth parameter.
3045
///
3046
/// Algorithm Versions.
3047
/// Full Warp Reduce (argument value 0):
3048
///   This algorithm assumes that all 32 lanes are active and gathers
3049
///   data from these 32 lanes, producing a single resultant value.
3050
/// Contiguous Partial Warp Reduce (argument value 1):
3051
///   This algorithm assumes that only a *contiguous* subset of lanes
3052
///   are active.  This happens for the last warp in a parallel region
3053
///   when the user specified num_threads is not an integer multiple of
3054
///   32.  This contiguous subset always starts with the zeroth lane.
3055
/// Partial Warp Reduce (argument value 2):
3056
///   This algorithm gathers data from any number of lanes at any position.
3057
/// All reduced values are stored in the lowest possible lane.  The set
3058
/// of problems every algorithm addresses is a superset of those
3059
/// addressable by algorithms with a lower version number.  Overhead
3060
/// increases as algorithm version increases.
3061
///
3062
/// Terminology
3063
/// Reduce element:
3064
///   Reduce element refers to the individual data field with primitive
3065
///   data types to be combined and reduced across threads.
3066
/// Reduce list:
3067
///   Reduce list refers to a collection of local, thread-private
3068
///   reduce elements.
3069
/// Remote Reduce list:
3070
///   Remote Reduce list refers to a collection of remote (relative to
3071
///   the current thread) reduce elements.
3072
///
3073
/// We distinguish between three states of threads that are important to
3074
/// the implementation of this function.
3075
/// Alive threads:
3076
///   Threads in a warp executing the SIMT instruction, as distinguished from
3077
///   threads that are inactive due to divergent control flow.
3078
/// Active threads:
3079
///   The minimal set of threads that has to be alive upon entry to this
3080
///   function.  The computation is correct iff active threads are alive.
3081
///   Some threads are alive but they are not active because they do not
3082
///   contribute to the computation in any useful manner.  Turning them off
3083
///   may introduce control flow overheads without any tangible benefits.
3084
/// Effective threads:
3085
///   In order to comply with the argument requirements of the shuffle
3086
///   function, we must keep all lanes holding data alive.  But at most
3087
///   half of them perform value aggregation; we refer to this half of
3088
///   threads as effective. The other half simply hands off its
3089
///   data.
3090
///
3091
/// Procedure
3092
/// Value shuffle:
3093
///   In this step active threads transfer data from higher lane positions
3094
///   in the warp to lower lane positions, creating Remote Reduce list.
3095
/// Value aggregation:
3096
///   In this step, effective threads combine their thread local Reduce list
3097
///   with Remote Reduce list and store the result in the thread local
3098
///   Reduce list.
3099
/// Value copy:
3100
///   In this step, we deal with the assumption made by algorithm 2
3101
///   (i.e. contiguity assumption).  When we have an odd number of lanes
3102
///   active, say 2k+1, only k threads will be effective and therefore k
3103
///   new values will be produced.  However, the Reduce list owned by the
3104
///   (2k+1)th thread is ignored in the value aggregation.  Therefore
3105
///   we copy the Reduce list from the (2k+1)th lane to (k+1)th lane so
3106
///   that the contiguity assumption still holds.
3107
static llvm::Function *emitShuffleAndReduceFunction(
3108
    CodeGenModule &CGM, ArrayRef<const Expr *> Privates,
3109
39
    QualType ReductionArrayTy, llvm::Function *ReduceFn, SourceLocation Loc) {
3110
39
  ASTContext &C = CGM.getContext();
3111
3112
  // Thread local Reduce list used to host the values of data to be reduced.
3113
39
  ImplicitParamDecl ReduceListArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
3114
39
                                  C.VoidPtrTy, ImplicitParamDecl::Other);
3115
  // Current lane id; could be logical.
3116
39
  ImplicitParamDecl LaneIDArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.ShortTy,
3117
39
                              ImplicitParamDecl::Other);
3118
  // Offset of the remote source lane relative to the current lane.
3119
39
  ImplicitParamDecl RemoteLaneOffsetArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
3120
39
                                        C.ShortTy, ImplicitParamDecl::Other);
3121
  // Algorithm version.  This is expected to be known at compile time.
3122
39
  ImplicitParamDecl AlgoVerArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
3123
39
                               C.ShortTy, ImplicitParamDecl::Other);
3124
39
  FunctionArgList Args;
3125
39
  Args.push_back(&ReduceListArg);
3126
39
  Args.push_back(&LaneIDArg);
3127
39
  Args.push_back(&RemoteLaneOffsetArg);
3128
39
  Args.push_back(&AlgoVerArg);
3129
3130
39
  const CGFunctionInfo &CGFI =
3131
39
      CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
3132
39
  auto *Fn = llvm::Function::Create(
3133
39
      CGM.getTypes().GetFunctionType(CGFI), llvm::GlobalValue::InternalLinkage,
3134
39
      "_omp_reduction_shuffle_and_reduce_func", &CGM.getModule());
3135
39
  CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, CGFI);
3136
39
  Fn->setDoesNotRecurse();
3137
39
  if (CGM.getLangOpts().Optimize) {
3138
6
    Fn->removeFnAttr(llvm::Attribute::NoInline);
3139
6
    Fn->removeFnAttr(llvm::Attribute::OptimizeNone);
3140
6
    Fn->addFnAttr(llvm::Attribute::AlwaysInline);
3141
6
  }
3142
3143
39
  CodeGenFunction CGF(CGM);
3144
39
  CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, CGFI, Args, Loc, Loc);
3145
3146
39
  CGBuilderTy &Bld = CGF.Builder;
3147
3148
39
  Address AddrReduceListArg = CGF.GetAddrOfLocalVar(&ReduceListArg);
3149
39
  Address LocalReduceList(
3150
39
      Bld.CreatePointerBitCastOrAddrSpaceCast(
3151
39
          CGF.EmitLoadOfScalar(AddrReduceListArg, /*Volatile=*/false,
3152
39
                               C.VoidPtrTy, SourceLocation()),
3153
39
          CGF.ConvertTypeForMem(ReductionArrayTy)->getPointerTo()),
3154
39
      CGF.getPointerAlign());
3155
3156
39
  Address AddrLaneIDArg = CGF.GetAddrOfLocalVar(&LaneIDArg);
3157
39
  llvm::Value *LaneIDArgVal = CGF.EmitLoadOfScalar(
3158
39
      AddrLaneIDArg, /*Volatile=*/false, C.ShortTy, SourceLocation());
3159
3160
39
  Address AddrRemoteLaneOffsetArg = CGF.GetAddrOfLocalVar(&RemoteLaneOffsetArg);
3161
39
  llvm::Value *RemoteLaneOffsetArgVal = CGF.EmitLoadOfScalar(
3162
39
      AddrRemoteLaneOffsetArg, /*Volatile=*/false, C.ShortTy, SourceLocation());
3163
3164
39
  Address AddrAlgoVerArg = CGF.GetAddrOfLocalVar(&AlgoVerArg);
3165
39
  llvm::Value *AlgoVerArgVal = CGF.EmitLoadOfScalar(
3166
39
      AddrAlgoVerArg, /*Volatile=*/false, C.ShortTy, SourceLocation());
3167
3168
  // Create a local thread-private variable to host the Reduce list
3169
  // from a remote lane.
3170
39
  Address RemoteReduceList =
3171
39
      CGF.CreateMemTemp(ReductionArrayTy, ".omp.reduction.remote_reduce_list");
3172
3173
  // This loop iterates through the list of reduce elements and copies,
3174
  // element by element, from a remote lane in the warp to RemoteReduceList,
3175
  // hosted on the thread's stack.
3176
39
  emitReductionListCopy(RemoteLaneToThread, CGF, ReductionArrayTy, Privates,
3177
39
                        LocalReduceList, RemoteReduceList,
3178
39
                        {/*RemoteLaneOffset=*/RemoteLaneOffsetArgVal,
3179
39
                         /*ScratchpadIndex=*/nullptr,
3180
39
                         /*ScratchpadWidth=*/nullptr});
3181
3182
  // The actions to be performed on the Remote Reduce list depend
3183
  // on the algorithm version.
3184
  //
3185
  //  if (AlgoVer==0) || (AlgoVer==1 && (LaneId < Offset)) || (AlgoVer==2 &&
3186
  //  LaneId % 2 == 0 && Offset > 0):
3187
  //    do the reduction value aggregation
3188
  //
3189
  //  The thread local variable Reduce list is mutated in place to host the
3190
  //  reduced data, which is the aggregated value produced from local and
3191
  //  remote lanes.
3192
  //
3193
  //  Note that AlgoVer is expected to be a constant integer known at compile
3194
  //  time.
3195
  //  When AlgoVer==0, the first conjunction evaluates to true, making
3196
  //    the entire predicate true during compile time.
3197
  //  When AlgoVer==1, the second conjunction has only the second part to be
3198
  //    evaluated during runtime.  Other conjunctions evaluate to false
3199
  //    during compile time.
3200
  //  When AlgoVer==2, the third conjunction has only the second part to be
3201
  //    evaluated during runtime.  Other conjunctions evaluate to false
3202
  //    during compile time.
3203
39
  llvm::Value *CondAlgo0 = Bld.CreateIsNull(AlgoVerArgVal);
3204
3205
39
  llvm::Value *Algo1 = Bld.CreateICmpEQ(AlgoVerArgVal, Bld.getInt16(1));
3206
39
  llvm::Value *CondAlgo1 = Bld.CreateAnd(
3207
39
      Algo1, Bld.CreateICmpULT(LaneIDArgVal, RemoteLaneOffsetArgVal));
3208
3209
39
  llvm::Value *Algo2 = Bld.CreateICmpEQ(AlgoVerArgVal, Bld.getInt16(2));
3210
39
  llvm::Value *CondAlgo2 = Bld.CreateAnd(
3211
39
      Algo2, Bld.CreateIsNull(Bld.CreateAnd(LaneIDArgVal, Bld.getInt16(1))));
3212
39
  CondAlgo2 = Bld.CreateAnd(
3213
39
      CondAlgo2, Bld.CreateICmpSGT(RemoteLaneOffsetArgVal, Bld.getInt16(0)));
3214
3215
39
  llvm::Value *CondReduce = Bld.CreateOr(CondAlgo0, CondAlgo1);
3216
39
  CondReduce = Bld.CreateOr(CondReduce, CondAlgo2);
3217
3218
39
  llvm::BasicBlock *ThenBB = CGF.createBasicBlock("then");
3219
39
  llvm::BasicBlock *ElseBB = CGF.createBasicBlock("else");
3220
39
  llvm::BasicBlock *MergeBB = CGF.createBasicBlock("ifcont");
3221
39
  Bld.CreateCondBr(CondReduce, ThenBB, ElseBB);
3222
3223
39
  CGF.EmitBlock(ThenBB);
3224
  // reduce_function(LocalReduceList, RemoteReduceList)
3225
39
  llvm::Value *LocalReduceListPtr = Bld.CreatePointerBitCastOrAddrSpaceCast(
3226
39
      LocalReduceList.getPointer(), CGF.VoidPtrTy);
3227
39
  llvm::Value *RemoteReduceListPtr = Bld.CreatePointerBitCastOrAddrSpaceCast(
3228
39
      RemoteReduceList.getPointer(), CGF.VoidPtrTy);
3229
39
  CGM.getOpenMPRuntime().emitOutlinedFunctionCall(
3230
39
      CGF, Loc, ReduceFn, {LocalReduceListPtr, RemoteReduceListPtr});
3231
39
  Bld.CreateBr(MergeBB);
3232
3233
39
  CGF.EmitBlock(ElseBB);
3234
39
  Bld.CreateBr(MergeBB);
3235
3236
39
  CGF.EmitBlock(MergeBB);
3237
3238
  // if (AlgoVer==1 && (LaneId >= Offset)) copy Remote Reduce list to local
3239
  // Reduce list.
3240
39
  Algo1 = Bld.CreateICmpEQ(AlgoVerArgVal, Bld.getInt16(1));
3241
39
  llvm::Value *CondCopy = Bld.CreateAnd(
3242
39
      Algo1, Bld.CreateICmpUGE(LaneIDArgVal, RemoteLaneOffsetArgVal));
3243
3244
39
  llvm::BasicBlock *CpyThenBB = CGF.createBasicBlock("then");
3245
39
  llvm::BasicBlock *CpyElseBB = CGF.createBasicBlock("else");
3246
39
  llvm::BasicBlock *CpyMergeBB = CGF.createBasicBlock("ifcont");
3247
39
  Bld.CreateCondBr(CondCopy, CpyThenBB, CpyElseBB);
3248
3249
39
  CGF.EmitBlock(CpyThenBB);
3250
39
  emitReductionListCopy(ThreadCopy, CGF, ReductionArrayTy, Privates,
3251
39
                        RemoteReduceList, LocalReduceList);
3252
39
  Bld.CreateBr(CpyMergeBB);
3253
3254
39
  CGF.EmitBlock(CpyElseBB);
3255
39
  Bld.CreateBr(CpyMergeBB);
3256
3257
39
  CGF.EmitBlock(CpyMergeBB);
3258
3259
39
  CGF.FinishFunction();
3260
39
  return Fn;
3261
39
}
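
The emitted control flow boils down to two predicates over the algorithm version, lane id, and remote-lane offset; the C++ below is a direct transcription of the CondAlgo0/1/2 and CondCopy conditions built above:

#include <cstdint>

// Should this lane aggregate its list with the shuffled remote list?
bool shouldReduce(uint16_t AlgoVer, uint16_t LaneID, uint16_t RemoteOffset) {
  bool CondAlgo0 = AlgoVer == 0;                          // full warp
  bool CondAlgo1 = AlgoVer == 1 && LaneID < RemoteOffset; // contiguous subset
  bool CondAlgo2 = AlgoVer == 2 && (LaneID & 1) == 0 &&
                   RemoteOffset > 0;                      // dispersed lanes
  return CondAlgo0 || CondAlgo1 || CondAlgo2;
}

// Should this lane adopt the remote list wholesale? This is the "value copy"
// step that restores the contiguity assumption for algorithm 1.
bool shouldCopyRemote(uint16_t AlgoVer, uint16_t LaneID, uint16_t RemoteOffset) {
  return AlgoVer == 1 && LaneID >= RemoteOffset;
}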
3262
3263
/// This function emits a helper that copies all the reduction variables from
3264
/// the team into the provided global buffer for the reduction variables.
3265
///
3266
/// void list_to_global_copy_func(void *buffer, int Idx, void *reduce_data)
3267
///   For all data entries D in reduce_data:
3268
///     Copy local D to buffer.D[Idx]
3269
static llvm::Value *emitListToGlobalCopyFunction(
3270
    CodeGenModule &CGM, ArrayRef<const Expr *> Privates,
3271
    QualType ReductionArrayTy, SourceLocation Loc,
3272
    const RecordDecl *TeamReductionRec,
3273
    const llvm::SmallDenseMap<const ValueDecl *, const FieldDecl *>
3274
18
        &VarFieldMap) {
3275
18
  ASTContext &C = CGM.getContext();
3276
3277
  // Buffer: global reduction buffer.
3278
18
  ImplicitParamDecl BufferArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
3279
18
                              C.VoidPtrTy, ImplicitParamDecl::Other);
3280
  // Idx: index of the buffer.
3281
18
  ImplicitParamDecl IdxArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.IntTy,
3282
18
                           ImplicitParamDecl::Other);
3283
  // ReduceList: thread local Reduce list.
3284
18
  ImplicitParamDecl ReduceListArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
3285
18
                                  C.VoidPtrTy, ImplicitParamDecl::Other);
3286
18
  FunctionArgList Args;
3287
18
  Args.push_back(&BufferArg);
3288
18
  Args.push_back(&IdxArg);
3289
18
  Args.push_back(&ReduceListArg);
3290
3291
18
  const CGFunctionInfo &CGFI =
3292
18
      CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
3293
18
  auto *Fn = llvm::Function::Create(
3294
18
      CGM.getTypes().GetFunctionType(CGFI), llvm::GlobalValue::InternalLinkage,
3295
18
      "_omp_reduction_list_to_global_copy_func", &CGM.getModule());
3296
18
  CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, CGFI);
3297
18
  Fn->setDoesNotRecurse();
3298
18
  CodeGenFunction CGF(CGM);
3299
18
  CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, CGFI, Args, Loc, Loc);
3300
3301
18
  CGBuilderTy &Bld = CGF.Builder;
3302
3303
18
  Address AddrReduceListArg = CGF.GetAddrOfLocalVar(&ReduceListArg);
3304
18
  Address AddrBufferArg = CGF.GetAddrOfLocalVar(&BufferArg);
3305
18
  Address LocalReduceList(
3306
18
      Bld.CreatePointerBitCastOrAddrSpaceCast(
3307
18
          CGF.EmitLoadOfScalar(AddrReduceListArg, /*Volatile=*/false,
3308
18
                               C.VoidPtrTy, Loc),
3309
18
          CGF.ConvertTypeForMem(ReductionArrayTy)->getPointerTo()),
3310
18
      CGF.getPointerAlign());
3311
18
  QualType StaticTy = C.getRecordType(TeamReductionRec);
3312
18
  llvm::Type *LLVMReductionsBufferTy =
3313
18
      CGM.getTypes().ConvertTypeForMem(StaticTy);
3314
18
  llvm::Value *BufferArrPtr = Bld.CreatePointerBitCastOrAddrSpaceCast(
3315
18
      CGF.EmitLoadOfScalar(AddrBufferArg, /*Volatile=*/false, C.VoidPtrTy, Loc),
3316
18
      LLVMReductionsBufferTy->getPointerTo());
3317
18
  llvm::Value *Idxs[] = {llvm::ConstantInt::getNullValue(CGF.Int32Ty),
3318
18
                         CGF.EmitLoadOfScalar(CGF.GetAddrOfLocalVar(&IdxArg),
3319
18
                                              /*Volatile=*/false, C.IntTy,
3320
18
                                              Loc)};
3321
18
  unsigned Idx = 0;
3322
30
  for (const Expr *Private : Privates) {
3323
    // Reduce element = LocalReduceList[i]
3324
30
    Address ElemPtrPtrAddr = Bld.CreateConstArrayGEP(LocalReduceList, Idx);
3325
30
    llvm::Value *ElemPtrPtr = CGF.EmitLoadOfScalar(
3326
30
        ElemPtrPtrAddr, /*Volatile=*/false, C.VoidPtrTy, SourceLocation());
3327
    // elemptr = ((CopyType*)(elemptrptr)) + I
3328
30
    ElemPtrPtr = Bld.CreatePointerBitCastOrAddrSpaceCast(
3329
30
        ElemPtrPtr, CGF.ConvertTypeForMem(Private->getType())->getPointerTo());
3330
30
    Address ElemPtr =
3331
30
        Address(ElemPtrPtr, C.getTypeAlignInChars(Private->getType()));
3332
30
    const ValueDecl *VD = cast<DeclRefExpr>(Private)->getDecl();
3333
    // Global = Buffer.VD[Idx];
3334
30
    const FieldDecl *FD = VarFieldMap.lookup(VD);
3335
30
    LValue GlobLVal = CGF.EmitLValueForField(
3336
30
        CGF.MakeNaturalAlignAddrLValue(BufferArrPtr, StaticTy), FD);
3337
30
    llvm::Value *BufferPtr =
3338
30
        Bld.CreateInBoundsGEP(GlobLVal.getPointer(CGF), Idxs);
3339
30
    GlobLVal.setAddress(Address(BufferPtr, GlobLVal.getAlignment()));
3340
30
    switch (CGF.getEvaluationKind(Private->getType())) {
3341
30
    case TEK_Scalar: {
3342
30
      llvm::Value *V = CGF.EmitLoadOfScalar(
3343
30
          ElemPtr, /*Volatile=*/false, Private->getType(), Loc,
3344
30
          LValueBaseInfo(AlignmentSource::Type), TBAAAccessInfo());
3345
30
      CGF.EmitStoreOfScalar(V, GlobLVal);
3346
30
      break;
3347
0
    }
3348
0
    case TEK_Complex: {
3349
0
      CodeGenFunction::ComplexPairTy V = CGF.EmitLoadOfComplex(
3350
0
          CGF.MakeAddrLValue(ElemPtr, Private->getType()), Loc);
3351
0
      CGF.EmitStoreOfComplex(V, GlobLVal, /*isInit=*/false);
3352
0
      break;
3353
0
    }
3354
0
    case TEK_Aggregate:
3355
0
      CGF.EmitAggregateCopy(GlobLVal,
3356
0
                            CGF.MakeAddrLValue(ElemPtr, Private->getType()),
3357
0
                            Private->getType(), AggValueSlot::DoesNotOverlap);
3358
0
      break;
3359
30
    }
3360
30
    ++Idx;
3361
30
  }
3362
3363
18
  CGF.FinishFunction();
3364
18
  return Fn;
3365
18
}
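
Each field of the team reduction record is an array with one slot per participating team, so the copy is an indexed store per variable. A hypothetical two-variable record makes the shape concrete (the struct and its extents are invented for illustration; the real record is built by codegen):

// Hypothetical team reduction record: one array slot per participating team.
struct ReductionRecord {
  int A[1024];
  double B[1024];
};

// list_to_global_copy_func analogue: store the local values into slot Idx.
void listToGlobalCopyModel(ReductionRecord *Buffer, int Idx,
                           const int *LocalA, const double *LocalB) {
  Buffer->A[Idx] = *LocalA; // Copy local D to buffer.D[Idx]
  Buffer->B[Idx] = *LocalB;
}

The global_to_list copy helper emitted further below simply reverses the direction of these stores.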
3366
3367
/// This function emits a helper that reduces all the reduction variables from
3368
/// the team into the provided global buffer for the reduction variables.
3369
///
3370
/// void list_to_global_reduce_func(void *buffer, int Idx, void *reduce_data)
3371
///  void *GlobPtrs[];
3372
///  GlobPtrs[0] = (void*)&buffer.D0[Idx];
3373
///  ...
3374
///  GlobPtrs[N] = (void*)&buffer.DN[Idx];
3375
///  reduce_function(GlobPtrs, reduce_data);
3376
static llvm::Value *emitListToGlobalReduceFunction(
3377
    CodeGenModule &CGM, ArrayRef<const Expr *> Privates,
3378
    QualType ReductionArrayTy, SourceLocation Loc,
3379
    const RecordDecl *TeamReductionRec,
3380
    const llvm::SmallDenseMap<const ValueDecl *, const FieldDecl *>
3381
        &VarFieldMap,
3382
18
    llvm::Function *ReduceFn) {
3383
18
  ASTContext &C = CGM.getContext();
3384
3385
  // Buffer: global reduction buffer.
3386
18
  ImplicitParamDecl BufferArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
3387
18
                              C.VoidPtrTy, ImplicitParamDecl::Other);
3388
  // Idx: index of the buffer.
3389
18
  ImplicitParamDecl IdxArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.IntTy,
3390
18
                           ImplicitParamDecl::Other);
3391
  // ReduceList: thread local Reduce list.
3392
18
  ImplicitParamDecl ReduceListArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
3393
18
                                  C.VoidPtrTy, ImplicitParamDecl::Other);
3394
18
  FunctionArgList Args;
3395
18
  Args.push_back(&BufferArg);
3396
18
  Args.push_back(&IdxArg);
3397
18
  Args.push_back(&ReduceListArg);
3398
3399
18
  const CGFunctionInfo &CGFI =
3400
18
      CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
3401
18
  auto *Fn = llvm::Function::Create(
3402
18
      CGM.getTypes().GetFunctionType(CGFI), llvm::GlobalValue::InternalLinkage,
3403
18
      "_omp_reduction_list_to_global_reduce_func", &CGM.getModule());
3404
18
  CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, CGFI);
3405
18
  Fn->setDoesNotRecurse();
3406
18
  CodeGenFunction CGF(CGM);
3407
18
  CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, CGFI, Args, Loc, Loc);
3408
3409
18
  CGBuilderTy &Bld = CGF.Builder;
3410
3411
18
  Address AddrBufferArg = CGF.GetAddrOfLocalVar(&BufferArg);
3412
18
  QualType StaticTy = C.getRecordType(TeamReductionRec);
3413
18
  llvm::Type *LLVMReductionsBufferTy =
3414
18
      CGM.getTypes().ConvertTypeForMem(StaticTy);
3415
18
  llvm::Value *BufferArrPtr = Bld.CreatePointerBitCastOrAddrSpaceCast(
3416
18
      CGF.EmitLoadOfScalar(AddrBufferArg, /*Volatile=*/false, C.VoidPtrTy, Loc),
3417
18
      LLVMReductionsBufferTy->getPointerTo());
3418
3419
  // 1. Build a list of reduction variables.
3420
  // void *RedList[<n>] = {<ReductionVars>[0], ..., <ReductionVars>[<n>-1]};
3421
18
  Address ReductionList =
3422
18
      CGF.CreateMemTemp(ReductionArrayTy, ".omp.reduction.red_list");
3423
18
  auto IPriv = Privates.begin();
3424
18
  llvm::Value *Idxs[] = {llvm::ConstantInt::getNullValue(CGF.Int32Ty),
3425
18
                         CGF.EmitLoadOfScalar(CGF.GetAddrOfLocalVar(&IdxArg),
3426
18
                                              /*Volatile=*/false, C.IntTy,
3427
18
                                              Loc)};
3428
18
  unsigned Idx = 0;
3429
48
  for (unsigned I = 0, E = Privates.size(); I < E; ++I, ++IPriv, ++Idx) {
3430
30
    Address Elem = CGF.Builder.CreateConstArrayGEP(ReductionList, Idx);
3431
    // Global = Buffer.VD[Idx];
3432
30
    const ValueDecl *VD = cast<DeclRefExpr>(*IPriv)->getDecl();
3433
30
    const FieldDecl *FD = VarFieldMap.lookup(VD);
3434
30
    LValue GlobLVal = CGF.EmitLValueForField(
3435
30
        CGF.MakeNaturalAlignAddrLValue(BufferArrPtr, StaticTy), FD);
3436
30
    llvm::Value *BufferPtr =
3437
30
        Bld.CreateInBoundsGEP(GlobLVal.getPointer(CGF), Idxs);
3438
30
    llvm::Value *Ptr = CGF.EmitCastToVoidPtr(BufferPtr);
3439
30
    CGF.EmitStoreOfScalar(Ptr, Elem, /*Volatile=*/false, C.VoidPtrTy);
3440
30
    if ((*IPriv)->getType()->isVariablyModifiedType()) {
3441
      // Store array size.
3442
0
      ++Idx;
3443
0
      Elem = CGF.Builder.CreateConstArrayGEP(ReductionList, Idx);
3444
0
      llvm::Value *Size = CGF.Builder.CreateIntCast(
3445
0
          CGF.getVLASize(
3446
0
                 CGF.getContext().getAsVariableArrayType((*IPriv)->getType()))
3447
0
              .NumElts,
3448
0
          CGF.SizeTy, /*isSigned=*/false);
3449
0
      CGF.Builder.CreateStore(CGF.Builder.CreateIntToPtr(Size, CGF.VoidPtrTy),
3450
0
                              Elem);
3451
0
    }
3452
30
  }
3453
3454
  // Call reduce_function(GlobalReduceList, ReduceList)
3455
18
  llvm::Value *GlobalReduceList =
3456
18
      CGF.EmitCastToVoidPtr(ReductionList.getPointer());
3457
18
  Address AddrReduceListArg = CGF.GetAddrOfLocalVar(&ReduceListArg);
3458
18
  llvm::Value *ReducedPtr = CGF.EmitLoadOfScalar(
3459
18
      AddrReduceListArg, /*Volatile=*/false, C.VoidPtrTy, Loc);
3460
18
  CGM.getOpenMPRuntime().emitOutlinedFunctionCall(
3461
18
      CGF, Loc, ReduceFn, {GlobalReduceList, ReducedPtr});
3462
18
  CGF.FinishFunction();
3463
18
  return Fn;
3464
18
}
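
The reduce variant performs no copy; it gathers a pointer to slot Idx of every buffer field and hands that list, together with the thread's own reduce list, to the generated reduce_function. Reusing the hypothetical ReductionRecord from the previous sketch:

// list_to_global_reduce_func analogue: build GlobPtrs over slot Idx, then
// reduce_function(GlobPtrs, reduce_data) accumulates into the buffer.
void listToGlobalReduceModel(ReductionRecord *Buffer, int Idx,
                             void **ReduceData,
                             void (*ReduceFn)(void **, void **)) {
  void *GlobPtrs[2] = {&Buffer->A[Idx], &Buffer->B[Idx]};
  ReduceFn(GlobPtrs, ReduceData);
}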
3465
3466
/// This function emits a helper that copies all the reduction variables from
3467
/// the provided global buffer back into the team's local reduction variables.
3468
///
3469
/// void global_to_list_copy_func(void *buffer, int Idx, void *reduce_data)
3470
///   For all data entries D in reduce_data:
3471
///     Copy buffer.D[Idx] to local D;
3472
static llvm::Value *emitGlobalToListCopyFunction(
3473
    CodeGenModule &CGM, ArrayRef<const Expr *> Privates,
3474
    QualType ReductionArrayTy, SourceLocation Loc,
3475
    const RecordDecl *TeamReductionRec,
3476
    const llvm::SmallDenseMap<const ValueDecl *, const FieldDecl *>
3477
18
        &VarFieldMap) {
3478
18
  ASTContext &C = CGM.getContext();
3479
3480
  // Buffer: global reduction buffer.
3481
18
  ImplicitParamDecl BufferArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
3482
18
                              C.VoidPtrTy, ImplicitParamDecl::Other);
3483
  // Idx: index of the buffer.
3484
18
  ImplicitParamDecl IdxArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.IntTy,
3485
18
                           ImplicitParamDecl::Other);
3486
  // ReduceList: thread local Reduce list.
3487
18
  ImplicitParamDecl ReduceListArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
3488
18
                                  C.VoidPtrTy, ImplicitParamDecl::Other);
3489
18
  FunctionArgList Args;
3490
18
  Args.push_back(&BufferArg);
3491
18
  Args.push_back(&IdxArg);
3492
18
  Args.push_back(&ReduceListArg);
3493
3494
18
  const CGFunctionInfo &CGFI =
3495
18
      CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
3496
18
  auto *Fn = llvm::Function::Create(
3497
18
      CGM.getTypes().GetFunctionType(CGFI), llvm::GlobalValue::InternalLinkage,
3498
18
      "_omp_reduction_global_to_list_copy_func", &CGM.getModule());
3499
18
  CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, CGFI);
3500
18
  Fn->setDoesNotRecurse();
3501
18
  CodeGenFunction CGF(CGM);
3502
18
  CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, CGFI, Args, Loc, Loc);
3503
3504
18
  CGBuilderTy &Bld = CGF.Builder;
3505
3506
18
  Address AddrReduceListArg = CGF.GetAddrOfLocalVar(&ReduceListArg);
3507
18
  Address AddrBufferArg = CGF.GetAddrOfLocalVar(&BufferArg);
3508
18
  Address LocalReduceList(
3509
18
      Bld.CreatePointerBitCastOrAddrSpaceCast(
3510
18
          CGF.EmitLoadOfScalar(AddrReduceListArg, /*Volatile=*/false,
3511
18
                               C.VoidPtrTy, Loc),
3512
18
          CGF.ConvertTypeForMem(ReductionArrayTy)->getPointerTo()),
3513
18
      CGF.getPointerAlign());
3514
18
  QualType StaticTy = C.getRecordType(TeamReductionRec);
3515
18
  llvm::Type *LLVMReductionsBufferTy =
3516
18
      CGM.getTypes().ConvertTypeForMem(StaticTy);
3517
18
  llvm::Value *BufferArrPtr = Bld.CreatePointerBitCastOrAddrSpaceCast(
3518
18
      CGF.EmitLoadOfScalar(AddrBufferArg, /*Volatile=*/false, C.VoidPtrTy, Loc),
3519
18
      LLVMReductionsBufferTy->getPointerTo());
3520
3521
18
  llvm::Value *Idxs[] = {llvm::ConstantInt::getNullValue(CGF.Int32Ty),
3522
18
                         CGF.EmitLoadOfScalar(CGF.GetAddrOfLocalVar(&IdxArg),
3523
18
                                              /*Volatile=*/false, C.IntTy,
3524
18
                                              Loc)};
3525
18
  unsigned Idx = 0;
3526
30
  for (const Expr *Private : Privates) {
3527
    // Reduce element = LocalReduceList[i]
3528
30
    Address ElemPtrPtrAddr = Bld.CreateConstArrayGEP(LocalReduceList, Idx);
3529
30
    llvm::Value *ElemPtrPtr = CGF.EmitLoadOfScalar(
3530
30
        ElemPtrPtrAddr, /*Volatile=*/false, C.VoidPtrTy, SourceLocation());
3531
    // elemptr = ((CopyType*)(elemptrptr)) + I
3532
30
    ElemPtrPtr = Bld.CreatePointerBitCastOrAddrSpaceCast(
3533
30
        ElemPtrPtr, CGF.ConvertTypeForMem(Private->getType())->getPointerTo());
3534
30
    Address ElemPtr =
3535
30
        Address(ElemPtrPtr, C.getTypeAlignInChars(Private->getType()));
3536
30
    const ValueDecl *VD = cast<DeclRefExpr>(Private)->getDecl();
3537
    // Global = Buffer.VD[Idx];
3538
30
    const FieldDecl *FD = VarFieldMap.lookup(VD);
3539
30
    LValue GlobLVal = CGF.EmitLValueForField(
3540
30
        CGF.MakeNaturalAlignAddrLValue(BufferArrPtr, StaticTy), FD);
3541
30
    llvm::Value *BufferPtr =
3542
30
        Bld.CreateInBoundsGEP(GlobLVal.getPointer(CGF), Idxs);
3543
30
    GlobLVal.setAddress(Address(BufferPtr, GlobLVal.getAlignment()));
3544
30
    switch (CGF.getEvaluationKind(Private->getType())) {
3545
30
    case TEK_Scalar: {
3546
30
      llvm::Value *V = CGF.EmitLoadOfScalar(GlobLVal, Loc);
3547
30
      CGF.EmitStoreOfScalar(V, ElemPtr, /*Volatile=*/false, Private->getType(),
3548
30
                            LValueBaseInfo(AlignmentSource::Type),
3549
30
                            TBAAAccessInfo());
3550
30
      break;
3551
0
    }
3552
0
    case TEK_Complex: {
3553
0
      CodeGenFunction::ComplexPairTy V = CGF.EmitLoadOfComplex(GlobLVal, Loc);
3554
0
      CGF.EmitStoreOfComplex(V, CGF.MakeAddrLValue(ElemPtr, Private->getType()),
3555
0
                             /*isInit=*/false);
3556
0
      break;
3557
0
    }
3558
0
    case TEK_Aggregate:
3559
0
      CGF.EmitAggregateCopy(CGF.MakeAddrLValue(ElemPtr, Private->getType()),
3560
0
                            GlobLVal, Private->getType(),
3561
0
                            AggValueSlot::DoesNotOverlap);
3562
0
      break;
3563
30
    }
3564
30
    ++Idx;
3565
30
  }
3566
3567
18
  CGF.FinishFunction();
3568
18
  return Fn;
3569
18
}
3570
3571
/// This function emits a helper that reduces all the reduction variables in
3572
/// the provided global buffer into the local reduce_data list.
3573
///
3574
/// void global_to_list_reduce_func(void *buffer, int Idx, void *reduce_data)
3575
///  void *GlobPtrs[];
3576
///  GlobPtrs[0] = (void*)&buffer.D0[Idx];
3577
///  ...
3578
///  GlobPtrs[N] = (void*)&buffer.DN[Idx];
3579
///  reduce_function(reduce_data, GlobPtrs);
3580
static llvm::Value *emitGlobalToListReduceFunction(
3581
    CodeGenModule &CGM, ArrayRef<const Expr *> Privates,
3582
    QualType ReductionArrayTy, SourceLocation Loc,
3583
    const RecordDecl *TeamReductionRec,
3584
    const llvm::SmallDenseMap<const ValueDecl *, const FieldDecl *>
3585
        &VarFieldMap,
3586
18
    llvm::Function *ReduceFn) {
3587
18
  ASTContext &C = CGM.getContext();
3588
3589
  // Buffer: global reduction buffer.
3590
18
  ImplicitParamDecl BufferArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
3591
18
                              C.VoidPtrTy, ImplicitParamDecl::Other);
3592
  // Idx: index of the buffer.
3593
18
  ImplicitParamDecl IdxArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.IntTy,
3594
18
                           ImplicitParamDecl::Other);
3595
  // ReduceList: thread local Reduce list.
3596
18
  ImplicitParamDecl ReduceListArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
3597
18
                                  C.VoidPtrTy, ImplicitParamDecl::Other);
3598
18
  FunctionArgList Args;
3599
18
  Args.push_back(&BufferArg);
3600
18
  Args.push_back(&IdxArg);
3601
18
  Args.push_back(&ReduceListArg);
3602
3603
18
  const CGFunctionInfo &CGFI =
3604
18
      CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
3605
18
  auto *Fn = llvm::Function::Create(
3606
18
      CGM.getTypes().GetFunctionType(CGFI), llvm::GlobalValue::InternalLinkage,
3607
18
      "_omp_reduction_global_to_list_reduce_func", &CGM.getModule());
3608
18
  CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, CGFI);
3609
18
  Fn->setDoesNotRecurse();
3610
18
  CodeGenFunction CGF(CGM);
3611
18
  CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, CGFI, Args, Loc, Loc);
3612
3613
18
  CGBuilderTy &Bld = CGF.Builder;
3614
3615
18
  Address AddrBufferArg = CGF.GetAddrOfLocalVar(&BufferArg);
3616
18
  QualType StaticTy = C.getRecordType(TeamReductionRec);
3617
18
  llvm::Type *LLVMReductionsBufferTy =
3618
18
      CGM.getTypes().ConvertTypeForMem(StaticTy);
3619
18
  llvm::Value *BufferArrPtr = Bld.CreatePointerBitCastOrAddrSpaceCast(
3620
18
      CGF.EmitLoadOfScalar(AddrBufferArg, /*Volatile=*/false, C.VoidPtrTy, Loc),
3621
18
      LLVMReductionsBufferTy->getPointerTo());
3622
3623
  // 1. Build a list of reduction variables.
3624
  // void *RedList[<n>] = {<ReductionVars>[0], ..., <ReductionVars>[<n>-1]};
3625
18
  Address ReductionList =
3626
18
      CGF.CreateMemTemp(ReductionArrayTy, ".omp.reduction.red_list");
3627
18
  auto IPriv = Privates.begin();
3628
18
  llvm::Value *Idxs[] = {llvm::ConstantInt::getNullValue(CGF.Int32Ty),
3629
18
                         CGF.EmitLoadOfScalar(CGF.GetAddrOfLocalVar(&IdxArg),
3630
18
                                              /*Volatile=*/false, C.IntTy,
3631
18
                                              Loc)};
3632
18
  unsigned Idx = 0;
3633
48
  for (unsigned I = 0, E = Privates.size(); I < E; ++I, ++IPriv, ++Idx) {
3634
30
    Address Elem = CGF.Builder.CreateConstArrayGEP(ReductionList, Idx);
3635
    // Global = Buffer.VD[Idx];
3636
30
    const ValueDecl *VD = cast<DeclRefExpr>(*IPriv)->getDecl();
3637
30
    const FieldDecl *FD = VarFieldMap.lookup(VD);
3638
30
    LValue GlobLVal = CGF.EmitLValueForField(
3639
30
        CGF.MakeNaturalAlignAddrLValue(BufferArrPtr, StaticTy), FD);
3640
30
    llvm::Value *BufferPtr =
3641
30
        Bld.CreateInBoundsGEP(GlobLVal.getPointer(CGF), Idxs);
3642
30
    llvm::Value *Ptr = CGF.EmitCastToVoidPtr(BufferPtr);
3643
30
    CGF.EmitStoreOfScalar(Ptr, Elem, /*Volatile=*/false, C.VoidPtrTy);
3644
30
    if ((*IPriv)->getType()->isVariablyModifiedType()) {
3645
      // Store array size.
3646
0
      ++Idx;
3647
0
      Elem = CGF.Builder.CreateConstArrayGEP(ReductionList, Idx);
3648
0
      llvm::Value *Size = CGF.Builder.CreateIntCast(
3649
0
          CGF.getVLASize(
3650
0
                 CGF.getContext().getAsVariableArrayType((*IPriv)->getType()))
3651
0
              .NumElts,
3652
0
          CGF.SizeTy, /*isSigned=*/false);
3653
0
      CGF.Builder.CreateStore(CGF.Builder.CreateIntToPtr(Size, CGF.VoidPtrTy),
3654
0
                              Elem);
3655
0
    }
3656
30
  }
3657
3658
  // Call reduce_function(ReduceList, GlobalReduceList)
3659
18
  llvm::Value *GlobalReduceList =
3660
18
      CGF.EmitCastToVoidPtr(ReductionList.getPointer());
3661
18
  Address AddrReduceListArg = CGF.GetAddrOfLocalVar(&ReduceListArg);
3662
18
  llvm::Value *ReducedPtr = CGF.EmitLoadOfScalar(
3663
18
      AddrReduceListArg, /*Volatile=*/false, C.VoidPtrTy, Loc);
3664
18
  CGM.getOpenMPRuntime().emitOutlinedFunctionCall(
3665
18
      CGF, Loc, ReduceFn, {ReducedPtr, GlobalReduceList});
3666
18
  CGF.FinishFunction();
3667
18
  return Fn;
3668
18
}
3669
3670
///
3671
/// Design of OpenMP reductions on the GPU
3672
///
3673
/// Consider a typical OpenMP program with one or more reduction
3674
/// clauses:
3675
///
3676
/// float foo;
3677
/// double bar;
3678
/// #pragma omp target teams distribute parallel for \
3679
///             reduction(+:foo) reduction(*:bar)
3680
/// for (int i = 0; i < N; i++) {
3681
///   foo += A[i]; bar *= B[i];
3682
/// }
3683
///
3684
/// where 'foo' and 'bar' are reduced across all OpenMP threads in
3685
/// all teams.  In our OpenMP implementation on the NVPTX device an
3686
/// OpenMP team is mapped to a CUDA threadblock and OpenMP threads
3687
/// within a team are mapped to CUDA threads within a threadblock.
3688
/// Our goal is to efficiently aggregate values across all OpenMP
3689
/// threads such that:
3690
///
3691
///   - the compiler and runtime are logically concise, and
3692
///   - the reduction is performed efficiently in a hierarchical
3693
///     manner as follows: within OpenMP threads in the same warp,
3694
///     across warps in a threadblock, and finally across teams on
3695
///     the NVPTX device.
3696
///
3697
/// Introduction to Decoupling
3698
///
3699
/// We would like to decouple the compiler and the runtime so that the
3700
/// latter is ignorant of the reduction variables (number, data types)
3701
/// and the reduction operators.  This allows a simpler interface
3702
/// and implementation while still attaining good performance.
3703
///
3704
/// Pseudocode for the aforementioned OpenMP program generated by the
3705
/// compiler is as follows:
3706
///
3707
/// 1. Create private copies of reduction variables on each OpenMP
3708
///    thread: 'foo_private', 'bar_private'
3709
/// 2. Each OpenMP thread reduces the chunk of 'A' and 'B' assigned
3710
///    to it and writes the result in 'foo_private' and 'bar_private'
3711
///    respectively.
3712
/// 3. Call the OpenMP runtime on the GPU to reduce within a team
3713
///    and store the result on the team master:
3714
///
3715
///     __kmpc_nvptx_parallel_reduce_nowait_v2(...,
3716
///        reduceData, shuffleReduceFn, interWarpCpyFn)
3717
///
3718
///     where:
3719
///       struct ReduceData {
3720
///         float *foo;
3721
///         double *bar;
3722
///       } reduceData
3723
///       reduceData.foo = &foo_private
3724
///       reduceData.bar = &bar_private
3725
///
3726
///     'shuffleReduceFn' and 'interWarpCpyFn' are pointers to two
3727
///     auxiliary functions generated by the compiler that operate on
3728
///     variables of type 'ReduceData'.  They help the runtime perform the
3729
///     algorithmic steps in a data-agnostic manner.
3730
///
3731
///     'shuffleReduceFn' is a pointer to a function that reduces data
3732
///     of type 'ReduceData' across two OpenMP threads (lanes) in the
3733
///     same warp.  It takes the following arguments as input:
3734
///
3735
///     a. variable of type 'ReduceData' on the calling lane,
3736
///     b. its lane_id,
3737
///     c. an offset relative to the current lane_id to generate a
3738
///        remote_lane_id.  The remote lane contains the second
3739
///        variable of type 'ReduceData' that is to be reduced.
3740
///     d. an algorithm version parameter determining which reduction
3741
///        algorithm to use.
3742
///
3743
///     'shuffleReduceFn' retrieves data from the remote lane using
3744
///     efficient GPU shuffle intrinsics and reduces, using the
3745
///     algorithm specified by the 4th parameter, the two operands
3746
///     element-wise.  The result is written to the first operand.
3747
///
3748
///     Different reduction algorithms are implemented in different
3749
///     runtime functions, all calling 'shuffleReduceFn' to perform
3750
///     the essential reduction step.  Therefore, based on the 4th
3751
///     parameter, this function behaves slightly differently to
3752
///     cooperate with the runtime to ensure correctness under
3753
///     different circumstances.
3754
///
3755
///     'InterWarpCpyFn' is a pointer to a function that transfers
3756
///     reduced variables across warps.  It tunnels, through CUDA
3757
///     shared memory, the thread-private data of type 'ReduceData'
3758
///     from lane 0 of each warp to a lane in the first warp.
3759
/// 4. Call the OpenMP runtime on the GPU to reduce across teams.
3760
///    The last team writes the global reduced value to memory.
3761
///
3762
///     ret = __kmpc_nvptx_teams_reduce_nowait(...,
3763
///             reduceData, shuffleReduceFn, interWarpCpyFn,
3764
///             scratchpadCopyFn, loadAndReduceFn)
3765
///
3766
///     'scratchpadCopyFn' is a helper that stores reduced
3767
///     data from the team master to a scratchpad array in
3768
///     global memory.
3769
///
3770
///     'loadAndReduceFn' is a helper that loads data from
3771
///     the scratchpad array and reduces it with the input
3772
///     operand.
3773
///
3774
///     These compiler generated functions hide address
3775
///     calculation and alignment information from the runtime.
3776
/// 5. if ret == 1:
3777
///     The team master of the last team stores the reduced
3778
///     result to the globals in memory.
3779
///     foo += reduceData.foo; bar *= reduceData.bar
3780
///
3781
///
3782
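/// As a concrete illustration of steps 1-5, a minimal CUDA-style sketch of
/// the code the compiler conceptually emits for the example program; the
/// function name and the my_lower_bound/my_upper_bound helpers are
/// hypothetical, while the __kmpc entry points are the runtime calls named
/// above:
///
/// __device__ void reduction_kernel_body(float *A, double *B,
///                                       float *foo, double *bar) {
///   float foo_private = 0.0f;                // step 1: private copies
///   double bar_private = 1.0;
///   for (int i = my_lower_bound(); i < my_upper_bound(); ++i) {
///     foo_private += A[i];                   // step 2: reduce own chunk
///     bar_private *= B[i];
///   }
///   ReduceData reduceData;                   // steps 3 and 4: runtime calls
///   reduceData.foo = &foo_private;
///   reduceData.bar = &bar_private;
///   int ret = __kmpc_nvptx_teams_reduce_nowait(
///       ..., &reduceData, shuffleReduceFn, interWarpCpyFn,
///       scratchpadCopyFn, loadAndReduceFn);
///   if (ret == 1) {                          // step 5: last team's master
///     *foo += foo_private;
///     *bar *= bar_private;
///   }
/// }
///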
/// Warp Reduction Algorithms
3783
///
3784
/// On the warp level, we have three algorithms implemented in the
3785
/// OpenMP runtime depending on the number of active lanes:
3786
///
3787
/// Full Warp Reduction
3788
///
3789
/// The reduce algorithm within a warp where all lanes are active
3790
/// is implemented in the runtime as follows:
3791
///
3792
/// full_warp_reduce(void *reduce_data,
3793
///                  kmp_ShuffleReductFctPtr ShuffleReduceFn) {
3794
///   for (int offset = WARPSIZE/2; offset > 0; offset /= 2)
3795
///     ShuffleReduceFn(reduce_data, 0, offset, 0);
3796
/// }
3797
///
3798
/// The algorithm completes in log(2, WARPSIZE) steps.
3799
///
3800
/// 'ShuffleReduceFn' is called here with lane_id set to 0 because lane_id is
3801
/// not used by this algorithm; this saves instructions by not retrieving it
3802
/// from the corresponding special registers.  The 4th parameter, which
3803
/// represents the version of the algorithm being used, is set to 0 to
3804
/// signify full warp reduction.
3805
///
3806
/// In this version, 'ShuffleReduceFn' behaves, per element, as follows:
3807
///
3808
/// #reduce_elem refers to an element in the local lane's data structure
3809
/// #remote_elem is retrieved from a remote lane
3810
/// remote_elem = shuffle_down(reduce_elem, offset, WARPSIZE);
3811
/// reduce_elem = reduce_elem REDUCE_OP remote_elem;
3812
///
3813
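/// For a single float element, this pattern can be written as real CUDA
/// with the __shfl_down_sync intrinsic; a minimal sketch, assuming all 32
/// lanes are active and '+' as REDUCE_OP (the helper name is illustrative):
///
/// __device__ float full_warp_reduce_sum(float reduce_elem) {
///   for (int offset = 32 / 2; offset > 0; offset /= 2) {
///     // remote_elem = shuffle_down(reduce_elem, offset, WARPSIZE);
///     float remote_elem =
///         __shfl_down_sync(0xffffffffu, reduce_elem, offset, 32);
///     reduce_elem += remote_elem;  // reduce_elem REDUCE_OP remote_elem
///   }
///   return reduce_elem;            // lane 0 now holds the warp-wide sum
/// }
///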
/// Contiguous Partial Warp Reduction
3814
///
3815
/// This reduce algorithm is used within a warp where only the first
3816
/// 'n' (n <= WARPSIZE) lanes are active.  It is typically used when the
3817
/// number of OpenMP threads in a parallel region is not a multiple of
3818
/// WARPSIZE.  The algorithm is implemented in the runtime as follows:
3819
///
3820
/// void
3821
/// contiguous_partial_reduce(void *reduce_data,
3822
///                           kmp_ShuffleReductFctPtr ShuffleReduceFn,
3823
///                           int size, int lane_id) {
3824
///   int curr_size;
3825
///   int offset;
3826
///   curr_size = size;
3827
///   offset = curr_size/2;
3828
///   while (offset>0) {
3829
///     ShuffleReduceFn(reduce_data, lane_id, offset, 1);
3830
///     curr_size = (curr_size+1)/2;
3831
///     offset = curr_size/2;
3832
///   }
3833
/// }
3834
///
3835
/// In this version, 'ShuffleReduceFn' behaves, per element, as follows:
3836
///
3837
/// remote_elem = shuffle_down(reduce_elem, offset, WARPSIZE);
3838
/// if (lane_id < offset)
3839
///     reduce_elem = reduce_elem REDUCE_OP remote_elem
3840
/// else
3841
///     reduce_elem = remote_elem
3842
///
3843
/// This algorithm assumes that the data to be reduced are located in a
3844
/// contiguous subset of lanes starting from the first.  When there is
3845
/// an odd number of active lanes, the data in the last lane is not
3846
/// aggregated with any other lane's data but is instead copied over.
3847
///
3848
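/// The same step for one float element, as a hedged CUDA sketch; for
/// simplicity it lets all 32 lanes execute the shuffle (inactive lanes hold
/// don't-care data), whereas the runtime's masking is more careful:
///
/// __device__ float contiguous_partial_reduce_sum(float reduce_elem,
///                                                int size, int lane_id) {
///   int curr_size = size;
///   int offset = curr_size / 2;
///   while (offset > 0) {
///     float remote_elem =
///         __shfl_down_sync(0xffffffffu, reduce_elem, offset, 32);
///     if (lane_id < offset)
///       reduce_elem += remote_elem;  // aggregate
///     else
///       reduce_elem = remote_elem;   // odd tail: copy over
///     curr_size = (curr_size + 1) / 2;
///     offset = curr_size / 2;
///   }
///   return reduce_elem;              // lane 0 holds the partial-warp sum
/// }
///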
/// Dispersed Partial Warp Reduction
3849
///
3850
/// This algorithm is used within a warp when any discontiguous subset of
3851
/// lanes is active.  It is used to implement the reduction operation
3852
/// across lanes in an OpenMP simd region or in a nested parallel region.
3853
///
3854
/// void
3855
/// dispersed_partial_reduce(void *reduce_data,
3856
///                          kmp_ShuffleReductFctPtr ShuffleReduceFn) {
3857
///   int size, remote_id;
3858
///   int logical_lane_id = number_of_active_lanes_before_me() * 2;
3859
///   do {
3860
///       remote_id = next_active_lane_id_right_after_me();
3861
///       # the above function returns 0 if no active lane
3862
///       # is present right after the current lane.
3863
///       size = number_of_active_lanes_in_this_warp();
3864
///       logical_lane_id /= 2;
3865
///       ShuffleReduceFn(reduce_data, logical_lane_id,
3866
///                       remote_id-1-threadIdx.x, 2);
3867
///   } while (logical_lane_id % 2 == 0 && size > 1);
3868
/// }
3869
///
3870
/// There is no assumption made about the initial state of the reduction.
3871
/// Any number of lanes (>=1) could be active at any position.  The reduction
3872
/// result is returned in the first active lane.
3873
///
3874
/// In this version, 'ShuffleReduceFn' behaves, per element, as follows:
3875
///
3876
/// remote_elem = shuffle_down(reduce_elem, offset, WARPSIZE);
3877
/// if (lane_id % 2 == 0 && offset > 0)
3878
///     reduce_elem = reduce_elem REDUCE_OP remote_elem
3879
/// else
3880
///     reduce_elem = remote_elem
3881
///
3882
///
3883
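/// The lane bookkeeping used above can be sketched with real CUDA
/// intrinsics; the helper names mirror the pseudocode and WARPSIZE is
/// assumed to be 32:
///
/// __device__ int number_of_active_lanes_before_me() {
///   unsigned mask = __activemask();
///   unsigned lane = threadIdx.x % 32;
///   return __popc((int)(mask & ((1u << lane) - 1))); // active lanes below
/// }
/// __device__ int next_active_lane_id_right_after_me() {
///   unsigned mask = __activemask();
///   unsigned lane = threadIdx.x % 32;
///   unsigned higher = mask & ~((2u << lane) - 1);  // lanes strictly above
///   return higher ? __ffs((int)higher) : 0;        // 1-based id, 0 if none
/// }
///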
/// Intra-Team Reduction
3884
///
3885
/// This function, as implemented in the runtime call
3886
/// '__kmpc_nvptx_parallel_reduce_nowait_v2', aggregates data across OpenMP
3887
/// threads in a team.  It first reduces within a warp using the
3888
/// aforementioned algorithms.  We then proceed to gather all such
3889
/// reduced values at the first warp.
3890
///
3891
/// The runtime makes use of the function 'InterWarpCpyFn', which copies
3892
/// data from each of the "warp masters" (the zeroth lane of each warp, where
3893
/// warp-reduced data is held) to the zeroth warp.  This step reduces (in
3894
/// a mathematical sense) the problem of reduction across warp masters in
3895
/// a block to the problem of warp reduction.
3896
///
3897
///
3898
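/// A compact CUDA sketch of this intra-team step (hypothetical helper
/// names; it reuses full_warp_reduce_sum from the earlier sketch and
/// assumes at most 32 warps per block):
///
/// __device__ float team_reduce_sum(float v) {
///   __shared__ float warp_results[32];  // the InterWarpCpyFn "tunnel"
///   int lane = threadIdx.x % 32;
///   int warp = threadIdx.x / 32;
///   v = full_warp_reduce_sum(v);        // reduce within each warp
///   if (lane == 0)
///     warp_results[warp] = v;           // each warp master publishes
///   __syncthreads();
///   int num_warps = (blockDim.x + 31) / 32;
///   if (warp == 0) {                    // first warp reduces the masters
///     v = (lane < num_warps) ? warp_results[lane] : 0.0f;
///     v = full_warp_reduce_sum(v);
///   }
///   return v;                           // thread 0 holds the team result
/// }
///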
/// Inter-Team Reduction
3899
///
3900
/// Once a team has reduced its data to a single value, it is stored in
3901
/// a global scratchpad array.  Since each team has a distinct slot, this
3902
/// can be done without locking.
3903
///
3904
/// The last team to write to the scratchpad array proceeds to reduce the
3905
/// scratchpad array.  One or more workers in the last team use the helper
3906
/// 'loadAndReduceFn' to load and reduce values from the array, i.e.,
3907
/// the k'th worker reduces every k'th element.
3908
///
3909
/// Finally, a call is made to '__kmpc_nvptx_parallel_reduce_nowait_v2' to
3910
/// reduce across workers and compute a globally reduced value.
3911
///
3912
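/// A simplified CUDA sketch of the inter-team step (hypothetical names; the
/// real runtime parallelizes the final pass across the last team's workers
/// as described above, rather than running it on a single thread):
///
/// __device__ unsigned teams_done = 0;
///
/// __device__ void inter_team_reduce_sum(float team_result,
///                                       float *scratchpad,
///                                       float *global_result,
///                                       unsigned num_teams) {
///   if (threadIdx.x == 0) {
///     scratchpad[blockIdx.x] = team_result;  // distinct slot, no locking
///     __threadfence();                       // publish before counting
///     if (atomicInc(&teams_done, num_teams - 1) == num_teams - 1) {
///       float acc = 0.0f;                    // last team reduces the array
///       for (unsigned i = 0; i < num_teams; ++i)
///         acc += scratchpad[i];
///       *global_result += acc;
///     }
///   }
/// }
///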
void CGOpenMPRuntimeGPU::emitReduction(
3913
    CodeGenFunction &CGF, SourceLocation Loc, ArrayRef<const Expr *> Privates,
3914
    ArrayRef<const Expr *> LHSExprs, ArrayRef<const Expr *> RHSExprs,
3915
45
    ArrayRef<const Expr *> ReductionOps, ReductionOptionsTy Options) {
3916
45
  if (!CGF.HaveInsertPoint())
3917
0
    return;
3918
3919
45
  bool ParallelReduction = isOpenMPParallelDirective(Options.ReductionKind);
3920
45
#ifndef NDEBUG
3921
45
  bool TeamsReduction = isOpenMPTeamsDirective(Options.ReductionKind);
3922
45
#endif
3923
3924
45
  if (Options.SimpleReduction) {
3925
6
    assert(!TeamsReduction && !ParallelReduction &&
3926
6
           "Invalid reduction selection in emitReduction.");
3927
6
    CGOpenMPRuntime::emitReduction(CGF, Loc, Privates, LHSExprs, RHSExprs,
3928
6
                                   ReductionOps, Options);
3929
6
    return;
3930
6
  }
3931
3932
39
  assert((TeamsReduction || ParallelReduction) &&
3933
39
         "Invalid reduction selection in emitReduction.");
3934
3935
  // Build res = __kmpc_reduce{_nowait}(<gtid>, <n>, sizeof(RedList),
3936
  // RedList, shuffle_reduce_func, interwarp_copy_func);
3937
  // or
3938
  // Build res = __kmpc_reduce_teams_nowait_simple(<loc>, <gtid>, <lck>);
3939
39
  llvm::Value *RTLoc = emitUpdateLocation(CGF, Loc);
3940
39
  llvm::Value *ThreadId = getThreadID(CGF, Loc);
3941
3942
39
  llvm::Value *Res;
3943
39
  ASTContext &C = CGM.getContext();
3944
  // 1. Build a list of reduction variables.
3945
  // void *RedList[<n>] = {<ReductionVars>[0], ..., <ReductionVars>[<n>-1]};
3946
39
  auto Size = RHSExprs.size();
3947
63
  for (const Expr *E : Privates) {
3948
63
    if (E->getType()->isVariablyModifiedType())
3949
      // Reserve place for array size.
3950
0
      ++Size;
3951
63
  }
3952
39
  llvm::APInt ArraySize(/*unsigned int numBits=*/32, Size);
3953
39
  QualType ReductionArrayTy =
3954
39
      C.getConstantArrayType(C.VoidPtrTy, ArraySize, nullptr, ArrayType::Normal,
3955
39
                             /*IndexTypeQuals=*/0);
3956
39
  Address ReductionList =
3957
39
      CGF.CreateMemTemp(ReductionArrayTy, ".omp.reduction.red_list");
3958
39
  auto IPriv = Privates.begin();
3959
39
  unsigned Idx = 0;
3960
102
  for (unsigned I = 0, E = RHSExprs.size(); I < E; ++I, ++IPriv, ++Idx) {
3961
63
    Address Elem = CGF.Builder.CreateConstArrayGEP(ReductionList, Idx);
3962
63
    CGF.Builder.CreateStore(
3963
63
        CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
3964
63
            CGF.EmitLValue(RHSExprs[I]).getPointer(CGF), CGF.VoidPtrTy),
3965
63
        Elem);
3966
63
    if ((*IPriv)->getType()->isVariablyModifiedType()) {
3967
      // Store array size.
3968
0
      ++Idx;
3969
0
      Elem = CGF.Builder.CreateConstArrayGEP(ReductionList, Idx);
3970
0
      llvm::Value *Size = CGF.Builder.CreateIntCast(
3971
0
          CGF.getVLASize(
3972
0
                 CGF.getContext().getAsVariableArrayType((*IPriv)->getType()))
3973
0
              .NumElts,
3974
0
          CGF.SizeTy, /*isSigned=*/false);
3975
0
      CGF.Builder.CreateStore(CGF.Builder.CreateIntToPtr(Size, CGF.VoidPtrTy),
3976
0
                              Elem);
3977
0
    }
3978
63
  }
3979
3980
39
  llvm::Value *RL = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
3981
39
      ReductionList.getPointer(), CGF.VoidPtrTy);
3982
39
  llvm::Function *ReductionFn = emitReductionFunction(
3983
39
      Loc, CGF.ConvertTypeForMem(ReductionArrayTy)->getPointerTo(), Privates,
3984
39
      LHSExprs, RHSExprs, ReductionOps);
3985
39
  llvm::Value *ReductionArrayTySize = CGF.getTypeSize(ReductionArrayTy);
3986
39
  llvm::Function *ShuffleAndReduceFn = emitShuffleAndReduceFunction(
3987
39
      CGM, Privates, ReductionArrayTy, ReductionFn, Loc);
3988
39
  llvm::Value *InterWarpCopyFn =
3989
39
      emitInterWarpCopyFunction(CGM, Privates, ReductionArrayTy, Loc);
3990
3991
39
  if (ParallelReduction) {
3992
21
    llvm::Value *Args[] = {RTLoc,
3993
21
                           ThreadId,
3994
21
                           CGF.Builder.getInt32(RHSExprs.size()),
3995
21
                           ReductionArrayTySize,
3996
21
                           RL,
3997
21
                           ShuffleAndReduceFn,
3998
21
                           InterWarpCopyFn};
3999
4000
21
    Res = CGF.EmitRuntimeCall(
4001
21
        OMPBuilder.getOrCreateRuntimeFunction(
4002
21
            CGM.getModule(), OMPRTL___kmpc_nvptx_parallel_reduce_nowait_v2),
4003
21
        Args);
4004
18
  } else {
4005
18
    assert(TeamsReduction && "expected teams reduction.");
4006
18
    llvm::SmallDenseMap<const ValueDecl *, const FieldDecl *> VarFieldMap;
4007
18
    llvm::SmallVector<const ValueDecl *, 4> PrivatesReductions(Privates.size());
4008
18
    int Cnt = 0;
4009
30
    for (const Expr *DRE : Privates) {
4010
30
      PrivatesReductions[Cnt] = cast<DeclRefExpr>(DRE)->getDecl();
4011
30
      ++Cnt;
4012
30
    }
4013
18
    const RecordDecl *TeamReductionRec = ::buildRecordForGlobalizedVars(
4014
18
        CGM.getContext(), PrivatesReductions, llvm::None, VarFieldMap,
4015
18
        C.getLangOpts().OpenMPCUDAReductionBufNum);
4016
18
    TeamsReductions.push_back(TeamReductionRec);
4017
18
    if (!KernelTeamsReductionPtr) {
4018
6
      KernelTeamsReductionPtr = new llvm::GlobalVariable(
4019
6
          CGM.getModule(), CGM.VoidPtrTy, /*isConstant=*/true,
4020
6
          llvm::GlobalValue::InternalLinkage, nullptr,
4021
6
          "_openmp_teams_reductions_buffer_$_$ptr");
4022
6
    }
4023
18
    llvm::Value *GlobalBufferPtr = CGF.EmitLoadOfScalar(
4024
18
        Address(KernelTeamsReductionPtr, CGM.getPointerAlign()),
4025
18
        /*Volatile=*/false, C.getPointerType(C.VoidPtrTy), Loc);
4026
18
    llvm::Value *GlobalToBufferCpyFn = ::emitListToGlobalCopyFunction(
4027
18
        CGM, Privates, ReductionArrayTy, Loc, TeamReductionRec, VarFieldMap);
4028
18
    llvm::Value *GlobalToBufferRedFn = ::emitListToGlobalReduceFunction(
4029
18
        CGM, Privates, ReductionArrayTy, Loc, TeamReductionRec, VarFieldMap,
4030
18
        ReductionFn);
4031
18
    llvm::Value *BufferToGlobalCpyFn = ::emitGlobalToListCopyFunction(
4032
18
        CGM, Privates, ReductionArrayTy, Loc, TeamReductionRec, VarFieldMap);
4033
18
    llvm::Value *BufferToGlobalRedFn = ::emitGlobalToListReduceFunction(
4034
18
        CGM, Privates, ReductionArrayTy, Loc, TeamReductionRec, VarFieldMap,
4035
18
        ReductionFn);
4036
4037
18
    llvm::Value *Args[] = {
4038
18
        RTLoc,
4039
18
        ThreadId,
4040
18
        GlobalBufferPtr,
4041
18
        CGF.Builder.getInt32(C.getLangOpts().OpenMPCUDAReductionBufNum),
4042
18
        RL,
4043
18
        ShuffleAndReduceFn,
4044
18
        InterWarpCopyFn,
4045
18
        GlobalToBufferCpyFn,
4046
18
        GlobalToBufferRedFn,
4047
18
        BufferToGlobalCpyFn,
4048
18
        BufferToGlobalRedFn};
4049
4050
18
    Res = CGF.EmitRuntimeCall(
4051
18
        OMPBuilder.getOrCreateRuntimeFunction(
4052
18
            CGM.getModule(), OMPRTL___kmpc_nvptx_teams_reduce_nowait_v2),
4053
18
        Args);
4054
18
  }
4055
4056
  // 5. Build if (res == 1)
4057
39
  llvm::BasicBlock *ExitBB = CGF.createBasicBlock(".omp.reduction.done");
4058
39
  llvm::BasicBlock *ThenBB = CGF.createBasicBlock(".omp.reduction.then");
4059
39
  llvm::Value *Cond = CGF.Builder.CreateICmpEQ(
4060
39
      Res, llvm::ConstantInt::get(CGM.Int32Ty, /*V=*/1));
4061
39
  CGF.Builder.CreateCondBr(Cond, ThenBB, ExitBB);
4062
4063
  // 6. Build then branch: where we have reduced values in the master
4064
  //    thread in each team.
4065
  //    __kmpc_end_reduce{_nowait}(<gtid>);
4066
  //    break;
4067
39
  CGF.EmitBlock(ThenBB);
4068
4069
  // Add emission of __kmpc_end_reduce{_nowait}(<gtid>);
4070
39
  auto &&CodeGen = [Privates, LHSExprs, RHSExprs, ReductionOps,
4071
39
                    this](CodeGenFunction &CGF, PrePostActionTy &Action) {
4072
39
    auto IPriv = Privates.begin();
4073
39
    auto ILHS = LHSExprs.begin();
4074
39
    auto IRHS = RHSExprs.begin();
4075
63
    for (const Expr *E : ReductionOps) {
4076
63
      emitSingleReductionCombiner(CGF, E, *IPriv, cast<DeclRefExpr>(*ILHS),
4077
63
                                  cast<DeclRefExpr>(*IRHS));
4078
63
      ++IPriv;
4079
63
      ++ILHS;
4080
63
      ++IRHS;
4081
63
    }
4082
39
  };
4083
39
  llvm::Value *EndArgs[] = {ThreadId};
4084
39
  RegionCodeGenTy RCG(CodeGen);
4085
39
  NVPTXActionTy Action(
4086
39
      nullptr, llvm::None,
4087
39
      OMPBuilder.getOrCreateRuntimeFunction(
4088
39
          CGM.getModule(), OMPRTL___kmpc_nvptx_end_reduce_nowait),
4089
39
      EndArgs);
4090
39
  RCG.setAction(Action);
4091
39
  RCG(CGF);
4092
  // There is no need to emit line number for unconditional branch.
4093
39
  (void)ApplyDebugLocation::CreateEmpty(CGF);
4094
39
  CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
4095
39
}
4096
4097
const VarDecl *
4098
CGOpenMPRuntimeGPU::translateParameter(const FieldDecl *FD,
4099
79
                                       const VarDecl *NativeParam) const {
4100
79
  if (!NativeParam->getType()->isReferenceType())
4101
25
    return NativeParam;
4102
54
  QualType ArgType = NativeParam->getType();
4103
54
  QualifierCollector QC;
4104
54
  const Type *NonQualTy = QC.strip(ArgType);
4105
54
  QualType PointeeTy = cast<ReferenceType>(NonQualTy)->getPointeeType();
4106
54
  if (const auto *Attr = FD->getAttr<OMPCaptureKindAttr>()) {
4107
54
    if (Attr->getCaptureKind() == OMPC_map) {
4108
38
      PointeeTy = CGM.getContext().getAddrSpaceQualType(PointeeTy,
4109
38
                                                        LangAS::opencl_global);
4110
16
    } else if (Attr->getCaptureKind() == OMPC_firstprivate &&
4111
16
               PointeeTy.isConstant(CGM.getContext())) {
4112
2
      PointeeTy = CGM.getContext().getAddrSpaceQualType(PointeeTy,
4113
2
                                                        LangAS::opencl_generic);
4114
2
    }
4115
54
  }
4116
54
  ArgType = CGM.getContext().getPointerType(PointeeTy);
4117
54
  QC.addRestrict();
4118
54
  enum { NVPTX_local_addr = 5 };
4119
54
  QC.addAddressSpace(getLangASFromTargetAS(NVPTX_local_addr));
4120
54
  ArgType = QC.apply(CGM.getContext(), ArgType);
4121
54
  if (isa<ImplicitParamDecl>(NativeParam))
4122
0
    return ImplicitParamDecl::Create(
4123
0
        CGM.getContext(), /*DC=*/nullptr, NativeParam->getLocation(),
4124
0
        NativeParam->getIdentifier(), ArgType, ImplicitParamDecl::Other);
4125
54
  return ParmVarDecl::Create(
4126
54
      CGM.getContext(),
4127
54
      const_cast<DeclContext *>(NativeParam->getDeclContext()),
4128
54
      NativeParam->getBeginLoc(), NativeParam->getLocation(),
4129
54
      NativeParam->getIdentifier(), ArgType,
4130
54
      /*TInfo=*/nullptr, SC_None, /*DefArg=*/nullptr);
4131
54
}
4132
4133
Address
4134
CGOpenMPRuntimeGPU::getParameterAddress(CodeGenFunction &CGF,
4135
                                          const VarDecl *NativeParam,
4136
54
                                          const VarDecl *TargetParam) const {
4137
54
  assert(NativeParam != TargetParam &&
4138
54
         NativeParam->getType()->isReferenceType() &&
4139
54
         "Native arg must not be the same as target arg.");
4140
54
  Address LocalAddr = CGF.GetAddrOfLocalVar(TargetParam);
4141
54
  QualType NativeParamType = NativeParam->getType();
4142
54
  QualifierCollector QC;
4143
54
  const Type *NonQualTy = QC.strip(NativeParamType);
4144
54
  QualType NativePointeeTy = cast<ReferenceType>(NonQualTy)->getPointeeType();
4145
54
  unsigned NativePointeeAddrSpace =
4146
54
      CGF.getContext().getTargetAddressSpace(NativePointeeTy);
4147
54
  QualType TargetTy = TargetParam->getType();
4148
54
  llvm::Value *TargetAddr = CGF.EmitLoadOfScalar(
4149
54
      LocalAddr, /*Volatile=*/false, TargetTy, SourceLocation());
4150
  // First cast to generic.
4151
54
  TargetAddr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
4152
54
      TargetAddr, TargetAddr->getType()->getPointerElementType()->getPointerTo(
4153
54
                      /*AddrSpace=*/0));
4154
  // Cast from generic to native address space.
4155
54
  TargetAddr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
4156
54
      TargetAddr, TargetAddr->getType()->getPointerElementType()->getPointerTo(
4157
54
                      NativePointeeAddrSpace));
4158
54
  Address NativeParamAddr = CGF.CreateMemTemp(NativeParamType);
4159
54
  CGF.EmitStoreOfScalar(TargetAddr, NativeParamAddr, /*Volatile=*/false,
4160
54
                        NativeParamType);
4161
54
  return NativeParamAddr;
4162
54
}
4163
4164
void CGOpenMPRuntimeGPU::emitOutlinedFunctionCall(
4165
    CodeGenFunction &CGF, SourceLocation Loc, llvm::FunctionCallee OutlinedFn,
4166
1.29k
    ArrayRef<llvm::Value *> Args) const {
4167
1.29k
  SmallVector<llvm::Value *, 4> TargetArgs;
4168
1.29k
  TargetArgs.reserve(Args.size());
4169
1.29k
  auto *FnType = OutlinedFn.getFunctionType();
4170
5.78k
  for (unsigned I = 0, E = Args.size(); I < E; ++I) {
4171
4.49k
    if (FnType->isVarArg() && FnType->getNumParams() <= I) {
4172
1
      TargetArgs.append(std::next(Args.begin(), I), Args.end());
4173
1
      break;
4174
1
    }
4175
4.48k
    llvm::Type *TargetType = FnType->getParamType(I);
4176
4.48k
    llvm::Value *NativeArg = Args[I];
4177
4.48k
    if (!TargetType->isPointerTy()) {
4178
1.18k
      TargetArgs.emplace_back(NativeArg);
4179
1.18k
      continue;
4180
1.18k
    }
4181
3.30k
    llvm::Value *TargetArg = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
4182
3.30k
        NativeArg,
4183
3.30k
        NativeArg->getType()->getPointerElementType()->getPointerTo());
4184
3.30k
    TargetArgs.emplace_back(
4185
3.30k
        CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(TargetArg, TargetType));
4186
3.30k
  }
4187
1.29k
  CGOpenMPRuntime::emitOutlinedFunctionCall(CGF, Loc, OutlinedFn, TargetArgs);
4188
1.29k
}
4189
4190
/// Emits a function which wraps the outlined parallel region
4191
/// and controls the arguments which are passed to this function.
4192
/// The wrapper ensures that the outlined function is called
4193
/// with the correct arguments when data is shared.
4194
llvm::Function *CGOpenMPRuntimeGPU::createParallelDataSharingWrapper(
4195
74
    llvm::Function *OutlinedParallelFn, const OMPExecutableDirective &D) {
4196
74
  ASTContext &Ctx = CGM.getContext();
4197
74
  const auto &CS = *D.getCapturedStmt(OMPD_parallel);
4198
4199
  // Create a function that takes as argument the source thread.
4200
74
  FunctionArgList WrapperArgs;
4201
74
  QualType Int16QTy =
4202
74
      Ctx.getIntTypeForBitwidth(/*DestWidth=*/16, /*Signed=*/false);
4203
74
  QualType Int32QTy =
4204
74
      Ctx.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/false);
4205
74
  ImplicitParamDecl ParallelLevelArg(Ctx, /*DC=*/nullptr, D.getBeginLoc(),
4206
74
                                     /*Id=*/nullptr, Int16QTy,
4207
74
                                     ImplicitParamDecl::Other);
4208
74
  ImplicitParamDecl WrapperArg(Ctx, /*DC=*/nullptr, D.getBeginLoc(),
4209
74
                               /*Id=*/nullptr, Int32QTy,
4210
74
                               ImplicitParamDecl::Other);
4211
74
  WrapperArgs.emplace_back(&ParallelLevelArg);
4212
74
  WrapperArgs.emplace_back(&WrapperArg);
4213
4214
74
  const CGFunctionInfo &CGFI =
4215
74
      CGM.getTypes().arrangeBuiltinFunctionDeclaration(Ctx.VoidTy, WrapperArgs);
4216
4217
74
  auto *Fn = llvm::Function::Create(
4218
74
      CGM.getTypes().GetFunctionType(CGFI), llvm::GlobalValue::InternalLinkage,
4219
74
      Twine(OutlinedParallelFn->getName(), "_wrapper"), &CGM.getModule());
4220
74
  CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, CGFI);
4221
74
  Fn->setLinkage(llvm::GlobalValue::InternalLinkage);
4222
74
  Fn->setDoesNotRecurse();
4223
4224
74
  CodeGenFunction CGF(CGM, /*suppressNewContext=*/true);
4225
74
  CGF.StartFunction(GlobalDecl(), Ctx.VoidTy, Fn, CGFI, WrapperArgs,
4226
74
                    D.getBeginLoc(), D.getBeginLoc());
4227
4228
74
  const auto *RD = CS.getCapturedRecordDecl();
4229
74
  auto CurField = RD->field_begin();
4230
4231
74
  Address ZeroAddr = CGF.CreateDefaultAlignTempAlloca(CGF.Int32Ty,
4232
74
                                                      /*Name=*/".zero.addr");
4233
74
  CGF.InitTempAlloca(ZeroAddr, CGF.Builder.getInt32(/*C*/ 0));
4234
  // Get the array of arguments.
4235
74
  SmallVector<llvm::Value *, 8> Args;
4236
4237
74
  Args.emplace_back(CGF.GetAddrOfLocalVar(&WrapperArg).getPointer());
4238
74
  Args.emplace_back(ZeroAddr.getPointer());
4239
4240
74
  CGBuilderTy &Bld = CGF.Builder;
4241
74
  auto CI = CS.capture_begin();
4242
4243
  // Use global memory for data sharing.
4244
  // Handle passing of global args to workers.
4245
74
  Address GlobalArgs =
4246
74
      CGF.CreateDefaultAlignTempAlloca(CGF.VoidPtrPtrTy, "global_args");
4247
74
  llvm::Value *GlobalArgsPtr = GlobalArgs.getPointer();
4248
74
  llvm::Value *DataSharingArgs[] = {GlobalArgsPtr};
4249
74
  CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
4250
74
                          CGM.getModule(), OMPRTL___kmpc_get_shared_variables),
4251
74
                      DataSharingArgs);
4252
4253
  // Retrieve the shared variables from the list of references returned
4254
  // by the runtime. Pass the variables to the outlined function.
4255
74
  Address SharedArgListAddress = Address::invalid();
4256
74
  if (CS.capture_size() > 0 ||
4257
47
      isOpenMPLoopBoundSharingDirective(D.getDirectiveKind())) {
4258
27
    SharedArgListAddress = CGF.EmitLoadOfPointer(
4259
27
        GlobalArgs, CGF.getContext()
4260
27
                        .getPointerType(CGF.getContext().getPointerType(
4261
27
                            CGF.getContext().VoidPtrTy))
4262
27
                        .castAs<PointerType>());
4263
27
  }
4264
74
  unsigned Idx = 0;
4265
74
  if (isOpenMPLoopBoundSharingDirective(D.getDirectiveKind())) {
4266
0
    Address Src = Bld.CreateConstInBoundsGEP(SharedArgListAddress, Idx);
4267
0
    Address TypedAddress = Bld.CreatePointerBitCastOrAddrSpaceCast(
4268
0
        Src, CGF.SizeTy->getPointerTo());
4269
0
    llvm::Value *LB = CGF.EmitLoadOfScalar(
4270
0
        TypedAddress,
4271
0
        /*Volatile=*/false,
4272
0
        CGF.getContext().getPointerType(CGF.getContext().getSizeType()),
4273
0
        cast<OMPLoopDirective>(D).getLowerBoundVariable()->getExprLoc());
4274
0
    Args.emplace_back(LB);
4275
0
    ++Idx;
4276
0
    Src = Bld.CreateConstInBoundsGEP(SharedArgListAddress, Idx);
4277
0
    TypedAddress = Bld.CreatePointerBitCastOrAddrSpaceCast(
4278
0
        Src, CGF.SizeTy->getPointerTo());
4279
0
    llvm::Value *UB = CGF.EmitLoadOfScalar(
4280
0
        TypedAddress,
4281
0
        /*Volatile=*/false,
4282
0
        CGF.getContext().getPointerType(CGF.getContext().getSizeType()),
4283
0
        cast<OMPLoopDirective>(D).getUpperBoundVariable()->getExprLoc());
4284
0
    Args.emplace_back(UB);
4285
0
    ++Idx;
4286
0
  }
4287
74
  if (CS.capture_size() > 0) {
4288
27
    ASTContext &CGFContext = CGF.getContext();
4289
73
    for (unsigned I = 0, E = CS.capture_size(); I < E; ++I, ++CI, ++CurField) {
4290
46
      QualType ElemTy = CurField->getType();
4291
46
      Address Src = Bld.CreateConstInBoundsGEP(SharedArgListAddress, I + Idx);
4292
46
      Address TypedAddress = Bld.CreatePointerBitCastOrAddrSpaceCast(
4293
46
          Src, CGF.ConvertTypeForMem(CGFContext.getPointerType(ElemTy)));
4294
46
      llvm::Value *Arg = CGF.EmitLoadOfScalar(TypedAddress,
4295
46
                                              /*Volatile=*/false,
4296
46
                                              CGFContext.getPointerType(ElemTy),
4297
46
                                              CI->getLocation());
4298
46
      if (CI->capturesVariableByCopy() &&
4299
0
          !CI->getCapturedVar()->getType()->isAnyPointerType()) {
4300
0
        Arg = castValueToType(CGF, Arg, ElemTy, CGFContext.getUIntPtrType(),
4301
0
                              CI->getLocation());
4302
0
      }
4303
46
      Args.emplace_back(Arg);
4304
46
    }
4305
27
  }
4306
4307
74
  emitOutlinedFunctionCall(CGF, D.getBeginLoc(), OutlinedParallelFn, Args);
4308
74
  CGF.FinishFunction();
4309
74
  return Fn;
4310
74
}
4311
4312
void CGOpenMPRuntimeGPU::emitFunctionProlog(CodeGenFunction &CGF,
4313
2.51k
                                              const Decl *D) {
4314
2.51k
  if (getDataSharingMode(CGM) != CGOpenMPRuntimeGPU::Generic)
4315
632
    return;
4316
4317
1.88k
  assert(D && "Expected function or captured|block decl.");
4318
1.88k
  assert(FunctionGlobalizedDecls.count(CGF.CurFn) == 0 &&
4319
1.88k
         "Function is registered already.");
4320
1.88k
  assert((!TeamAndReductions.first || TeamAndReductions.first == D) &&
4321
1.88k
         "Team is set but not processed.");
4322
1.88k
  const Stmt *Body = nullptr;
4323
1.88k
  bool NeedToDelayGlobalization = false;
4324
1.88k
  if (const auto *FD = dyn_cast<FunctionDecl>(D)) {
4325
449
    Body = FD->getBody();
4326
1.43k
  } else if (const auto *BD = dyn_cast<BlockDecl>(D)) {
4327
0
    Body = BD->getBody();
4328
1.43k
  } else if (const auto *CD = dyn_cast<CapturedDecl>(D)) {
4329
1.43k
    Body = CD->getBody();
4330
1.43k
    NeedToDelayGlobalization = CGF.CapturedStmtInfo->getKind() == CR_OpenMP;
4331
1.43k
    if (NeedToDelayGlobalization &&
4332
1.43k
        getExecutionMode() == CGOpenMPRuntimeGPU::EM_SPMD)
4333
1.10k
      return;
4334
777
  }
4335
777
  if (!Body)
4336
0
    return;
4337
777
  CheckVarsEscapingDeclContext VarChecker(CGF, TeamAndReductions.second);
4338
777
  VarChecker.Visit(Body);
4339
777
  const RecordDecl *GlobalizedVarsRecord =
4340
777
      VarChecker.getGlobalizedRecord(IsInTTDRegion);
4341
777
  TeamAndReductions.first = nullptr;
4342
777
  TeamAndReductions.second.clear();
4343
777
  ArrayRef<const ValueDecl *> EscapedVariableLengthDecls =
4344
777
      VarChecker.getEscapedVariableLengthDecls();
4345
777
  if (!GlobalizedVarsRecord && EscapedVariableLengthDecls.empty())
4346
690
    return;
4347
87
  auto I = FunctionGlobalizedDecls.try_emplace(CGF.CurFn).first;
4348
87
  I->getSecond().MappedParams =
4349
87
      std::make_unique<CodeGenFunction::OMPMapVars>();
4350
87
  I->getSecond().GlobalRecord = GlobalizedVarsRecord;
4351
87
  I->getSecond().EscapedParameters.insert(
4352
87
      VarChecker.getEscapedParameters().begin(),
4353
87
      VarChecker.getEscapedParameters().end());
4354
87
  I->getSecond().EscapedVariableLengthDecls.append(
4355
87
      EscapedVariableLengthDecls.begin(), EscapedVariableLengthDecls.end());
4356
87
  DeclToAddrMapTy &Data = I->getSecond().LocalVarData;
4357
117
  for (const ValueDecl *VD : VarChecker.getEscapedDecls()) {
4358
117
    assert(VD->isCanonicalDecl() && "Expected canonical declaration");
4359
117
    const FieldDecl *FD = VarChecker.getFieldForGlobalizedVar(VD);
4360
117
    Data.insert(std::make_pair(VD, MappedVarData(FD, IsInTTDRegion)));
4361
117
  }
4362
87
  if (!IsInTTDRegion && !NeedToDelayGlobalization && !IsInParallelRegion) {
4363
10
    CheckVarsEscapingDeclContext VarChecker(CGF, llvm::None);
4364
10
    VarChecker.Visit(Body);
4365
10
    I->getSecond().SecondaryGlobalRecord =
4366
10
        VarChecker.getGlobalizedRecord(/*IsInTTDRegion=*/true);
4367
10
    I->getSecond().SecondaryLocalVarData.emplace();
4368
10
    DeclToAddrMapTy &Data = I->getSecond().SecondaryLocalVarData.getValue();
4369
10
    for (const ValueDecl *VD : VarChecker.getEscapedDecls()) {
4370
10
      assert(VD->isCanonicalDecl() && "Expected canonical declaration");
4371
10
      const FieldDecl *FD = VarChecker.getFieldForGlobalizedVar(VD);
4372
10
      Data.insert(
4373
10
          std::make_pair(VD, MappedVarData(FD, /*IsInTTDRegion=*/true)));
4374
10
    }
4375
10
  }
4376
87
  if (!NeedToDelayGlobalization) {
4377
11
    emitGenericVarsProlog(CGF, D->getBeginLoc(), /*WithSPMDCheck=*/true);
4378
11
    struct GlobalizationScope final : EHScopeStack::Cleanup {
4379
11
      GlobalizationScope() = default;
4380
4381
11
      void Emit(CodeGenFunction &CGF, Flags flags) override {
4382
11
        static_cast<CGOpenMPRuntimeGPU &>(CGF.CGM.getOpenMPRuntime())
4383
11
            .emitGenericVarsEpilog(CGF, /*WithSPMDCheck=*/true);
4384
11
      }
4385
11
    };
4386
11
    CGF.EHStack.pushCleanup<GlobalizationScope>(NormalAndEHCleanup);
4387
11
  }
4388
87
}
4389
4390
Address CGOpenMPRuntimeGPU::getAddressOfLocalVariable(CodeGenFunction &CGF,
4391
13.6k
                                                        const VarDecl *VD) {
4392
13.6k
  if (VD && VD->hasAttr<OMPAllocateDeclAttr>()) {
4393
6
    const auto *A = VD->getAttr<OMPAllocateDeclAttr>();
4394
6
    auto AS = LangAS::Default;
4395
6
    switch (A->getAllocatorType()) {
4396
      // Use the default allocator here as by default local vars are
4397
      // threadlocal.
4398
2
    case OMPAllocateDeclAttr::OMPNullMemAlloc:
4399
2
    case OMPAllocateDeclAttr::OMPDefaultMemAlloc:
4400
2
    case OMPAllocateDeclAttr::OMPThreadMemAlloc:
4401
2
    case OMPAllocateDeclAttr::OMPHighBWMemAlloc:
4402
2
    case OMPAllocateDeclAttr::OMPLowLatMemAlloc:
4403
      // Follow the user decision - use default allocation.
4404
2
      return Address::invalid();
4405
0
    case OMPAllocateDeclAttr::OMPUserDefinedMemAlloc:
4406
// TODO: implement support for user-defined allocators.
4407
0
      return Address::invalid();
4408
0
    case OMPAllocateDeclAttr::OMPConstMemAlloc:
4409
0
      AS = LangAS::cuda_constant;
4410
0
      break;
4411
1
    case OMPAllocateDeclAttr::OMPPTeamMemAlloc:
4412
1
      AS = LangAS::cuda_shared;
4413
1
      break;
4414
3
    case OMPAllocateDeclAttr::OMPLargeCapMemAlloc:
4415
3
    case OMPAllocateDeclAttr::OMPCGroupMemAlloc:
4416
3
      break;
4417
4
    }
4418
4
    llvm::Type *VarTy = CGF.ConvertTypeForMem(VD->getType());
4419
4
    auto *GV = new llvm::GlobalVariable(
4420
4
        CGM.getModule(), VarTy, /*isConstant=*/false,
4421
4
        llvm::GlobalValue::InternalLinkage, llvm::Constant::getNullValue(VarTy),
4422
4
        VD->getName(),
4423
4
        /*InsertBefore=*/nullptr, llvm::GlobalValue::NotThreadLocal,
4424
4
        CGM.getContext().getTargetAddressSpace(AS));
4425
4
    CharUnits Align = CGM.getContext().getDeclAlign(VD);
4426
4
    GV->setAlignment(Align.getAsAlign());
4427
4
    return Address(
4428
4
        CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
4429
4
            GV, VarTy->getPointerTo(CGM.getContext().getTargetAddressSpace(
4430
4
                    VD->getType().getAddressSpace()))),
4431
4
        Align);
4432
4
  }
4433
4434
13.6k
  if (getDataSharingMode(CGM) != CGOpenMPRuntimeGPU::Generic)
4435
5.24k
    return Address::invalid();
4436
4437
8.37k
  VD = VD->getCanonicalDecl();
4438
8.37k
  auto I = FunctionGlobalizedDecls.find(CGF.CurFn);
4439
8.37k
  if (I == FunctionGlobalizedDecls.end())
4440
7.85k
    return Address::invalid();
4441
521
  auto VDI = I->getSecond().LocalVarData.find(VD);
4442
521
  if (VDI != I->getSecond().LocalVarData.end())
4443
72
    return VDI->second.PrivateAddr;
4444
449
  if (VD->hasAttrs()) {
4445
74
    for (specific_attr_iterator<OMPReferencedVarAttr> IT(VD->attr_begin()),
4446
74
         E(VD->attr_end());
4447
122
         IT != E; ++IT) {
4448
74
      auto VDI = I->getSecond().LocalVarData.find(
4449
74
          cast<VarDecl>(cast<DeclRefExpr>(IT->getRef())->getDecl())
4450
74
              ->getCanonicalDecl());
4451
74
      if (VDI != I->getSecond().LocalVarData.end())
4452
26
        return VDI->second.PrivateAddr;
4453
74
    }
4454
74
  }
4455
4456
423
  return Address::invalid();
4457
449
}
4458
4459
3.02k
void CGOpenMPRuntimeGPU::functionFinished(CodeGenFunction &CGF) {
4460
3.02k
  FunctionGlobalizedDecls.erase(CGF.CurFn);
4461
3.02k
  CGOpenMPRuntime::functionFinished(CGF);
4462
3.02k
}
4463
4464
void CGOpenMPRuntimeGPU::getDefaultDistScheduleAndChunk(
4465
    CodeGenFunction &CGF, const OMPLoopDirective &S,
4466
    OpenMPDistScheduleClauseKind &ScheduleKind,
4467
378
    llvm::Value *&Chunk) const {
4468
378
  auto &RT = static_cast<CGOpenMPRuntimeGPU &>(CGF.CGM.getOpenMPRuntime());
4469
378
  if (getExecutionMode() == CGOpenMPRuntimeGPU::EM_SPMD) {
4470
366
    ScheduleKind = OMPC_DIST_SCHEDULE_static;
4471
366
    Chunk = CGF.EmitScalarConversion(
4472
366
        RT.getGPUNumThreads(CGF),
4473
366
        CGF.getContext().getIntTypeForBitwidth(32, /*Signed=*/0),
4474
366
        S.getIterationVariable()->getType(), S.getBeginLoc());
4475
366
    return;
4476
366
  }
4477
12
  CGOpenMPRuntime::getDefaultDistScheduleAndChunk(
4478
12
      CGF, S, ScheduleKind, Chunk);
4479
12
}
4480
4481
void CGOpenMPRuntimeGPU::getDefaultScheduleAndChunk(
4482
    CodeGenFunction &CGF, const OMPLoopDirective &S,
4483
    OpenMPScheduleClauseKind &ScheduleKind,
4484
200
    const Expr *&ChunkExpr) const {
4485
200
  ScheduleKind = OMPC_SCHEDULE_static;
4486
  // Chunk size is 1 in this case.
4487
200
  llvm::APInt ChunkSize(32, 1);
4488
200
  ChunkExpr = IntegerLiteral::Create(CGF.getContext(), ChunkSize,
4489
200
      CGF.getContext().getIntTypeForBitwidth(32, /*Signed=*/0),
4490
200
      SourceLocation());
4491
200
}
4492
4493
void CGOpenMPRuntimeGPU::adjustTargetSpecificDataForLambdas(
4494
1.10k
    CodeGenFunction &CGF, const OMPExecutableDirective &D) const {
4495
1.10k
  assert(isOpenMPTargetExecutionDirective(D.getDirectiveKind()) &&
4496
1.10k
         " Expected target-based directive.");
4497
1.10k
  const CapturedStmt *CS = D.getCapturedStmt(OMPD_target);
4498
1.29k
  for (const CapturedStmt::Capture &C : CS->captures()) {
4499
    // Capture variables captured by reference in lambdas for target-based
4500
    // directives.
4501
1.29k
    if (!C.capturesVariable())
4502
663
      continue;
4503
636
    const VarDecl *VD = C.getCapturedVar();
4504
636
    const auto *RD = VD->getType()
4505
636
                         .getCanonicalType()
4506
636
                         .getNonReferenceType()
4507
636
                         ->getAsCXXRecordDecl();
4508
636
    if (!RD || !RD->isLambda())
4509
621
      continue;
4510
15
    Address VDAddr = CGF.GetAddrOfLocalVar(VD);
4511
15
    LValue VDLVal;
4512
15
    if (VD->getType().getCanonicalType()->isReferenceType())
4513
15
      VDLVal = CGF.EmitLoadOfReferenceLValue(VDAddr, VD->getType());
4514
0
    else
4515
0
      VDLVal = CGF.MakeAddrLValue(
4516
0
          VDAddr, VD->getType().getCanonicalType().getNonReferenceType());
4517
15
    llvm::DenseMap<const VarDecl *, FieldDecl *> Captures;
4518
15
    FieldDecl *ThisCapture = nullptr;
4519
15
    RD->getCaptureFields(Captures, ThisCapture);
4520
15
    if (ThisCapture && CGF.CapturedStmtInfo->isCXXThisExprCaptured()) {
4521
6
      LValue ThisLVal =
4522
6
          CGF.EmitLValueForFieldInitialization(VDLVal, ThisCapture);
4523
6
      llvm::Value *CXXThis = CGF.LoadCXXThis();
4524
6
      CGF.EmitStoreOfScalar(CXXThis, ThisLVal);
4525
6
    }
4526
39
    for (const LambdaCapture &LC : RD->captures()) {
4527
39
      if (LC.getCaptureKind() != LCK_ByRef)
4528
9
        continue;
4529
30
      const VarDecl *VD = LC.getCapturedVar();
4530
30
      if (!CS->capturesVariable(VD))
4531
0
        continue;
4532
30
      auto It = Captures.find(VD);
4533
30
      assert(It != Captures.end() && "Found lambda capture without field.");
4534
30
      LValue VarLVal = CGF.EmitLValueForFieldInitialization(VDLVal, It->second);
4535
30
      Address VDAddr = CGF.GetAddrOfLocalVar(VD);
4536
30
      if (VD->getType().getCanonicalType()->isReferenceType())
4537
12
        VDAddr = CGF.EmitLoadOfReferenceLValue(VDAddr,
4538
12
                                               VD->getType().getCanonicalType())
4539
12
                     .getAddress(CGF);
4540
30
      CGF.EmitStoreOfScalar(VDAddr.getPointer(), VarLVal);
4541
30
    }
4542
15
  }
4543
1.10k
}
4544
4545
2
unsigned CGOpenMPRuntimeGPU::getDefaultFirstprivateAddressSpace() const {
4546
2
  return CGM.getContext().getTargetAddressSpace(LangAS::cuda_constant);
4547
2
}
4548
4549
bool CGOpenMPRuntimeGPU::hasAllocateAttributeForGlobalVar(const VarDecl *VD,
4550
98
                                                            LangAS &AS) {
4551
98
  if (!VD || !VD->hasAttr<OMPAllocateDeclAttr>())
4552
85
    return false;
4553
13
  const auto *A = VD->getAttr<OMPAllocateDeclAttr>();
4554
13
  switch(A->getAllocatorType()) {
4555
9
  case OMPAllocateDeclAttr::OMPNullMemAlloc:
4556
9
  case OMPAllocateDeclAttr::OMPDefaultMemAlloc:
4557
  // Not supported, fallback to the default mem space.
4558
9
  case OMPAllocateDeclAttr::OMPThreadMemAlloc:
4559
9
  case OMPAllocateDeclAttr::OMPLargeCapMemAlloc:
4560
9
  case OMPAllocateDeclAttr::OMPCGroupMemAlloc:
4561
9
  case OMPAllocateDeclAttr::OMPHighBWMemAlloc:
4562
9
  case OMPAllocateDeclAttr::OMPLowLatMemAlloc:
4563
9
    AS = LangAS::Default;
4564
9
    return true;
4565
2
  case OMPAllocateDeclAttr::OMPConstMemAlloc:
4566
2
    AS = LangAS::cuda_constant;
4567
2
    return true;
4568
2
  case OMPAllocateDeclAttr::OMPPTeamMemAlloc:
4569
2
    AS = LangAS::cuda_shared;
4570
2
    return true;
4571
0
  case OMPAllocateDeclAttr::OMPUserDefinedMemAlloc:
4572
0
    llvm_unreachable("Expected predefined allocator for the variables with the "
4573
0
                     "static storage.");
4574
0
  }
4575
0
  return false;
4576
0
}
4577
4578
// Get current CudaArch and ignore any unknown values
4579
17
static CudaArch getCudaArch(CodeGenModule &CGM) {
4580
17
  if (!CGM.getTarget().hasFeature("ptx"))
4581
0
    return CudaArch::UNKNOWN;
4582
33
  for (const auto &Feature : CGM.getTarget().getTargetOpts().FeatureMap) {
4583
33
    if (Feature.getValue()) {
4584
33
      CudaArch Arch = StringToCudaArch(Feature.getKey());
4585
33
      if (Arch != CudaArch::UNKNOWN)
4586
17
        return Arch;
4587
33
    }
4588
33
  }
4589
0
  return CudaArch::UNKNOWN;
4590
17
}
4591
4592
/// Check whether the target architecture supports unified addressing, which
4593
/// is required by the OpenMP requires clause "unified_shared_memory".
4594
void CGOpenMPRuntimeGPU::processRequiresDirective(
4595
17
    const OMPRequiresDecl *D) {
4596
17
  for (const OMPClause *Clause : D->clauselists()) {
4597
17
    if (Clause->getClauseKind() == OMPC_unified_shared_memory) {
4598
17
      CudaArch Arch = getCudaArch(CGM);
4599
17
      switch (Arch) {
4600
12
      case CudaArch::SM_20:
4601
12
      case CudaArch::SM_21:
4602
12
      case CudaArch::SM_30:
4603
12
      case CudaArch::SM_32:
4604
12
      case CudaArch::SM_35:
4605
12
      case CudaArch::SM_37:
4606
12
      case CudaArch::SM_50:
4607
12
      case CudaArch::SM_52:
4608
12
      case CudaArch::SM_53:
4609
12
      case CudaArch::SM_60:
4610
12
      case CudaArch::SM_61:
4611
12
      case CudaArch::SM_62: {
4612
12
        SmallString<256> Buffer;
4613
12
        llvm::raw_svector_ostream Out(Buffer);
4614
12
        Out << "Target architecture " << CudaArchToString(Arch)
4615
12
            << " does not support unified addressing";
4616
12
        CGM.Error(Clause->getBeginLoc(), Out.str());
4617
12
        return;
4618
12
      }
4619
5
      case CudaArch::SM_70:
4620
5
      case CudaArch::SM_72:
4621
5
      case CudaArch::SM_75:
4622
5
      case CudaArch::SM_80:
4623
5
      case CudaArch::GFX600:
4624
5
      case CudaArch::GFX601:
4625
5
      case CudaArch::GFX602:
4626
5
      case CudaArch::GFX700:
4627
5
      case CudaArch::GFX701:
4628
5
      case CudaArch::GFX702:
4629
5
      case CudaArch::GFX703:
4630
5
      case CudaArch::GFX704:
4631
5
      case CudaArch::GFX705:
4632
5
      case CudaArch::GFX801:
4633
5
      case CudaArch::GFX802:
4634
5
      case CudaArch::GFX803:
4635
5
      case CudaArch::GFX805:
4636
5
      case CudaArch::GFX810:
4637
5
      case CudaArch::GFX900:
4638
5
      case CudaArch::GFX902:
4639
5
      case CudaArch::GFX904:
4640
5
      case CudaArch::GFX906:
4641
5
      case CudaArch::GFX908:
4642
5
      case CudaArch::GFX909:
4643
5
      case CudaArch::GFX1010:
4644
5
      case CudaArch::GFX1011:
4645
5
      case CudaArch::GFX1012:
4646
5
      case CudaArch::GFX1030:
4647
5
      case CudaArch::GFX1031:
4648
5
      case CudaArch::GFX1032:
4649
5
      case CudaArch::UNUSED:
4650
5
      case CudaArch::UNKNOWN:
4651
5
        break;
4652
0
      case CudaArch::LAST:
4653
0
        llvm_unreachable("Unexpected Cuda arch.");
4654
17
      }
4655
17
    }
4656
17
  }
4657
5
  CGOpenMPRuntime::processRequiresDirective(D);
4658
5
}
4659
4660
/// Get number of SMs and number of blocks per SM.
4661
0
static std::pair<unsigned, unsigned> getSMsBlocksPerSM(CodeGenModule &CGM) {
4662
0
  std::pair<unsigned, unsigned> Data;
4663
0
  if (CGM.getLangOpts().OpenMPCUDANumSMs)
4664
0
    Data.first = CGM.getLangOpts().OpenMPCUDANumSMs;
4665
0
  if (CGM.getLangOpts().OpenMPCUDABlocksPerSM)
4666
0
    Data.second = CGM.getLangOpts().OpenMPCUDABlocksPerSM;
4667
0
  if (Data.first && Data.second)
4668
0
    return Data;
4669
0
  switch (getCudaArch(CGM)) {
4670
0
  case CudaArch::SM_20:
4671
0
  case CudaArch::SM_21:
4672
0
  case CudaArch::SM_30:
4673
0
  case CudaArch::SM_32:
4674
0
  case CudaArch::SM_35:
4675
0
  case CudaArch::SM_37:
4676
0
  case CudaArch::SM_50:
4677
0
  case CudaArch::SM_52:
4678
0
  case CudaArch::SM_53:
4679
0
    return {16, 16};
4680
0
  case CudaArch::SM_60:
4681
0
  case CudaArch::SM_61:
4682
0
  case CudaArch::SM_62:
4683
0
    return {56, 32};
4684
0
  case CudaArch::SM_70:
4685
0
  case CudaArch::SM_72:
4686
0
  case CudaArch::SM_75:
4687
0
  case CudaArch::SM_80:
4688
0
    return {84, 32};
4689
0
  case CudaArch::GFX600:
4690
0
  case CudaArch::GFX601:
4691
0
  case CudaArch::GFX602:
4692
0
  case CudaArch::GFX700:
4693
0
  case CudaArch::GFX701:
4694
0
  case CudaArch::GFX702:
4695
0
  case CudaArch::GFX703:
4696
0
  case CudaArch::GFX704:
4697
0
  case CudaArch::GFX705:
4698
0
  case CudaArch::GFX801:
4699
0
  case CudaArch::GFX802:
4700
0
  case CudaArch::GFX803:
4701
0
  case CudaArch::GFX805:
4702
0
  case CudaArch::GFX810:
4703
0
  case CudaArch::GFX900:
4704
0
  case CudaArch::GFX902:
4705
0
  case CudaArch::GFX904:
4706
0
  case CudaArch::GFX906:
4707
0
  case CudaArch::GFX908:
4708
0
  case CudaArch::GFX909:
4709
0
  case CudaArch::GFX1010:
4710
0
  case CudaArch::GFX1011:
4711
0
  case CudaArch::GFX1012:
4712
0
  case CudaArch::GFX1030:
4713
0
  case CudaArch::GFX1031:
4714
0
  case CudaArch::GFX1032:
4715
0
  case CudaArch::UNUSED:
4716
0
  case CudaArch::UNKNOWN:
4717
0
    break;
4718
0
  case CudaArch::LAST:
4719
0
    llvm_unreachable("Unexpected Cuda arch.");
4720
0
  }
4721
0
  llvm_unreachable("Unexpected NVPTX target without ptx feature.");
4722
0
}
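
The pair {NumSMs, BlocksPerSM} returned here is consumed in clear() below, where it shapes the fallback global buffer as StaticTy[NumSMs][BlocksPerSM]; explicit values from OpenMPCUDANumSMs/OpenMPCUDABlocksPerSM take precedence over the per-arch table. A standalone sketch of that NVPTX table, with hypothetical names and no compiler dependencies (the GFX cases break out of the switch and reach llvm_unreachable, so the sketch omits them):

    #include <stdexcept>
    #include <utility>

    // Hypothetical mirror of the fallback table; SMVersion is the numeric
    // part of the sm_XX architecture name.
    static std::pair<unsigned, unsigned> smsBlocksPerSM(unsigned SMVersion) {
      if (SMVersion < 60)   // sm_20 .. sm_53
        return {16, 16};
      if (SMVersion < 70)   // sm_60 .. sm_62
        return {56, 32};
      if (SMVersion <= 80)  // sm_70 .. sm_80
        return {84, 32};
      throw std::invalid_argument("unexpected SM version");
    }

For example, smsBlocksPerSM(70) yields {84, 32}, matching the SM_70 case.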
4723
4724
213
void CGOpenMPRuntimeGPU::clear() {
4725
213
  if (!GlobalizedRecords.empty() &&
4726
190
      !CGM.getLangOpts().OpenMPCUDATargetParallel) {
4727
155
    ASTContext &C = CGM.getContext();
4728
155
    llvm::SmallVector<const GlobalPtrSizeRecsTy *, 4> GlobalRecs;
4729
155
    llvm::SmallVector<const GlobalPtrSizeRecsTy *, 4> SharedRecs;
4730
155
    RecordDecl *StaticRD = C.buildImplicitRecord(
4731
155
        "_openmp_static_memory_type_$_", RecordDecl::TagKind::TTK_Union);
4732
155
    StaticRD->startDefinition();
4733
155
    RecordDecl *SharedStaticRD = C.buildImplicitRecord(
4734
155
        "_shared_openmp_static_memory_type_$_", RecordDecl::TagKind::TTK_Union);
4735
155
    SharedStaticRD->startDefinition();
4736
756
    for (const GlobalPtrSizeRecsTy &Records : GlobalizedRecords) {
4737
756
      if (Records.Records.empty())
4738
681
        continue;
4739
75
      unsigned Size = 0;
4740
75
      unsigned RecAlignment = 0;
4741
79
      for (const RecordDecl *RD : Records.Records) {
4742
79
        QualType RDTy = C.getRecordType(RD);
4743
79
        unsigned Alignment = C.getTypeAlignInChars(RDTy).getQuantity();
4744
79
        RecAlignment = std::max(RecAlignment, Alignment);
4745
79
        unsigned RecSize = C.getTypeSizeInChars(RDTy).getQuantity();
4746
79
        Size =
4747
79
            llvm::alignTo(llvm::alignTo(Size, Alignment) + RecSize, Alignment);
4748
79
      }
4749
75
      Size = llvm::alignTo(Size, RecAlignment);
4750
75
      llvm::APInt ArySize(/*numBits=*/64, Size);
4751
75
      QualType SubTy = C.getConstantArrayType(
4752
75
          C.CharTy, ArySize, nullptr, ArrayType::Normal, /*IndexTypeQuals=*/0);
4753
75
      const bool UseSharedMemory = Size <= SharedMemorySize;
4754
75
      auto *Field =
4755
75
          FieldDecl::Create(C, UseSharedMemory ? SharedStaticRD : StaticRD,
4756
75
                            SourceLocation(), SourceLocation(), nullptr, SubTy,
4757
75
                            C.getTrivialTypeSourceInfo(SubTy, SourceLocation()),
4758
75
                            /*BW=*/nullptr, /*Mutable=*/false,
4759
75
                            /*InitStyle=*/ICIS_NoInit);
4760
75
      Field->setAccess(AS_public);
4761
75
      if (UseSharedMemory) {
4762
75
        SharedStaticRD->addDecl(Field);
4763
75
        SharedRecs.push_back(&Records);
4764
0
      } else {
4765
0
        StaticRD->addDecl(Field);
4766
0
        GlobalRecs.push_back(&Records);
4767
0
      }
4768
75
      Records.RecSize->setInitializer(llvm::ConstantInt::get(CGM.SizeTy, Size));
4769
75
      Records.UseSharedMemory->setInitializer(
4770
75
          llvm::ConstantInt::get(CGM.Int16Ty, UseSharedMemory ? 1 : 0));
4771
75
    }
4772
    // Allocate a buffer of SharedMemorySize bytes in shared memory.
4773
    // FIXME: nvlink does not handle weak linkage correctly (objects with
4774
    // different sizes are reported as erroneous).
4775
    // Restore this code as soon as nvlink is fixed.
4776
155
    if (!SharedStaticRD->field_empty()) {
4777
60
      llvm::APInt ArySize(/*numBits=*/64, SharedMemorySize);
4778
60
      QualType SubTy = C.getConstantArrayType(
4779
60
          C.CharTy, ArySize, nullptr, ArrayType::Normal, /*IndexTypeQuals=*/0);
4780
60
      auto *Field = FieldDecl::Create(
4781
60
          C, SharedStaticRD, SourceLocation(), SourceLocation(), nullptr, SubTy,
4782
60
          C.getTrivialTypeSourceInfo(SubTy, SourceLocation()),
4783
60
          /*BW=*/nullptr, /*Mutable=*/false,
4784
60
          /*InitStyle=*/ICIS_NoInit);
4785
60
      Field->setAccess(AS_public);
4786
60
      SharedStaticRD->addDecl(Field);
4787
60
    }
4788
155
    SharedStaticRD->completeDefinition();
4789
155
    if (!SharedStaticRD->field_empty()) {
4790
60
      QualType StaticTy = C.getRecordType(SharedStaticRD);
4791
60
      llvm::Type *LLVMStaticTy = CGM.getTypes().ConvertTypeForMem(StaticTy);
4792
60
      auto *GV = new llvm::GlobalVariable(
4793
60
          CGM.getModule(), LLVMStaticTy,
4794
60
          /*isConstant=*/false, llvm::GlobalValue::CommonLinkage,
4795
60
          llvm::Constant::getNullValue(LLVMStaticTy),
4796
60
          "_openmp_shared_static_glob_rd_$_", /*InsertBefore=*/nullptr,
4797
60
          llvm::GlobalValue::NotThreadLocal,
4798
60
          C.getTargetAddressSpace(LangAS::cuda_shared));
4799
60
      auto *Replacement = llvm::ConstantExpr::getPointerBitCastOrAddrSpaceCast(
4800
60
          GV, CGM.VoidPtrTy);
4801
75
      for (const GlobalPtrSizeRecsTy *Rec : SharedRecs) {
4802
75
        Rec->Buffer->replaceAllUsesWith(Replacement);
4803
75
        Rec->Buffer->eraseFromParent();
4804
75
      }
4805
60
    }
4806
155
    StaticRD->completeDefinition();
4807
155
    if (!StaticRD->field_empty()) {
4808
0
      QualType StaticTy = C.getRecordType(StaticRD);
4809
0
      std::pair<unsigned, unsigned> SMsBlockPerSM = getSMsBlocksPerSM(CGM);
4810
0
      llvm::APInt Size1(32, SMsBlockPerSM.second);
4811
0
      QualType Arr1Ty =
4812
0
          C.getConstantArrayType(StaticTy, Size1, nullptr, ArrayType::Normal,
4813
0
                                 /*IndexTypeQuals=*/0);
4814
0
      llvm::APInt Size2(32, SMsBlockPerSM.first);
4815
0
      QualType Arr2Ty =
4816
0
          C.getConstantArrayType(Arr1Ty, Size2, nullptr, ArrayType::Normal,
4817
0
                                 /*IndexTypeQuals=*/0);
4818
0
      llvm::Type *LLVMArr2Ty = CGM.getTypes().ConvertTypeForMem(Arr2Ty);
4819
      // FIXME: nvlink does not handle weak linkage correctly (objects with
4820
      // different sizes are reported as erroneous).
4821
      // Restore CommonLinkage as soon as nvlink is fixed.
4822
0
      auto *GV = new llvm::GlobalVariable(
4823
0
          CGM.getModule(), LLVMArr2Ty,
4824
0
          /*isConstant=*/false, llvm::GlobalValue::InternalLinkage,
4825
0
          llvm::Constant::getNullValue(LLVMArr2Ty),
4826
0
          "_openmp_static_glob_rd_$_");
4827
0
      auto *Replacement = llvm::ConstantExpr::getPointerBitCastOrAddrSpaceCast(
4828
0
          GV, CGM.VoidPtrTy);
4829
0
      for (const GlobalPtrSizeRecsTy *Rec : GlobalRecs) {
4830
0
        Rec->Buffer->replaceAllUsesWith(Replacement);
4831
0
        Rec->Buffer->eraseFromParent();
4832
0
      }
4833
0
    }
4834
155
  }
4835
213
  if (!TeamsReductions.empty()) {
4836
6
    ASTContext &C = CGM.getContext();
4837
6
    RecordDecl *StaticRD = C.buildImplicitRecord(
4838
6
        "_openmp_teams_reduction_type_$_", RecordDecl::TagKind::TTK_Union);
4839
6
    StaticRD->startDefinition();
4840
18
    for (const RecordDecl *TeamReductionRec : TeamsReductions) {
4841
18
      QualType RecTy = C.getRecordType(TeamReductionRec);
4842
18
      auto *Field = FieldDecl::Create(
4843
18
          C, StaticRD, SourceLocation(), SourceLocation(), nullptr, RecTy,
4844
18
          C.getTrivialTypeSourceInfo(RecTy, SourceLocation()),
4845
18
          /*BW=*/nullptr, /*Mutable=*/false,
4846
18
          /*InitStyle=*/ICIS_NoInit);
4847
18
      Field->setAccess(AS_public);
4848
18
      StaticRD->addDecl(Field);
4849
18
    }
4850
6
    StaticRD->completeDefinition();
4851
6
    QualType StaticTy = C.getRecordType(StaticRD);
4852
6
    llvm::Type *LLVMReductionsBufferTy =
4853
6
        CGM.getTypes().ConvertTypeForMem(StaticTy);
4854
    // FIXME: nvlink does not handle weak linkage correctly (objects with
4855
    // different sizes are reported as erroneous).
4856
    // Restore CommonLinkage as soon as nvlink is fixed.
4857
6
    auto *GV = new llvm::GlobalVariable(
4858
6
        CGM.getModule(), LLVMReductionsBufferTy,
4859
6
        /*isConstant=*/false, llvm::GlobalValue::InternalLinkage,
4860
6
        llvm::Constant::getNullValue(LLVMReductionsBufferTy),
4861
6
        "_openmp_teams_reductions_buffer_$_");
4862
6
    KernelTeamsReductionPtr->setInitializer(
4863
6
        llvm::ConstantExpr::getPointerBitCastOrAddrSpaceCast(GV,
4864
6
                                                             CGM.VoidPtrTy));
4865
6
  }
4866
213
  CGOpenMPRuntime::clear();
4867
213
}
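
The layout logic in clear() above reduces to: place each globalized record at the next offset aligned for it, pad the running size so the following record also starts aligned, and use the final total to pick shared memory (total <= SharedMemorySize) or the per-SM global array shaped by getSMsBlocksPerSM. A standalone sketch of just the size accumulation, with hypothetical names:

    #include <algorithm>
    #include <cstdint>
    #include <vector>

    // Round Value up to the next multiple of Align (same contract as
    // llvm::alignTo for these inputs).
    static uint64_t alignTo(uint64_t Value, uint64_t Align) {
      return (Value + Align - 1) / Align * Align;
    }

    struct RecLayout { uint64_t Size, Align; };

    static uint64_t totalGlobalizedSize(const std::vector<RecLayout> &Recs) {
      uint64_t Size = 0, MaxAlign = 1;
      for (const RecLayout &R : Recs) {
        MaxAlign = std::max(MaxAlign, R.Align);
        // Start this record aligned, then re-pad so the next one can too.
        Size = alignTo(alignTo(Size, R.Align) + R.Size, R.Align);
      }
      return alignTo(Size, MaxAlign); // final pad to the strictest alignment
    }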