Coverage Report

Created: 2022-05-14 11:35

/Users/buildslave/jenkins/workspace/coverage/llvm-project/clang/lib/CodeGen/CGOpenMPRuntimeGPU.cpp

//===---- CGOpenMPRuntimeGPU.cpp - Interface to OpenMP GPU Runtimes ----===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This provides a generalized class for OpenMP runtime code generation
// specialized by GPU targets NVPTX and AMDGCN.
//
//===----------------------------------------------------------------------===//

#include "CGOpenMPRuntimeGPU.h"
#include "CodeGenFunction.h"
#include "clang/AST/Attr.h"
#include "clang/AST/DeclOpenMP.h"
#include "clang/AST/StmtOpenMP.h"
#include "clang/AST/StmtVisitor.h"
#include "clang/Basic/Cuda.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/Frontend/OpenMP/OMPGridValues.h"
#include "llvm/Support/MathExtras.h"

using namespace clang;
using namespace CodeGen;
using namespace llvm::omp;

namespace {
/// Pre(post)-action for different OpenMP constructs specialized for NVPTX.
class NVPTXActionTy final : public PrePostActionTy {
  llvm::FunctionCallee EnterCallee = nullptr;
  ArrayRef<llvm::Value *> EnterArgs;
  llvm::FunctionCallee ExitCallee = nullptr;
  ArrayRef<llvm::Value *> ExitArgs;
  bool Conditional = false;
  llvm::BasicBlock *ContBlock = nullptr;

public:
  NVPTXActionTy(llvm::FunctionCallee EnterCallee,
                ArrayRef<llvm::Value *> EnterArgs,
                llvm::FunctionCallee ExitCallee,
                ArrayRef<llvm::Value *> ExitArgs, bool Conditional = false)
      : EnterCallee(EnterCallee), EnterArgs(EnterArgs), ExitCallee(ExitCallee),
        ExitArgs(ExitArgs), Conditional(Conditional) {}
  void Enter(CodeGenFunction &CGF) override {
    llvm::Value *EnterRes = CGF.EmitRuntimeCall(EnterCallee, EnterArgs);
    if (Conditional) {
      llvm::Value *CallBool = CGF.Builder.CreateIsNotNull(EnterRes);
      auto *ThenBlock = CGF.createBasicBlock("omp_if.then");
      ContBlock = CGF.createBasicBlock("omp_if.end");
      // Generate the branch (If-stmt)
      CGF.Builder.CreateCondBr(CallBool, ThenBlock, ContBlock);
      CGF.EmitBlock(ThenBlock);
    }
  }
  void Done(CodeGenFunction &CGF) {
    // Emit the rest of blocks/branches
    CGF.EmitBranch(ContBlock);
    CGF.EmitBlock(ContBlock, true);
  }
  void Exit(CodeGenFunction &CGF) override {
    CGF.EmitRuntimeCall(ExitCallee, ExitArgs);
  }
};
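
// Usage note (editorial sketch, not part of the upstream file): an action is
// attached to a RegionCodeGenTy so the paired runtime calls bracket the region
// body, e.g.
//   llvm::Value *Args[] = {RTLoc, ThreadID};
//   NVPTXActionTy Action(EnterFn, Args, ExitFn, Args, /*Conditional=*/true);
//   CodeGen.setAction(Action);
// With Conditional == true, Enter() emits the branch into "omp_if.then" and
// Done() must be called to close the "omp_if.end" continuation block.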

/// A class to track the execution mode when codegening directives within
/// a target region. The appropriate mode (SPMD|NON-SPMD) is set on entry
/// to the target region and used by containing directives such as 'parallel'
/// to emit optimized code.
class ExecutionRuntimeModesRAII {
private:
  CGOpenMPRuntimeGPU::ExecutionMode SavedExecMode =
      CGOpenMPRuntimeGPU::EM_Unknown;
  CGOpenMPRuntimeGPU::ExecutionMode &ExecMode;
  bool SavedRuntimeMode = false;
  bool *RuntimeMode = nullptr;

public:
  /// Constructor for Non-SPMD mode.
  ExecutionRuntimeModesRAII(CGOpenMPRuntimeGPU::ExecutionMode &ExecMode)
      : ExecMode(ExecMode) {
    SavedExecMode = ExecMode;
    ExecMode = CGOpenMPRuntimeGPU::EM_NonSPMD;
  }
  /// Constructor for SPMD mode.
  ExecutionRuntimeModesRAII(CGOpenMPRuntimeGPU::ExecutionMode &ExecMode,
                            bool &RuntimeMode, bool FullRuntimeMode)
      : ExecMode(ExecMode), RuntimeMode(&RuntimeMode) {
    SavedExecMode = ExecMode;
    SavedRuntimeMode = RuntimeMode;
    ExecMode = CGOpenMPRuntimeGPU::EM_SPMD;
    RuntimeMode = FullRuntimeMode;
  }
  ~ExecutionRuntimeModesRAII() {
    ExecMode = SavedExecMode;
    if (RuntimeMode)
      *RuntimeMode = SavedRuntimeMode;
  }
};
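
// Usage note (editorial sketch): the RAII object is constructed at kernel
// entry, e.g. `ExecutionRuntimeModesRAII ModeRAII(CurrentExecutionMode);` in
// emitNonSPMDKernel() below, so CurrentExecutionMode (and, for SPMD kernels,
// RequiresFullRuntime) is restored no matter how codegen of the target region
// exits.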

/// GPU Configuration:  This information can be derived from cuda registers,
/// however, providing compile time constants helps generate more efficient
/// code.  For all practical purposes this is fine because the configuration
/// is the same for all known NVPTX architectures.
enum MachineConfiguration : unsigned {
  /// See "llvm/Frontend/OpenMP/OMPGridValues.h" for various related target
  /// specific Grid Values like GV_Warp_Size, GV_Slot_Size

  /// Global memory alignment for performance.
  GlobalMemoryAlignment = 128,

  /// Maximal size of the shared memory buffer.
  SharedMemorySize = 128,
};

static const ValueDecl *getPrivateItem(const Expr *RefExpr) {
  RefExpr = RefExpr->IgnoreParens();
  if (const auto *ASE = dyn_cast<ArraySubscriptExpr>(RefExpr)) {
    const Expr *Base = ASE->getBase()->IgnoreParenImpCasts();
    while (const auto *TempASE = dyn_cast<ArraySubscriptExpr>(Base))
      Base = TempASE->getBase()->IgnoreParenImpCasts();
    RefExpr = Base;
  } else if (auto *OASE = dyn_cast<OMPArraySectionExpr>(RefExpr)) {
    const Expr *Base = OASE->getBase()->IgnoreParenImpCasts();
    while (const auto *TempOASE = dyn_cast<OMPArraySectionExpr>(Base))
      Base = TempOASE->getBase()->IgnoreParenImpCasts();
    while (const auto *TempASE = dyn_cast<ArraySubscriptExpr>(Base))
      Base = TempASE->getBase()->IgnoreParenImpCasts();
    RefExpr = Base;
  }
  RefExpr = RefExpr->IgnoreParenImpCasts();
  if (const auto *DE = dyn_cast<DeclRefExpr>(RefExpr))
    return cast<ValueDecl>(DE->getDecl()->getCanonicalDecl());
  const auto *ME = cast<MemberExpr>(RefExpr);
  return cast<ValueDecl>(ME->getMemberDecl()->getCanonicalDecl());
}

static RecordDecl *buildRecordForGlobalizedVars(
    ASTContext &C, ArrayRef<const ValueDecl *> EscapedDecls,
    ArrayRef<const ValueDecl *> EscapedDeclsForTeams,
    llvm::SmallDenseMap<const ValueDecl *, const FieldDecl *>
        &MappedDeclsFields, int BufSize) {
  using VarsDataTy = std::pair<CharUnits /*Align*/, const ValueDecl *>;
  if (EscapedDecls.empty() && EscapedDeclsForTeams.empty())
    return nullptr;
  SmallVector<VarsDataTy, 4> GlobalizedVars;
  for (const ValueDecl *D : EscapedDecls)
    GlobalizedVars.emplace_back(
        CharUnits::fromQuantity(std::max(
            C.getDeclAlign(D).getQuantity(),
            static_cast<CharUnits::QuantityType>(GlobalMemoryAlignment))),
        D);
  for (const ValueDecl *D : EscapedDeclsForTeams)
    GlobalizedVars.emplace_back(C.getDeclAlign(D), D);
  llvm::stable_sort(GlobalizedVars, [](VarsDataTy L, VarsDataTy R) {
    return L.first > R.first;
  });

  // Build struct _globalized_locals_ty {
  //         /*  globalized vars  */[WarpSize] align (max(decl_align,
  //         GlobalMemoryAlignment))
  //         /*  globalized vars  */ for EscapedDeclsForTeams
  //       };
  RecordDecl *GlobalizedRD = C.buildImplicitRecord("_globalized_locals_ty");
  GlobalizedRD->startDefinition();
  llvm::SmallPtrSet<const ValueDecl *, 16> SingleEscaped(
      EscapedDeclsForTeams.begin(), EscapedDeclsForTeams.end());
  for (const auto &Pair : GlobalizedVars) {
    const ValueDecl *VD = Pair.second;
    QualType Type = VD->getType();
    if (Type->isLValueReferenceType())
      Type = C.getPointerType(Type.getNonReferenceType());
    else
      Type = Type.getNonReferenceType();
    SourceLocation Loc = VD->getLocation();
    FieldDecl *Field;
    if (SingleEscaped.count(VD)) {
      Field = FieldDecl::Create(
          C, GlobalizedRD, Loc, Loc, VD->getIdentifier(), Type,
          C.getTrivialTypeSourceInfo(Type, SourceLocation()),
          /*BW=*/nullptr, /*Mutable=*/false,
          /*InitStyle=*/ICIS_NoInit);
      Field->setAccess(AS_public);
      if (VD->hasAttrs()) {
        for (specific_attr_iterator<AlignedAttr> I(VD->getAttrs().begin()),
             E(VD->getAttrs().end());
             I != E; ++I)
          Field->addAttr(*I);
      }
    } else {
      llvm::APInt ArraySize(32, BufSize);
      Type = C.getConstantArrayType(Type, ArraySize, nullptr, ArrayType::Normal,
                                    0);
      Field = FieldDecl::Create(
          C, GlobalizedRD, Loc, Loc, VD->getIdentifier(), Type,
          C.getTrivialTypeSourceInfo(Type, SourceLocation()),
          /*BW=*/nullptr, /*Mutable=*/false,
          /*InitStyle=*/ICIS_NoInit);
      Field->setAccess(AS_public);
      llvm::APInt Align(32, std::max(C.getDeclAlign(VD).getQuantity(),
                                     static_cast<CharUnits::QuantityType>(
                                         GlobalMemoryAlignment)));
      Field->addAttr(AlignedAttr::CreateImplicit(
          C, /*IsAlignmentExpr=*/true,
          IntegerLiteral::Create(C, Align,
                                 C.getIntTypeForBitwidth(32, /*Signed=*/0),
                                 SourceLocation()),
          {}, AttributeCommonInfo::AS_GNU, AlignedAttr::GNU_aligned));
    }
    GlobalizedRD->addDecl(Field);
    MappedDeclsFields.try_emplace(VD, Field);
  }
  GlobalizedRD->completeDefinition();
  return GlobalizedRD;
}
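
// Illustrative result (editorial sketch): for an `int a` escaping a parallel
// region (EscapedDecls, with BufSize == warp size 32) and an `int b` escaping
// a teams region (EscapedDeclsForTeams), the implicit record is roughly
//   struct _globalized_locals_ty {
//     int a[32] __attribute__((aligned(128)));  // one slot per lane
//     int b;                                    // single slot
//   };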

/// Get the list of variables that can escape their declaration context.
class CheckVarsEscapingDeclContext final
    : public ConstStmtVisitor<CheckVarsEscapingDeclContext> {
  CodeGenFunction &CGF;
  llvm::SetVector<const ValueDecl *> EscapedDecls;
  llvm::SetVector<const ValueDecl *> EscapedVariableLengthDecls;
  llvm::SmallPtrSet<const Decl *, 4> EscapedParameters;
  RecordDecl *GlobalizedRD = nullptr;
  llvm::SmallDenseMap<const ValueDecl *, const FieldDecl *> MappedDeclsFields;
  bool AllEscaped = false;
  bool IsForCombinedParallelRegion = false;

  void markAsEscaped(const ValueDecl *VD) {
    // Do not globalize declare target variables.
    if (!isa<VarDecl>(VD) ||
        OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD))
      return;
    VD = cast<ValueDecl>(VD->getCanonicalDecl());
    // Use user-specified allocation.
    if (VD->hasAttrs() && VD->hasAttr<OMPAllocateDeclAttr>())
      return;
    // Variables captured by value must be globalized.
    if (auto *CSI = CGF.CapturedStmtInfo) {
      if (const FieldDecl *FD = CSI->lookup(cast<VarDecl>(VD))) {
        // Check if we need to capture the variable that was already captured
        // by value in the outer region.
        if (!IsForCombinedParallelRegion) {
          if (!FD->hasAttrs())
            return;
          const auto *Attr = FD->getAttr<OMPCaptureKindAttr>();
          if (!Attr)
            return;
          if (((Attr->getCaptureKind() != OMPC_map) &&
               !isOpenMPPrivate(Attr->getCaptureKind())) ||
              ((Attr->getCaptureKind() == OMPC_map) &&
               !FD->getType()->isAnyPointerType()))
            return;
        }
        if (!FD->getType()->isReferenceType()) {
          assert(!VD->getType()->isVariablyModifiedType() &&
                 "Parameter captured by value with variably modified type");
          EscapedParameters.insert(VD);
        } else if (!IsForCombinedParallelRegion) {
          return;
        }
      }
    }
    if ((!CGF.CapturedStmtInfo ||
         (IsForCombinedParallelRegion && CGF.CapturedStmtInfo)) &&
        VD->getType()->isReferenceType())
      // Do not globalize variables with reference type.
      return;
    if (VD->getType()->isVariablyModifiedType())
      EscapedVariableLengthDecls.insert(VD);
    else
      EscapedDecls.insert(VD);
  }

  void VisitValueDecl(const ValueDecl *VD) {
    if (VD->getType()->isLValueReferenceType())
      markAsEscaped(VD);
    if (const auto *VarD = dyn_cast<VarDecl>(VD)) {
      if (!isa<ParmVarDecl>(VarD) && VarD->hasInit()) {
        const bool SavedAllEscaped = AllEscaped;
        AllEscaped = VD->getType()->isLValueReferenceType();
        Visit(VarD->getInit());
        AllEscaped = SavedAllEscaped;
      }
    }
  }
  void VisitOpenMPCapturedStmt(const CapturedStmt *S,
                               ArrayRef<OMPClause *> Clauses,
                               bool IsCombinedParallelRegion) {
    if (!S)
      return;
    for (const CapturedStmt::Capture &C : S->captures()) {
      if (C.capturesVariable() && !C.capturesVariableByCopy()) {
        const ValueDecl *VD = C.getCapturedVar();
        bool SavedIsForCombinedParallelRegion = IsForCombinedParallelRegion;
        if (IsCombinedParallelRegion) {
          // Check if the variable is privatized in the combined construct and
          // those private copies must be shared in the inner parallel
          // directive.
          IsForCombinedParallelRegion = false;
          for (const OMPClause *C : Clauses) {
            if (!isOpenMPPrivate(C->getClauseKind()) ||
                C->getClauseKind() == OMPC_reduction ||
                C->getClauseKind() == OMPC_linear ||
                C->getClauseKind() == OMPC_private)
              continue;
            ArrayRef<const Expr *> Vars;
            if (const auto *PC = dyn_cast<OMPFirstprivateClause>(C))
              Vars = PC->getVarRefs();
            else if (const auto *PC = dyn_cast<OMPLastprivateClause>(C))
              Vars = PC->getVarRefs();
            else
              llvm_unreachable("Unexpected clause.");
            for (const auto *E : Vars) {
              const Decl *D =
                  cast<DeclRefExpr>(E)->getDecl()->getCanonicalDecl();
              if (D == VD->getCanonicalDecl()) {
                IsForCombinedParallelRegion = true;
                break;
              }
            }
            if (IsForCombinedParallelRegion)
              break;
          }
        }
        markAsEscaped(VD);
        if (isa<OMPCapturedExprDecl>(VD))
          VisitValueDecl(VD);
        IsForCombinedParallelRegion = SavedIsForCombinedParallelRegion;
      }
    }
  }

  void buildRecordForGlobalizedVars(bool IsInTTDRegion) {
    assert(!GlobalizedRD &&
           "Record for globalized variables is built already.");
    ArrayRef<const ValueDecl *> EscapedDeclsForParallel, EscapedDeclsForTeams;
    unsigned WarpSize = CGF.getTarget().getGridValue().GV_Warp_Size;
    if (IsInTTDRegion)
      EscapedDeclsForTeams = EscapedDecls.getArrayRef();
    else
      EscapedDeclsForParallel = EscapedDecls.getArrayRef();
    GlobalizedRD = ::buildRecordForGlobalizedVars(
        CGF.getContext(), EscapedDeclsForParallel, EscapedDeclsForTeams,
        MappedDeclsFields, WarpSize);
  }

public:
  CheckVarsEscapingDeclContext(CodeGenFunction &CGF,
                               ArrayRef<const ValueDecl *> TeamsReductions)
      : CGF(CGF), EscapedDecls(TeamsReductions.begin(), TeamsReductions.end()) {
  }
  virtual ~CheckVarsEscapingDeclContext() = default;
  void VisitDeclStmt(const DeclStmt *S) {
    if (!S)
      return;
    for (const Decl *D : S->decls())
      if (const auto *VD = dyn_cast_or_null<ValueDecl>(D))
        VisitValueDecl(VD);
  }
  void VisitOMPExecutableDirective(const OMPExecutableDirective *D) {
    if (!D)
      return;
    if (!D->hasAssociatedStmt())
      return;
    if (const auto *S =
            dyn_cast_or_null<CapturedStmt>(D->getAssociatedStmt())) {
      // Do not analyze directives that do not actually require capturing,
      // like `omp for` or `omp simd` directives.
      llvm::SmallVector<OpenMPDirectiveKind, 4> CaptureRegions;
      getOpenMPCaptureRegions(CaptureRegions, D->getDirectiveKind());
      if (CaptureRegions.size() == 1 && CaptureRegions.back() == OMPD_unknown) {
        VisitStmt(S->getCapturedStmt());
        return;
      }
      VisitOpenMPCapturedStmt(
          S, D->clauses(),
          CaptureRegions.back() == OMPD_parallel &&
              isOpenMPDistributeDirective(D->getDirectiveKind()));
    }
  }
  void VisitCapturedStmt(const CapturedStmt *S) {
    if (!S)
      return;
    for (const CapturedStmt::Capture &C : S->captures()) {
      if (C.capturesVariable() && !C.capturesVariableByCopy()) {
        const ValueDecl *VD = C.getCapturedVar();
        markAsEscaped(VD);
        if (isa<OMPCapturedExprDecl>(VD))
          VisitValueDecl(VD);
      }
    }
  }
  void VisitLambdaExpr(const LambdaExpr *E) {
    if (!E)
      return;
    for (const LambdaCapture &C : E->captures()) {
      if (C.capturesVariable()) {
        if (C.getCaptureKind() == LCK_ByRef) {
          const ValueDecl *VD = C.getCapturedVar();
          markAsEscaped(VD);
          if (E->isInitCapture(&C) || isa<OMPCapturedExprDecl>(VD))
            VisitValueDecl(VD);
        }
      }
    }
  }
  void VisitBlockExpr(const BlockExpr *E) {
    if (!E)
      return;
    for (const BlockDecl::Capture &C : E->getBlockDecl()->captures()) {
      if (C.isByRef()) {
        const VarDecl *VD = C.getVariable();
        markAsEscaped(VD);
        if (isa<OMPCapturedExprDecl>(VD) || VD->isInitCapture())
          VisitValueDecl(VD);
      }
    }
  }
  void VisitCallExpr(const CallExpr *E) {
    if (!E)
      return;
    for (const Expr *Arg : E->arguments()) {
      if (!Arg)
        continue;
      if (Arg->isLValue()) {
        const bool SavedAllEscaped = AllEscaped;
        AllEscaped = true;
        Visit(Arg);
        AllEscaped = SavedAllEscaped;
      } else {
        Visit(Arg);
      }
    }
    Visit(E->getCallee());
  }
  void VisitDeclRefExpr(const DeclRefExpr *E) {
    if (!E)
      return;
    const ValueDecl *VD = E->getDecl();
    if (AllEscaped)
      markAsEscaped(VD);
    if (isa<OMPCapturedExprDecl>(VD))
      VisitValueDecl(VD);
    else if (const auto *VarD = dyn_cast<VarDecl>(VD))
      if (VarD->isInitCapture())
        VisitValueDecl(VD);
  }
  void VisitUnaryOperator(const UnaryOperator *E) {
    if (!E)
      return;
    if (E->getOpcode() == UO_AddrOf) {
      const bool SavedAllEscaped = AllEscaped;
      AllEscaped = true;
      Visit(E->getSubExpr());
      AllEscaped = SavedAllEscaped;
    } else {
      Visit(E->getSubExpr());
    }
  }
  void VisitImplicitCastExpr(const ImplicitCastExpr *E) {
    if (!E)
      return;
    if (E->getCastKind() == CK_ArrayToPointerDecay) {
      const bool SavedAllEscaped = AllEscaped;
      AllEscaped = true;
      Visit(E->getSubExpr());
      AllEscaped = SavedAllEscaped;
    } else {
      Visit(E->getSubExpr());
    }
  }
  void VisitExpr(const Expr *E) {
    if (!E)
      return;
    bool SavedAllEscaped = AllEscaped;
    if (!E->isLValue())
      AllEscaped = false;
    for (const Stmt *Child : E->children())
      if (Child)
        Visit(Child);
    AllEscaped = SavedAllEscaped;
  }
  void VisitStmt(const Stmt *S) {
    if (!S)
      return;
    for (const Stmt *Child : S->children())
      if (Child)
        Visit(Child);
  }

  /// Returns the record that handles all the escaped local variables and is
  /// used instead of their original storage.
  const RecordDecl *getGlobalizedRecord(bool IsInTTDRegion) {
    if (!GlobalizedRD)
      buildRecordForGlobalizedVars(IsInTTDRegion);
    return GlobalizedRD;
  }

  /// Returns the field in the globalized record for the escaped variable.
  const FieldDecl *getFieldForGlobalizedVar(const ValueDecl *VD) const {
    assert(GlobalizedRD &&
           "Record for globalized variables must be generated already.");
    auto I = MappedDeclsFields.find(VD);
    if (I == MappedDeclsFields.end())
      return nullptr;
    return I->getSecond();
  }

  /// Returns the list of the escaped local variables/parameters.
  ArrayRef<const ValueDecl *> getEscapedDecls() const {
    return EscapedDecls.getArrayRef();
  }

  /// Checks if the escaped local variable is actually a parameter passed by
  /// value.
  const llvm::SmallPtrSetImpl<const Decl *> &getEscapedParameters() const {
    return EscapedParameters;
  }

  /// Returns the list of the escaped variables with the variably modified
  /// types.
  ArrayRef<const ValueDecl *> getEscapedVariableLengthDecls() const {
    return EscapedVariableLengthDecls.getArrayRef();
  }
};
} // anonymous namespace
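
// Usage note (editorial sketch): the visitor is typically driven over a
// target region body roughly as
//   CheckVarsEscapingDeclContext VarChecker(CGF, TeamsReductions);
//   VarChecker.Visit(Body);
//   const RecordDecl *RD = VarChecker.getGlobalizedRecord(IsInTTDRegion);
// after which the escaped declarations are rewritten to address fields of RD.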

/// Get the id of the warp in the block.
/// We assume that the warp size is 32, which is always the case
/// on the NVPTX device, to generate more efficient code.
static llvm::Value *getNVPTXWarpID(CodeGenFunction &CGF) {
  CGBuilderTy &Bld = CGF.Builder;
  unsigned LaneIDBits =
      llvm::Log2_32(CGF.getTarget().getGridValue().GV_Warp_Size);
  auto &RT = static_cast<CGOpenMPRuntimeGPU &>(CGF.CGM.getOpenMPRuntime());
  return Bld.CreateAShr(RT.getGPUThreadID(CGF), LaneIDBits, "nvptx_warp_id");
}

/// Get the id of the current lane in the Warp.
/// We assume that the warp size is 32, which is always the case
/// on the NVPTX device, to generate more efficient code.
static llvm::Value *getNVPTXLaneID(CodeGenFunction &CGF) {
  CGBuilderTy &Bld = CGF.Builder;
  unsigned LaneIDBits =
      llvm::Log2_32(CGF.getTarget().getGridValue().GV_Warp_Size);
  unsigned LaneIDMask = ~0u >> (32u - LaneIDBits);
  auto &RT = static_cast<CGOpenMPRuntimeGPU &>(CGF.CGM.getOpenMPRuntime());
  return Bld.CreateAnd(RT.getGPUThreadID(CGF), Bld.getInt32(LaneIDMask),
                       "nvptx_lane_id");
}
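
// Worked example: with GV_Warp_Size == 32, LaneIDBits == 5 and
// LaneIDMask == 0x1f, so GPU thread 70 has warp id 70 >> 5 == 2 and
// lane id 70 & 0x1f == 6.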
CGOpenMPRuntimeGPU::ExecutionMode
CGOpenMPRuntimeGPU::getExecutionMode() const {
  return CurrentExecutionMode;
}

static CGOpenMPRuntimeGPU::DataSharingMode
getDataSharingMode(CodeGenModule &CGM) {
  return CGM.getLangOpts().OpenMPCUDAMode ? CGOpenMPRuntimeGPU::CUDA
                                          : CGOpenMPRuntimeGPU::Generic;
}

/// Check for inner (nested) SPMD construct, if any
static bool hasNestedSPMDDirective(ASTContext &Ctx,
                                   const OMPExecutableDirective &D) {
  const auto *CS = D.getInnermostCapturedStmt();
  const auto *Body =
      CS->getCapturedStmt()->IgnoreContainers(/*IgnoreCaptured=*/true);
  const Stmt *ChildStmt = CGOpenMPRuntime::getSingleCompoundChild(Ctx, Body);

  if (const auto *NestedDir =
          dyn_cast_or_null<OMPExecutableDirective>(ChildStmt)) {
    OpenMPDirectiveKind DKind = NestedDir->getDirectiveKind();
    switch (D.getDirectiveKind()) {
    case OMPD_target:
      if (isOpenMPParallelDirective(DKind))
        return true;
      if (DKind == OMPD_teams) {
        Body = NestedDir->getInnermostCapturedStmt()->IgnoreContainers(
            /*IgnoreCaptured=*/true);
        if (!Body)
          return false;
        ChildStmt = CGOpenMPRuntime::getSingleCompoundChild(Ctx, Body);
        if (const auto *NND =
                dyn_cast_or_null<OMPExecutableDirective>(ChildStmt)) {
          DKind = NND->getDirectiveKind();
          if (isOpenMPParallelDirective(DKind))
            return true;
        }
      }
      return false;
    case OMPD_target_teams:
      return isOpenMPParallelDirective(DKind);
    case OMPD_target_simd:
    case OMPD_target_parallel:
    case OMPD_target_parallel_for:
    case OMPD_target_parallel_for_simd:
    case OMPD_target_teams_distribute:
    case OMPD_target_teams_distribute_simd:
    case OMPD_target_teams_distribute_parallel_for:
    case OMPD_target_teams_distribute_parallel_for_simd:
    case OMPD_parallel:
    case OMPD_for:
    case OMPD_parallel_for:
    case OMPD_parallel_master:
    case OMPD_parallel_sections:
    case OMPD_for_simd:
    case OMPD_parallel_for_simd:
    case OMPD_cancel:
    case OMPD_cancellation_point:
    case OMPD_ordered:
    case OMPD_threadprivate:
    case OMPD_allocate:
    case OMPD_task:
    case OMPD_simd:
    case OMPD_sections:
    case OMPD_section:
    case OMPD_single:
    case OMPD_master:
    case OMPD_critical:
    case OMPD_taskyield:
    case OMPD_barrier:
    case OMPD_taskwait:
    case OMPD_taskgroup:
    case OMPD_atomic:
    case OMPD_flush:
    case OMPD_depobj:
    case OMPD_scan:
    case OMPD_teams:
    case OMPD_target_data:
    case OMPD_target_exit_data:
    case OMPD_target_enter_data:
    case OMPD_distribute:
    case OMPD_distribute_simd:
    case OMPD_distribute_parallel_for:
    case OMPD_distribute_parallel_for_simd:
    case OMPD_teams_distribute:
    case OMPD_teams_distribute_simd:
    case OMPD_teams_distribute_parallel_for:
    case OMPD_teams_distribute_parallel_for_simd:
    case OMPD_target_update:
    case OMPD_declare_simd:
    case OMPD_declare_variant:
    case OMPD_begin_declare_variant:
    case OMPD_end_declare_variant:
    case OMPD_declare_target:
    case OMPD_end_declare_target:
    case OMPD_declare_reduction:
    case OMPD_declare_mapper:
    case OMPD_taskloop:
    case OMPD_taskloop_simd:
    case OMPD_master_taskloop:
    case OMPD_master_taskloop_simd:
    case OMPD_parallel_master_taskloop:
    case OMPD_parallel_master_taskloop_simd:
    case OMPD_requires:
    case OMPD_unknown:
    default:
      llvm_unreachable("Unexpected directive.");
    }
  }

  return false;
}
669
static bool supportsSPMDExecutionMode(ASTContext &Ctx,
                                      const OMPExecutableDirective &D) {
  OpenMPDirectiveKind DirectiveKind = D.getDirectiveKind();
  switch (DirectiveKind) {
  case OMPD_target:
  case OMPD_target_teams:
    return hasNestedSPMDDirective(Ctx, D);
  case OMPD_target_parallel:
  case OMPD_target_parallel_for:
  case OMPD_target_parallel_for_simd:
  case OMPD_target_teams_distribute_parallel_for:
  case OMPD_target_teams_distribute_parallel_for_simd:
  case OMPD_target_simd:
  case OMPD_target_teams_distribute_simd:
    return true;
  case OMPD_target_teams_distribute:
    return false;
  case OMPD_parallel:
  case OMPD_for:
  case OMPD_parallel_for:
  case OMPD_parallel_master:
  case OMPD_parallel_sections:
  case OMPD_for_simd:
  case OMPD_parallel_for_simd:
  case OMPD_cancel:
  case OMPD_cancellation_point:
  case OMPD_ordered:
  case OMPD_threadprivate:
  case OMPD_allocate:
  case OMPD_task:
  case OMPD_simd:
  case OMPD_sections:
  case OMPD_section:
  case OMPD_single:
  case OMPD_master:
  case OMPD_critical:
  case OMPD_taskyield:
  case OMPD_barrier:
  case OMPD_taskwait:
  case OMPD_taskgroup:
  case OMPD_atomic:
  case OMPD_flush:
  case OMPD_depobj:
  case OMPD_scan:
  case OMPD_teams:
  case OMPD_target_data:
  case OMPD_target_exit_data:
  case OMPD_target_enter_data:
  case OMPD_distribute:
  case OMPD_distribute_simd:
  case OMPD_distribute_parallel_for:
  case OMPD_distribute_parallel_for_simd:
  case OMPD_teams_distribute:
  case OMPD_teams_distribute_simd:
  case OMPD_teams_distribute_parallel_for:
  case OMPD_teams_distribute_parallel_for_simd:
  case OMPD_target_update:
  case OMPD_declare_simd:
  case OMPD_declare_variant:
  case OMPD_begin_declare_variant:
  case OMPD_end_declare_variant:
  case OMPD_declare_target:
  case OMPD_end_declare_target:
  case OMPD_declare_reduction:
  case OMPD_declare_mapper:
  case OMPD_taskloop:
  case OMPD_taskloop_simd:
  case OMPD_master_taskloop:
  case OMPD_master_taskloop_simd:
  case OMPD_parallel_master_taskloop:
  case OMPD_parallel_master_taskloop_simd:
  case OMPD_requires:
  case OMPD_unknown:
  default:
    break;
  }
  llvm_unreachable(
      "Unknown programming model for OpenMP directive on NVPTX target.");
}
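
// For example (illustrative): `#pragma omp target teams distribute parallel
// for` maps to SPMD mode, while a bare `#pragma omp target` whose body
// contains no nested parallel construct is emitted in generic (non-SPMD)
// mode, where one warp is reserved for the master thread.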
/// Check if the directive is loop-based and either has no schedule clause at
/// all or uses static scheduling.
static bool hasStaticScheduling(const OMPExecutableDirective &D) {
  assert(isOpenMPWorksharingDirective(D.getDirectiveKind()) &&
         isOpenMPLoopDirective(D.getDirectiveKind()) &&
         "Expected loop-based directive.");
  return !D.hasClausesOfKind<OMPOrderedClause>() &&
         (!D.hasClausesOfKind<OMPScheduleClause>() ||
          llvm::any_of(D.getClausesOfKind<OMPScheduleClause>(),
                       [](const OMPScheduleClause *C) {
                         return C->getScheduleKind() == OMPC_SCHEDULE_static;
                       }));
}
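
// For example (illustrative): `#pragma omp parallel for` and
// `#pragma omp parallel for schedule(static)` both count as statically
// scheduled here, while a `schedule(dynamic)` or an `ordered` clause makes
// this predicate return false.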
/// Check for inner (nested) lightweight runtime construct, if any
static bool hasNestedLightweightDirective(ASTContext &Ctx,
                                          const OMPExecutableDirective &D) {
  assert(supportsSPMDExecutionMode(Ctx, D) && "Expected SPMD mode directive.");
  const auto *CS = D.getInnermostCapturedStmt();
  const auto *Body =
      CS->getCapturedStmt()->IgnoreContainers(/*IgnoreCaptured=*/true);
  const Stmt *ChildStmt = CGOpenMPRuntime::getSingleCompoundChild(Ctx, Body);

  if (const auto *NestedDir =
          dyn_cast_or_null<OMPExecutableDirective>(ChildStmt)) {
    OpenMPDirectiveKind DKind = NestedDir->getDirectiveKind();
    switch (D.getDirectiveKind()) {
    case OMPD_target:
      if (isOpenMPParallelDirective(DKind) &&
          isOpenMPWorksharingDirective(DKind) && isOpenMPLoopDirective(DKind) &&
          hasStaticScheduling(*NestedDir))
        return true;
      if (DKind == OMPD_teams_distribute_simd || DKind == OMPD_simd)
        return true;
      if (DKind == OMPD_parallel) {
        Body = NestedDir->getInnermostCapturedStmt()->IgnoreContainers(
            /*IgnoreCaptured=*/true);
        if (!Body)
          return false;
        ChildStmt = CGOpenMPRuntime::getSingleCompoundChild(Ctx, Body);
        if (const auto *NND =
                dyn_cast_or_null<OMPExecutableDirective>(ChildStmt)) {
          DKind = NND->getDirectiveKind();
          if (isOpenMPWorksharingDirective(DKind) &&
              isOpenMPLoopDirective(DKind) && hasStaticScheduling(*NND))
            return true;
        }
      } else if (DKind == OMPD_teams) {
        Body = NestedDir->getInnermostCapturedStmt()->IgnoreContainers(
            /*IgnoreCaptured=*/true);
        if (!Body)
          return false;
        ChildStmt = CGOpenMPRuntime::getSingleCompoundChild(Ctx, Body);
        if (const auto *NND =
                dyn_cast_or_null<OMPExecutableDirective>(ChildStmt)) {
          DKind = NND->getDirectiveKind();
          if (isOpenMPParallelDirective(DKind) &&
              isOpenMPWorksharingDirective(DKind) &&
              isOpenMPLoopDirective(DKind) && hasStaticScheduling(*NND))
            return true;
          if (DKind == OMPD_parallel) {
            Body = NND->getInnermostCapturedStmt()->IgnoreContainers(
                /*IgnoreCaptured=*/true);
            if (!Body)
              return false;
            ChildStmt = CGOpenMPRuntime::getSingleCompoundChild(Ctx, Body);
            if (const auto *NND =
                    dyn_cast_or_null<OMPExecutableDirective>(ChildStmt)) {
              DKind = NND->getDirectiveKind();
              if (isOpenMPWorksharingDirective(DKind) &&
                  isOpenMPLoopDirective(DKind) && hasStaticScheduling(*NND))
                return true;
            }
          }
        }
      }
      return false;
    case OMPD_target_teams:
      if (isOpenMPParallelDirective(DKind) &&
          isOpenMPWorksharingDirective(DKind) && isOpenMPLoopDirective(DKind) &&
          hasStaticScheduling(*NestedDir))
        return true;
      if (DKind == OMPD_distribute_simd || DKind == OMPD_simd)
        return true;
      if (DKind == OMPD_parallel) {
        Body = NestedDir->getInnermostCapturedStmt()->IgnoreContainers(
            /*IgnoreCaptured=*/true);
        if (!Body)
          return false;
        ChildStmt = CGOpenMPRuntime::getSingleCompoundChild(Ctx, Body);
        if (const auto *NND =
                dyn_cast_or_null<OMPExecutableDirective>(ChildStmt)) {
          DKind = NND->getDirectiveKind();
          if (isOpenMPWorksharingDirective(DKind) &&
              isOpenMPLoopDirective(DKind) && hasStaticScheduling(*NND))
            return true;
        }
      }
      return false;
    case OMPD_target_parallel:
      if (DKind == OMPD_simd)
        return true;
      return isOpenMPWorksharingDirective(DKind) &&
             isOpenMPLoopDirective(DKind) && hasStaticScheduling(*NestedDir);
    case OMPD_target_teams_distribute:
    case OMPD_target_simd:
    case OMPD_target_parallel_for:
    case OMPD_target_parallel_for_simd:
    case OMPD_target_teams_distribute_simd:
    case OMPD_target_teams_distribute_parallel_for:
    case OMPD_target_teams_distribute_parallel_for_simd:
    case OMPD_parallel:
    case OMPD_for:
    case OMPD_parallel_for:
    case OMPD_parallel_master:
    case OMPD_parallel_sections:
    case OMPD_for_simd:
    case OMPD_parallel_for_simd:
    case OMPD_cancel:
    case OMPD_cancellation_point:
    case OMPD_ordered:
    case OMPD_threadprivate:
    case OMPD_allocate:
    case OMPD_task:
    case OMPD_simd:
    case OMPD_sections:
    case OMPD_section:
    case OMPD_single:
    case OMPD_master:
    case OMPD_critical:
    case OMPD_taskyield:
    case OMPD_barrier:
    case OMPD_taskwait:
    case OMPD_taskgroup:
    case OMPD_atomic:
    case OMPD_flush:
    case OMPD_depobj:
    case OMPD_scan:
    case OMPD_teams:
    case OMPD_target_data:
    case OMPD_target_exit_data:
    case OMPD_target_enter_data:
    case OMPD_distribute:
    case OMPD_distribute_simd:
    case OMPD_distribute_parallel_for:
    case OMPD_distribute_parallel_for_simd:
    case OMPD_teams_distribute:
    case OMPD_teams_distribute_simd:
    case OMPD_teams_distribute_parallel_for:
    case OMPD_teams_distribute_parallel_for_simd:
    case OMPD_target_update:
    case OMPD_declare_simd:
    case OMPD_declare_variant:
    case OMPD_begin_declare_variant:
    case OMPD_end_declare_variant:
    case OMPD_declare_target:
    case OMPD_end_declare_target:
    case OMPD_declare_reduction:
    case OMPD_declare_mapper:
    case OMPD_taskloop:
    case OMPD_taskloop_simd:
    case OMPD_master_taskloop:
    case OMPD_master_taskloop_simd:
    case OMPD_parallel_master_taskloop:
    case OMPD_parallel_master_taskloop_simd:
    case OMPD_requires:
    case OMPD_unknown:
    default:
      llvm_unreachable("Unexpected directive.");
    }
  }

  return false;
}
/// Checks if the construct supports the lightweight runtime. It must be an
/// SPMD construct with an inner loop-based construct using static scheduling.
static bool supportsLightweightRuntime(ASTContext &Ctx,
                                       const OMPExecutableDirective &D) {
  if (!supportsSPMDExecutionMode(Ctx, D))
    return false;
  OpenMPDirectiveKind DirectiveKind = D.getDirectiveKind();
  switch (DirectiveKind) {
  case OMPD_target:
  case OMPD_target_teams:
  case OMPD_target_parallel:
    return hasNestedLightweightDirective(Ctx, D);
  case OMPD_target_parallel_for:
  case OMPD_target_parallel_for_simd:
  case OMPD_target_teams_distribute_parallel_for:
  case OMPD_target_teams_distribute_parallel_for_simd:
    // (Last|First)-privates must be shared in parallel region.
    return hasStaticScheduling(D);
  case OMPD_target_simd:
  case OMPD_target_teams_distribute_simd:
    return true;
  case OMPD_target_teams_distribute:
    return false;
  case OMPD_parallel:
  case OMPD_for:
  case OMPD_parallel_for:
  case OMPD_parallel_master:
  case OMPD_parallel_sections:
  case OMPD_for_simd:
  case OMPD_parallel_for_simd:
  case OMPD_cancel:
  case OMPD_cancellation_point:
  case OMPD_ordered:
  case OMPD_threadprivate:
  case OMPD_allocate:
  case OMPD_task:
  case OMPD_simd:
  case OMPD_sections:
  case OMPD_section:
  case OMPD_single:
  case OMPD_master:
  case OMPD_critical:
  case OMPD_taskyield:
  case OMPD_barrier:
  case OMPD_taskwait:
  case OMPD_taskgroup:
  case OMPD_atomic:
  case OMPD_flush:
  case OMPD_depobj:
  case OMPD_scan:
  case OMPD_teams:
  case OMPD_target_data:
  case OMPD_target_exit_data:
  case OMPD_target_enter_data:
  case OMPD_distribute:
  case OMPD_distribute_simd:
  case OMPD_distribute_parallel_for:
  case OMPD_distribute_parallel_for_simd:
  case OMPD_teams_distribute:
  case OMPD_teams_distribute_simd:
  case OMPD_teams_distribute_parallel_for:
  case OMPD_teams_distribute_parallel_for_simd:
  case OMPD_target_update:
  case OMPD_declare_simd:
  case OMPD_declare_variant:
  case OMPD_begin_declare_variant:
  case OMPD_end_declare_variant:
  case OMPD_declare_target:
  case OMPD_end_declare_target:
  case OMPD_declare_reduction:
  case OMPD_declare_mapper:
  case OMPD_taskloop:
  case OMPD_taskloop_simd:
  case OMPD_master_taskloop:
  case OMPD_master_taskloop_simd:
  case OMPD_parallel_master_taskloop:
  case OMPD_parallel_master_taskloop_simd:
  case OMPD_requires:
  case OMPD_unknown:
  default:
    break;
  }
  llvm_unreachable(
      "Unknown programming model for OpenMP directive on NVPTX target.");
}
void CGOpenMPRuntimeGPU::emitNonSPMDKernel(const OMPExecutableDirective &D,
                                             StringRef ParentName,
                                             llvm::Function *&OutlinedFn,
                                             llvm::Constant *&OutlinedFnID,
                                             bool IsOffloadEntry,
                                             const RegionCodeGenTy &CodeGen) {
  ExecutionRuntimeModesRAII ModeRAII(CurrentExecutionMode);
  EntryFunctionState EST;
  WrapperFunctionsMap.clear();

  // Emit target region as a standalone region.
  class NVPTXPrePostActionTy : public PrePostActionTy {
    CGOpenMPRuntimeGPU::EntryFunctionState &EST;

  public:
    NVPTXPrePostActionTy(CGOpenMPRuntimeGPU::EntryFunctionState &EST)
        : EST(EST) {}
    void Enter(CodeGenFunction &CGF) override {
      auto &RT =
          static_cast<CGOpenMPRuntimeGPU &>(CGF.CGM.getOpenMPRuntime());
      RT.emitKernelInit(CGF, EST, /* IsSPMD */ false);
      // Skip target region initialization.
      RT.setLocThreadIdInsertPt(CGF, /*AtCurrentPoint=*/true);
    }
    void Exit(CodeGenFunction &CGF) override {
      auto &RT =
          static_cast<CGOpenMPRuntimeGPU &>(CGF.CGM.getOpenMPRuntime());
      RT.clearLocThreadIdInsertPt(CGF);
      RT.emitKernelDeinit(CGF, EST, /* IsSPMD */ false);
    }
  } Action(EST);
  CodeGen.setAction(Action);
  IsInTTDRegion = true;
  emitTargetOutlinedFunctionHelper(D, ParentName, OutlinedFn, OutlinedFnID,
                                   IsOffloadEntry, CodeGen);
  IsInTTDRegion = false;
}

void CGOpenMPRuntimeGPU::emitKernelInit(CodeGenFunction &CGF,
                                        EntryFunctionState &EST, bool IsSPMD) {
  CGBuilderTy &Bld = CGF.Builder;
  Bld.restoreIP(OMPBuilder.createTargetInit(Bld, IsSPMD, requiresFullRuntime()));
  IsInTargetMasterThreadRegion = IsSPMD;
  if (!IsSPMD)
    emitGenericVarsProlog(CGF, EST.Loc);
}

void CGOpenMPRuntimeGPU::emitKernelDeinit(CodeGenFunction &CGF,
                                          EntryFunctionState &EST,
                                          bool IsSPMD) {
  if (!IsSPMD)
    emitGenericVarsEpilog(CGF);

  CGBuilderTy &Bld = CGF.Builder;
  OMPBuilder.createTargetDeinit(Bld, IsSPMD, requiresFullRuntime());
}
void CGOpenMPRuntimeGPU::emitSPMDKernel(const OMPExecutableDirective &D,
                                          StringRef ParentName,
                                          llvm::Function *&OutlinedFn,
                                          llvm::Constant *&OutlinedFnID,
                                          bool IsOffloadEntry,
                                          const RegionCodeGenTy &CodeGen) {
  ExecutionRuntimeModesRAII ModeRAII(
      CurrentExecutionMode, RequiresFullRuntime,
      CGM.getLangOpts().OpenMPCUDAForceFullRuntime ||
          !supportsLightweightRuntime(CGM.getContext(), D));
  EntryFunctionState EST;

  // Emit target region as a standalone region.
  class NVPTXPrePostActionTy : public PrePostActionTy {
    CGOpenMPRuntimeGPU &RT;
    CGOpenMPRuntimeGPU::EntryFunctionState &EST;

  public:
    NVPTXPrePostActionTy(CGOpenMPRuntimeGPU &RT,
                         CGOpenMPRuntimeGPU::EntryFunctionState &EST)
        : RT(RT), EST(EST) {}
    void Enter(CodeGenFunction &CGF) override {
      RT.emitKernelInit(CGF, EST, /* IsSPMD */ true);
      // Skip target region initialization.
      RT.setLocThreadIdInsertPt(CGF, /*AtCurrentPoint=*/true);
    }
    void Exit(CodeGenFunction &CGF) override {
      RT.clearLocThreadIdInsertPt(CGF);
      RT.emitKernelDeinit(CGF, EST, /* IsSPMD */ true);
    }
  } Action(*this, EST);
  CodeGen.setAction(Action);
  IsInTTDRegion = true;
  emitTargetOutlinedFunctionHelper(D, ParentName, OutlinedFn, OutlinedFnID,
                                   IsOffloadEntry, CodeGen);
  IsInTTDRegion = false;
}

// Create a unique global variable to indicate the execution mode of this
// target region. The execution mode is either 'generic' or 'spmd', depending
// on the target directive. This variable is picked up by the offload library
// to set up the device appropriately before kernel launch. If the execution
// mode is 'generic', the runtime reserves one warp for the master; otherwise,
// all warps participate in parallel work.
static void setPropertyExecutionMode(CodeGenModule &CGM, StringRef Name,
                                     bool Mode) {
  auto *GVMode = new llvm::GlobalVariable(
      CGM.getModule(), CGM.Int8Ty, /*isConstant=*/true,
      llvm::GlobalValue::WeakAnyLinkage,
      llvm::ConstantInt::get(CGM.Int8Ty, Mode ? OMP_TGT_EXEC_MODE_SPMD
                                              : OMP_TGT_EXEC_MODE_GENERIC),
      Twine(Name, "_exec_mode"));
  CGM.addCompilerUsedGlobal(GVMode);
}
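
// Illustrative IR (editorial sketch; the kernel name is hypothetical):
//   @__omp_offloading_xx_foo_l10_exec_mode = weak constant i8 <mode>
// where the i8 payload is OMP_TGT_EXEC_MODE_SPMD or OMP_TGT_EXEC_MODE_GENERIC.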

void CGOpenMPRuntimeGPU::createOffloadEntry(llvm::Constant *ID,
                                              llvm::Constant *Addr,
                                              uint64_t Size, int32_t,
                                              llvm::GlobalValue::LinkageTypes) {
  // TODO: Add support for global variables on the device after declare target
  // support.
  llvm::Function *Fn = dyn_cast<llvm::Function>(Addr);
  if (!Fn)
    return;

  llvm::Module &M = CGM.getModule();
  llvm::LLVMContext &Ctx = CGM.getLLVMContext();

  // Get "nvvm.annotations" metadata node.
  llvm::NamedMDNode *MD = M.getOrInsertNamedMetadata("nvvm.annotations");

  llvm::Metadata *MDVals[] = {
      llvm::ConstantAsMetadata::get(Fn), llvm::MDString::get(Ctx, "kernel"),
      llvm::ConstantAsMetadata::get(
          llvm::ConstantInt::get(llvm::Type::getInt32Ty(Ctx), 1))};
  // Append metadata to nvvm.annotations.
  MD->addOperand(llvm::MDNode::get(Ctx, MDVals));

  // Add a function attribute for the kernel.
  Fn->addFnAttr(llvm::Attribute::get(Ctx, "kernel"));
}
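
// Illustrative metadata (editorial sketch, roughly what the code above
// produces):
//   !nvvm.annotations = !{!0}
//   !0 = !{<kernel function>, !"kernel", i32 1}
// which is how the NVPTX backend recognizes the outlined function as a
// kernel entry point.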

void CGOpenMPRuntimeGPU::emitTargetOutlinedFunction(
    const OMPExecutableDirective &D, StringRef ParentName,
    llvm::Function *&OutlinedFn, llvm::Constant *&OutlinedFnID,
    bool IsOffloadEntry, const RegionCodeGenTy &CodeGen) {
  if (!IsOffloadEntry) // Nothing to do.
    return;

  assert(!ParentName.empty() && "Invalid target region parent name!");

  bool Mode = supportsSPMDExecutionMode(CGM.getContext(), D);
  if (Mode)
    emitSPMDKernel(D, ParentName, OutlinedFn, OutlinedFnID, IsOffloadEntry,
                   CodeGen);
  else
    emitNonSPMDKernel(D, ParentName, OutlinedFn, OutlinedFnID, IsOffloadEntry,
                      CodeGen);

  setPropertyExecutionMode(CGM, OutlinedFn->getName(), Mode);
}

namespace {
LLVM_ENABLE_BITMASK_ENUMS_IN_NAMESPACE();
/// Enum for accessing the reserved_2 field of the ident_t struct.
enum ModeFlagsTy : unsigned {
  /// Bit set to 1 when in SPMD mode.
  KMP_IDENT_SPMD_MODE = 0x01,
  /// Bit set to 1 when a simplified runtime is used.
  KMP_IDENT_SIMPLE_RT_MODE = 0x02,
  LLVM_MARK_AS_BITMASK_ENUM(/*LargestValue=*/KMP_IDENT_SIMPLE_RT_MODE)
};

/// Special mode Undefined: the combination of non-SPMD mode and the simple
/// runtime.
static const ModeFlagsTy UndefinedMode =
    (~KMP_IDENT_SPMD_MODE) & KMP_IDENT_SIMPLE_RT_MODE;
} // anonymous namespace

unsigned CGOpenMPRuntimeGPU::getDefaultLocationReserved2Flags() const {
  switch (getExecutionMode()) {
  case EM_SPMD:
    if (requiresFullRuntime())
      return KMP_IDENT_SPMD_MODE & (~KMP_IDENT_SIMPLE_RT_MODE);
    return KMP_IDENT_SPMD_MODE | KMP_IDENT_SIMPLE_RT_MODE;
  case EM_NonSPMD:
    assert(requiresFullRuntime() && "Expected full runtime.");
    return (~KMP_IDENT_SPMD_MODE) & (~KMP_IDENT_SIMPLE_RT_MODE);
  case EM_Unknown:
    return UndefinedMode;
  }
  llvm_unreachable("Unknown flags are requested.");
}
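
// Worked values (from the bit masks above, within the two flag bits):
// SPMD + full runtime == 0x01, SPMD + simplified runtime == 0x03,
// non-SPMD (always full runtime) == 0x00, and UndefinedMode == 0x02.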

CGOpenMPRuntimeGPU::CGOpenMPRuntimeGPU(CodeGenModule &CGM)
    : CGOpenMPRuntime(CGM, "_", "$") {
  if (!CGM.getLangOpts().OpenMPIsDevice)
    llvm_unreachable("OpenMP can only handle device code.");

  llvm::OpenMPIRBuilder &OMPBuilder = getOMPBuilder();
  if (CGM.getLangOpts().NoGPULib || CGM.getLangOpts().OMPHostIRFile.empty())
    return;

  OMPBuilder.createGlobalFlag(CGM.getLangOpts().OpenMPTargetDebug,
                              "__omp_rtl_debug_kind");
  OMPBuilder.createGlobalFlag(CGM.getLangOpts().OpenMPTeamSubscription,
                              "__omp_rtl_assume_teams_oversubscription");
  OMPBuilder.createGlobalFlag(CGM.getLangOpts().OpenMPThreadSubscription,
                              "__omp_rtl_assume_threads_oversubscription");
  OMPBuilder.createGlobalFlag(CGM.getLangOpts().OpenMPNoThreadState,
                              "__omp_rtl_assume_no_thread_state");
}
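
// Note (editorial): each createGlobalFlag call materializes a device-visible
// integer global with the given name (e.g. @__omp_rtl_debug_kind) whose value
// mirrors the corresponding language option, letting the device runtime
// specialize itself when these globals are folded at link time.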

void CGOpenMPRuntimeGPU::emitProcBindClause(CodeGenFunction &CGF,
                                              ProcBindKind ProcBind,
                                              SourceLocation Loc) {
  // Do nothing in case of SPMD mode and L0 parallel.
  if (getExecutionMode() == CGOpenMPRuntimeGPU::EM_SPMD)
    return;

  CGOpenMPRuntime::emitProcBindClause(CGF, ProcBind, Loc);
}

void CGOpenMPRuntimeGPU::emitNumThreadsClause(CodeGenFunction &CGF,
                                                llvm::Value *NumThreads,
                                                SourceLocation Loc) {
  // Nothing to do.
}

void CGOpenMPRuntimeGPU::emitNumTeamsClause(CodeGenFunction &CGF,
                                              const Expr *NumTeams,
                                              const Expr *ThreadLimit,
                                              SourceLocation Loc) {}

llvm::Function *CGOpenMPRuntimeGPU::emitParallelOutlinedFunction(
    const OMPExecutableDirective &D, const VarDecl *ThreadIDVar,
    OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen) {
  // Emit target region as a standalone region.
  class NVPTXPrePostActionTy : public PrePostActionTy {
    bool &IsInParallelRegion;
    bool PrevIsInParallelRegion;

  public:
    NVPTXPrePostActionTy(bool &IsInParallelRegion)
        : IsInParallelRegion(IsInParallelRegion) {}
    void Enter(CodeGenFunction &CGF) override {
      PrevIsInParallelRegion = IsInParallelRegion;
      IsInParallelRegion = true;
    }
    void Exit(CodeGenFunction &CGF) override {
      IsInParallelRegion = PrevIsInParallelRegion;
    }
  } Action(IsInParallelRegion);
  CodeGen.setAction(Action);
  bool PrevIsInTTDRegion = IsInTTDRegion;
  IsInTTDRegion = false;
  bool PrevIsInTargetMasterThreadRegion = IsInTargetMasterThreadRegion;
  IsInTargetMasterThreadRegion = false;
  auto *OutlinedFun =
      cast<llvm::Function>(CGOpenMPRuntime::emitParallelOutlinedFunction(
          D, ThreadIDVar, InnermostKind, CodeGen));
  IsInTargetMasterThreadRegion = PrevIsInTargetMasterThreadRegion;
  IsInTTDRegion = PrevIsInTTDRegion;
  if (getExecutionMode() != CGOpenMPRuntimeGPU::EM_SPMD &&
      !IsInParallelRegion) {
    llvm::Function *WrapperFun =
        createParallelDataSharingWrapper(OutlinedFun, D);
    WrapperFunctionsMap[OutlinedFun] = WrapperFun;
  }

  return OutlinedFun;
}

/// Get list of lastprivate variables from the teams distribute ... or
/// teams {distribute ...} directives.
static void
getDistributeLastprivateVars(ASTContext &Ctx, const OMPExecutableDirective &D,
                             llvm::SmallVectorImpl<const ValueDecl *> &Vars) {
  assert(isOpenMPTeamsDirective(D.getDirectiveKind()) &&
         "expected teams directive.");
  const OMPExecutableDirective *Dir = &D;
  if (!isOpenMPDistributeDirective(D.getDirectiveKind())) {
    if (const Stmt *S = CGOpenMPRuntime::getSingleCompoundChild(
            Ctx,
            D.getInnermostCapturedStmt()->getCapturedStmt()->IgnoreContainers(
                /*IgnoreCaptured=*/true))) {
      Dir = dyn_cast_or_null<OMPExecutableDirective>(S);
      if (Dir && !isOpenMPDistributeDirective(Dir->getDirectiveKind()))
        Dir = nullptr;
    }
  }
  if (!Dir)
    return;
  for (const auto *C : Dir->getClausesOfKind<OMPLastprivateClause>()) {
    for (const Expr *E : C->getVarRefs())
      Vars.push_back(getPrivateItem(E));
  }
}
/// Get list of reduction variables from the teams ... directives.
1306
static void
1307
getTeamsReductionVars(ASTContext &Ctx, const OMPExecutableDirective &D,
1308
39
                      llvm::SmallVectorImpl<const ValueDecl *> &Vars) {
1309
39
  assert(isOpenMPTeamsDirective(D.getDirectiveKind()) &&
1310
39
         "expected teams directive.");
1311
9
  for (const auto *C : D.getClausesOfKind<OMPReductionClause>()) {
1312
9
    for (const Expr *E : C->privates())
1313
9
      Vars.push_back(getPrivateItem(E));
1314
9
  }
1315
39
}
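Taken together, the two helpers above collect the declarations that must be globalized for a teams region. A hedged sketch of the directives they match (variable names illustrative):

  int Last = 0, Sum = 0;
  // getDistributeLastprivateVars finds 'Last' on the distribute directive.
  #pragma omp target teams distribute lastprivate(Last)
  for (int I = 0; I < 100; ++I)
    Last = I;
  // getTeamsReductionVars finds the private copy of 'Sum'.
  #pragma omp target teams reduction(+ : Sum)
  Sum += 1;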
1316
1317
llvm::Function *CGOpenMPRuntimeGPU::emitTeamsOutlinedFunction(
1318
    const OMPExecutableDirective &D, const VarDecl *ThreadIDVar,
1319
327
    OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen) {
1320
327
  SourceLocation Loc = D.getBeginLoc();
1321
1322
327
  const RecordDecl *GlobalizedRD = nullptr;
1323
327
  llvm::SmallVector<const ValueDecl *, 4> LastPrivatesReductions;
1324
327
  llvm::SmallDenseMap<const ValueDecl *, const FieldDecl *> MappedDeclsFields;
1325
327
  unsigned WarpSize = CGM.getTarget().getGridValue().GV_Warp_Size;
1326
  // Globalize team reduction variables unconditionally in all modes.
1327
327
  if (getExecutionMode() != CGOpenMPRuntimeGPU::EM_SPMD)
1328
39
    getTeamsReductionVars(CGM.getContext(), D, LastPrivatesReductions);
1329
327
  if (getExecutionMode() == CGOpenMPRuntimeGPU::EM_SPMD) {
1330
288
    getDistributeLastprivateVars(CGM.getContext(), D, LastPrivatesReductions);
1331
288
    if (!LastPrivatesReductions.empty()) {
1332
22
      GlobalizedRD = ::buildRecordForGlobalizedVars(
1333
22
          CGM.getContext(), llvm::None, LastPrivatesReductions,
1334
22
          MappedDeclsFields, WarpSize);
1335
22
    }
1336
288
  } else if (!LastPrivatesReductions.empty()) {
1337
6
    assert(!TeamAndReductions.first &&
1338
6
           "Previous team declaration is not expected.");
1339
0
    TeamAndReductions.first = D.getCapturedStmt(OMPD_teams)->getCapturedDecl();
1340
6
    std::swap(TeamAndReductions.second, LastPrivatesReductions);
1341
6
  }
1342
1343
  // Emit target region as a standalone region.
1344
0
  class NVPTXPrePostActionTy : public PrePostActionTy {
1345
327
    SourceLocation &Loc;
1346
327
    const RecordDecl *GlobalizedRD;
1347
327
    llvm::SmallDenseMap<const ValueDecl *, const FieldDecl *>
1348
327
        &MappedDeclsFields;
1349
1350
327
  public:
1351
327
    NVPTXPrePostActionTy(
1352
327
        SourceLocation &Loc, const RecordDecl *GlobalizedRD,
1353
327
        llvm::SmallDenseMap<const ValueDecl *, const FieldDecl *>
1354
327
            &MappedDeclsFields)
1355
327
        : Loc(Loc), GlobalizedRD(GlobalizedRD),
1356
327
          MappedDeclsFields(MappedDeclsFields) {}
1357
327
    void Enter(CodeGenFunction &CGF) override {
1358
327
      auto &Rt =
1359
327
          static_cast<CGOpenMPRuntimeGPU &>(CGF.CGM.getOpenMPRuntime());
1360
327
      if (GlobalizedRD) {
1361
22
        auto I = Rt.FunctionGlobalizedDecls.try_emplace(CGF.CurFn).first;
1362
22
        I->getSecond().MappedParams =
1363
22
            std::make_unique<CodeGenFunction::OMPMapVars>();
1364
22
        DeclToAddrMapTy &Data = I->getSecond().LocalVarData;
1365
22
        for (const auto &Pair : MappedDeclsFields) {
1366
22
          assert(Pair.getFirst()->isCanonicalDecl() &&
1367
22
                 "Expected canonical declaration");
1368
0
          Data.insert(std::make_pair(Pair.getFirst(), MappedVarData()));
1369
22
        }
1370
22
      }
1371
327
      Rt.emitGenericVarsProlog(CGF, Loc);
1372
327
    }
1373
327
    void Exit(CodeGenFunction &CGF) override {
1374
327
      static_cast<CGOpenMPRuntimeGPU &>(CGF.CGM.getOpenMPRuntime())
1375
327
          .emitGenericVarsEpilog(CGF);
1376
327
    }
1377
327
  } Action(Loc, GlobalizedRD, MappedDeclsFields);
1378
327
  CodeGen.setAction(Action);
1379
327
  llvm::Function *OutlinedFun = CGOpenMPRuntime::emitTeamsOutlinedFunction(
1380
327
      D, ThreadIDVar, InnermostKind, CodeGen);
1381
1382
327
  return OutlinedFun;
1383
327
}
1384
1385
void CGOpenMPRuntimeGPU::emitGenericVarsProlog(CodeGenFunction &CGF,
1386
                                                 SourceLocation Loc,
1387
498
                                                 bool WithSPMDCheck) {
1388
498
  if (getDataSharingMode(CGM) != CGOpenMPRuntimeGPU::Generic &&
1389
498
      getExecutionMode() != CGOpenMPRuntimeGPU::EM_SPMD)
1390
8
    return;
1391
1392
490
  CGBuilderTy &Bld = CGF.Builder;
1393
1394
490
  const auto I = FunctionGlobalizedDecls.find(CGF.CurFn);
1395
490
  if (I == FunctionGlobalizedDecls.end())
1396
421
    return;
1397
1398
91
  for (auto &Rec : I->getSecond().LocalVarData) {
1399
91
    const auto *VD = cast<VarDecl>(Rec.first);
1400
91
    bool EscapedParam = I->getSecond().EscapedParameters.count(Rec.first);
1401
91
    QualType VarTy = VD->getType();
1402
1403
    // Get the local allocation of a firstprivate variable before sharing
1404
91
    llvm::Value *ParValue;
1405
91
    if (EscapedParam) {
1406
17
      LValue ParLVal =
1407
17
          CGF.MakeAddrLValue(CGF.GetAddrOfLocalVar(VD), VD->getType());
1408
17
      ParValue = CGF.EmitLoadOfScalar(ParLVal, Loc);
1409
17
    }
1410
1411
    // Allocate space for the variable to be globalized
1412
91
    llvm::Value *AllocArgs[] = {CGF.getTypeSize(VD->getType())};
1413
91
    llvm::CallBase *VoidPtr =
1414
91
        CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
1415
91
                                CGM.getModule(), OMPRTL___kmpc_alloc_shared),
1416
91
                            AllocArgs, VD->getName());
1417
    // FIXME: We should use the variables actual alignment as an argument.
1418
91
    VoidPtr->addRetAttr(llvm::Attribute::get(
1419
91
        CGM.getLLVMContext(), llvm::Attribute::Alignment,
1420
91
        CGM.getContext().getTargetInfo().getNewAlign() / 8));
1421
1422
    // Cast the void pointer and get the address of the globalized variable.
1423
91
    llvm::PointerType *VarPtrTy = CGF.ConvertTypeForMem(VarTy)->getPointerTo();
1424
91
    llvm::Value *CastedVoidPtr = Bld.CreatePointerBitCastOrAddrSpaceCast(
1425
91
        VoidPtr, VarPtrTy, VD->getName() + "_on_stack");
1426
91
    LValue VarAddr = CGF.MakeNaturalAlignAddrLValue(CastedVoidPtr, VarTy);
1427
91
    Rec.second.PrivateAddr = VarAddr.getAddress(CGF);
1428
91
    Rec.second.GlobalizedVal = VoidPtr;
1429
1430
    // Assign the local allocation to the newly globalized location.
1431
91
    if (EscapedParam) {
1432
17
      CGF.EmitStoreOfScalar(ParValue, VarAddr);
1433
17
      I->getSecond().MappedParams->setVarAddr(CGF, VD, VarAddr.getAddress(CGF));
1434
17
    }
1435
91
    if (auto *DI = CGF.getDebugInfo())
1436
0
      VoidPtr->setDebugLoc(DI->SourceLocToDebugLoc(VD->getLocation()));
1437
91
  }
1438
69
  for (const auto *VD : I->getSecond().EscapedVariableLengthDecls) {
1439
    // Use actual memory size of the VLA object including the padding
1440
    // for alignment purposes.
1441
0
    llvm::Value *Size = CGF.getTypeSize(VD->getType());
1442
0
    CharUnits Align = CGM.getContext().getDeclAlign(VD);
1443
0
    Size = Bld.CreateNUWAdd(
1444
0
        Size, llvm::ConstantInt::get(CGF.SizeTy, Align.getQuantity() - 1));
1445
0
    llvm::Value *AlignVal =
1446
0
        llvm::ConstantInt::get(CGF.SizeTy, Align.getQuantity());
1447
1448
0
    Size = Bld.CreateUDiv(Size, AlignVal);
1449
0
    Size = Bld.CreateNUWMul(Size, AlignVal);
1450
1451
    // Allocate space for this VLA object to be globalized.
1452
0
    llvm::Value *AllocArgs[] = {CGF.getTypeSize(VD->getType())};
1453
0
    llvm::CallBase *VoidPtr =
1454
0
        CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
1455
0
                                CGM.getModule(), OMPRTL___kmpc_alloc_shared),
1456
0
                            AllocArgs, VD->getName());
1457
0
    VoidPtr->addRetAttr(
1458
0
        llvm::Attribute::get(CGM.getLLVMContext(), llvm::Attribute::Alignment,
1459
0
                             CGM.getContext().getTargetInfo().getNewAlign()));
1460
1461
0
    I->getSecond().EscapedVariableLengthDeclsAddrs.emplace_back(
1462
0
        std::pair<llvm::Value *, llvm::Value *>(
1463
0
            {VoidPtr, CGF.getTypeSize(VD->getType())}));
1464
0
    LValue Base = CGF.MakeAddrLValue(VoidPtr, VD->getType(),
1465
0
                                     CGM.getContext().getDeclAlign(VD),
1466
0
                                     AlignmentSource::Decl);
1467
0
    I->getSecond().MappedParams->setVarAddr(CGF, cast<VarDecl>(VD),
1468
0
                                            Base.getAddress(CGF));
1469
0
  }
1470
69
  I->getSecond().MappedParams->apply(CGF);
1471
69
}
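In terms of the generated device code, this prolog (with the matching epilog below) brackets each escaped variable with the runtime's shared-stack allocator. Roughly, for one escaped variable x of type T; only the __kmpc_* entry points are taken from the calls above, the rest is an illustrative sketch:

  void *x_ptr = __kmpc_alloc_shared(sizeof(T));     // prolog
  T *x_on_stack = (T *)x_ptr;                       // "x_on_stack" cast
  // Escaped parameters: seed the globalized copy with the argument value.
  // ... region body uses *x_on_stack instead of a private alloca ...
  __kmpc_free_shared(x_ptr, sizeof(T));             // epilog, reverse order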
1472
1473
void CGOpenMPRuntimeGPU::emitGenericVarsEpilog(CodeGenFunction &CGF,
1474
498
                                                 bool WithSPMDCheck) {
1475
498
  if (getDataSharingMode(CGM) != CGOpenMPRuntimeGPU::Generic &&
1476
498
      getExecutionMode() != CGOpenMPRuntimeGPU::EM_SPMD)
1477
8
    return;
1478
1479
490
  const auto I = FunctionGlobalizedDecls.find(CGF.CurFn);
1480
490
  if (I != FunctionGlobalizedDecls.end()) {
1481
    // Deallocate the memory for each globalized VLA object
1482
69
    for (auto AddrSizePair :
1483
69
         llvm::reverse(I->getSecond().EscapedVariableLengthDeclsAddrs)) {
1484
0
      CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
1485
0
                              CGM.getModule(), OMPRTL___kmpc_free_shared),
1486
0
                          {AddrSizePair.first, AddrSizePair.second});
1487
0
    }
1488
    // Deallocate the memory for each globalized value
1489
91
    for (auto &Rec : llvm::reverse(I->getSecond().LocalVarData)) {
1490
91
      const auto *VD = cast<VarDecl>(Rec.first);
1491
91
      I->getSecond().MappedParams->restore(CGF);
1492
1493
91
      llvm::Value *FreeArgs[] = {Rec.second.GlobalizedVal,
1494
91
                                 CGF.getTypeSize(VD->getType())};
1495
91
      CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
1496
91
                              CGM.getModule(), OMPRTL___kmpc_free_shared),
1497
91
                          FreeArgs);
1498
91
    }
1499
69
  }
1500
490
}
1501
1502
void CGOpenMPRuntimeGPU::emitTeamsCall(CodeGenFunction &CGF,
1503
                                         const OMPExecutableDirective &D,
1504
                                         SourceLocation Loc,
1505
                                         llvm::Function *OutlinedFn,
1506
327
                                         ArrayRef<llvm::Value *> CapturedVars) {
1507
327
  if (!CGF.HaveInsertPoint())
1508
0
    return;
1509
1510
327
  Address ZeroAddr = CGF.CreateDefaultAlignTempAlloca(CGF.Int32Ty,
1511
327
                                                      /*Name=*/".zero.addr");
1512
327
  CGF.Builder.CreateStore(CGF.Builder.getInt32(/*C*/ 0), ZeroAddr);
1513
327
  llvm::SmallVector<llvm::Value *, 16> OutlinedFnArgs;
1514
327
  OutlinedFnArgs.push_back(emitThreadIDAddress(CGF, Loc).getPointer());
1515
327
  OutlinedFnArgs.push_back(ZeroAddr.getPointer());
1516
327
  OutlinedFnArgs.append(CapturedVars.begin(), CapturedVars.end());
1517
327
  emitOutlinedFunctionCall(CGF, Loc, OutlinedFn, OutlinedFnArgs);
1518
327
}
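The call emitted here has the usual outlined-function shape: the global thread id and a zeroed bound thread id are passed by address, followed by the captured variables. Schematically (illustrative only):

  int32_t Zero = 0;
  // OutlinedFn(&GlobalTid, &Zero, <captured vars...>);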
1519
1520
void CGOpenMPRuntimeGPU::emitParallelCall(CodeGenFunction &CGF,
1521
                                          SourceLocation Loc,
1522
                                          llvm::Function *OutlinedFn,
1523
                                          ArrayRef<llvm::Value *> CapturedVars,
1524
                                          const Expr *IfCond,
1525
560
                                          llvm::Value *NumThreads) {
1526
560
  if (!CGF.HaveInsertPoint())
1527
0
    return;
1528
1529
560
  auto &&ParallelGen = [this, Loc, OutlinedFn, CapturedVars, IfCond,
1530
560
                        NumThreads](CodeGenFunction &CGF,
1531
560
                                    PrePostActionTy &Action) {
1532
560
    CGBuilderTy &Bld = CGF.Builder;
1533
560
    llvm::Value *NumThreadsVal = NumThreads;
1534
560
    llvm::Function *WFn = WrapperFunctionsMap[OutlinedFn];
1535
560
    llvm::Value *ID = llvm::ConstantPointerNull::get(CGM.Int8PtrTy);
1536
560
    if (WFn)
1537
57
      ID = Bld.CreateBitOrPointerCast(WFn, CGM.Int8PtrTy);
1538
560
    llvm::Value *FnPtr = Bld.CreateBitOrPointerCast(OutlinedFn, CGM.Int8PtrTy);
1539
1540
    // Create a private scope that will globalize the arguments
1541
    // passed from the outside of the target region.
1542
    // TODO: Is that needed?
1543
560
    CodeGenFunction::OMPPrivateScope PrivateArgScope(CGF);
1544
1545
560
    Address CapturedVarsAddrs = CGF.CreateDefaultAlignTempAlloca(
1546
560
        llvm::ArrayType::get(CGM.VoidPtrTy, CapturedVars.size()),
1547
560
        "captured_vars_addrs");
1548
    // There's something to share.
1549
560
    if (!CapturedVars.empty()) {
1550
      // Prepare for parallel region. Indicate the outlined function.
1551
349
      ASTContext &Ctx = CGF.getContext();
1552
349
      unsigned Idx = 0;
1553
814
      for (llvm::Value *V : CapturedVars) {
1554
814
        Address Dst = Bld.CreateConstArrayGEP(CapturedVarsAddrs, Idx);
1555
814
        llvm::Value *PtrV;
1556
814
        if (V->getType()->isIntegerTy())
1557
604
          PtrV = Bld.CreateIntToPtr(V, CGF.VoidPtrTy);
1558
210
        else
1559
210
          PtrV = Bld.CreatePointerBitCastOrAddrSpaceCast(V, CGF.VoidPtrTy);
1560
814
        CGF.EmitStoreOfScalar(PtrV, Dst, /*Volatile=*/false,
1561
814
                              Ctx.getPointerType(Ctx.VoidPtrTy));
1562
814
        ++Idx;
1563
814
      }
1564
349
    }
1565
1566
560
    llvm::Value *IfCondVal = nullptr;
1567
560
    if (IfCond)
1568
22
      IfCondVal = Bld.CreateIntCast(CGF.EvaluateExprAsBool(IfCond), CGF.Int32Ty,
1569
22
                                    /* isSigned */ false);
1570
538
    else
1571
538
      IfCondVal = llvm::ConstantInt::get(CGF.Int32Ty, 1);
1572
1573
560
    if (!NumThreadsVal)
1574
539
      NumThreadsVal = llvm::ConstantInt::get(CGF.Int32Ty, -1);
1575
21
    else
1576
21
      NumThreadsVal = Bld.CreateZExtOrTrunc(NumThreadsVal, CGF.Int32Ty),
1577
1578
21
      assert(IfCondVal && "Expected a value");
1579
0
    llvm::Value *RTLoc = emitUpdateLocation(CGF, Loc);
1580
560
    llvm::Value *Args[] = {
1581
560
        RTLoc,
1582
560
        getThreadID(CGF, Loc),
1583
560
        IfCondVal,
1584
560
        NumThreadsVal,
1585
560
        llvm::ConstantInt::get(CGF.Int32Ty, -1),
1586
560
        FnPtr,
1587
560
        ID,
1588
560
        Bld.CreateBitOrPointerCast(CapturedVarsAddrs.getPointer(),
1589
560
                                   CGF.VoidPtrPtrTy),
1590
560
        llvm::ConstantInt::get(CGM.SizeTy, CapturedVars.size())};
1591
560
    CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
1592
560
                            CGM.getModule(), OMPRTL___kmpc_parallel_51),
1593
560
                        Args);
1594
560
  };
1595
1596
560
  RegionCodeGenTy RCG(ParallelGen);
1597
560
  RCG(CGF);
1598
560
}
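For reference, the Args array above lines up with the device runtime's __kmpc_parallel_51 entry point. The commented shape below mirrors the call site; the parameter roles are inferred from the values built above, so treat the names as assumptions:

  // __kmpc_parallel_51(
  //     RTLoc,             // ident / source location
  //     GlobalTid,         // getThreadID(CGF, Loc)
  //     IfCondVal,         // 1 when no if() clause is present
  //     NumThreadsVal,     // -1 when no num_threads() clause is present
  //     -1,                // proc_bind (unspecified)
  //     FnPtr,             // outlined parallel function
  //     ID,                // wrapper function, null when none was created
  //     CapturedVarsAddrs, // void** of captured arguments
  //     CapturedVars.size());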
1599
1600
0
void CGOpenMPRuntimeGPU::syncCTAThreads(CodeGenFunction &CGF) {
1601
  // Always emit simple barriers!
1602
0
  if (!CGF.HaveInsertPoint())
1603
0
    return;
1604
  // Build call __kmpc_barrier_simple_spmd(nullptr, 0);
1605
  // This function does not use parameters, so we can emit just default values.
1606
0
  llvm::Value *Args[] = {
1607
0
      llvm::ConstantPointerNull::get(
1608
0
          cast<llvm::PointerType>(getIdentTyPointerTy())),
1609
0
      llvm::ConstantInt::get(CGF.Int32Ty, /*V=*/0, /*isSigned=*/true)};
1610
0
  CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
1611
0
                          CGM.getModule(), OMPRTL___kmpc_barrier_simple_spmd),
1612
0
                      Args);
1613
0
}
1614
1615
void CGOpenMPRuntimeGPU::emitBarrierCall(CodeGenFunction &CGF,
1616
                                           SourceLocation Loc,
1617
                                           OpenMPDirectiveKind Kind, bool,
1618
172
                                           bool) {
1619
  // Always emit simple barriers!
1620
172
  if (!CGF.HaveInsertPoint())
1621
0
    return;
1622
  // Build call __kmpc_cancel_barrier(loc, thread_id);
1623
172
  unsigned Flags = getDefaultFlagsForBarriers(Kind);
1624
172
  llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc, Flags),
1625
172
                         getThreadID(CGF, Loc)};
1626
1627
172
  CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
1628
172
                          CGM.getModule(), OMPRTL___kmpc_barrier),
1629
172
                      Args);
1630
172
}
1631
1632
void CGOpenMPRuntimeGPU::emitCriticalRegion(
1633
    CodeGenFunction &CGF, StringRef CriticalName,
1634
    const RegionCodeGenTy &CriticalOpGen, SourceLocation Loc,
1635
2
    const Expr *Hint) {
1636
2
  llvm::BasicBlock *LoopBB = CGF.createBasicBlock("omp.critical.loop");
1637
2
  llvm::BasicBlock *TestBB = CGF.createBasicBlock("omp.critical.test");
1638
2
  llvm::BasicBlock *SyncBB = CGF.createBasicBlock("omp.critical.sync");
1639
2
  llvm::BasicBlock *BodyBB = CGF.createBasicBlock("omp.critical.body");
1640
2
  llvm::BasicBlock *ExitBB = CGF.createBasicBlock("omp.critical.exit");
1641
1642
2
  auto &RT = static_cast<CGOpenMPRuntimeGPU &>(CGF.CGM.getOpenMPRuntime());
1643
1644
  // Get the mask of active threads in the warp.
1645
2
  llvm::Value *Mask = CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
1646
2
      CGM.getModule(), OMPRTL___kmpc_warp_active_thread_mask));
1647
  // Fetch team-local id of the thread.
1648
2
  llvm::Value *ThreadID = RT.getGPUThreadID(CGF);
1649
1650
  // Get the width of the team.
1651
2
  llvm::Value *TeamWidth = RT.getGPUNumThreads(CGF);
1652
1653
  // Initialize the counter variable for the loop.
1654
2
  QualType Int32Ty =
1655
2
      CGF.getContext().getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/0);
1656
2
  Address Counter = CGF.CreateMemTemp(Int32Ty, "critical_counter");
1657
2
  LValue CounterLVal = CGF.MakeAddrLValue(Counter, Int32Ty);
1658
2
  CGF.EmitStoreOfScalar(llvm::Constant::getNullValue(CGM.Int32Ty), CounterLVal,
1659
2
                        /*isInit=*/true);
1660
1661
  // Block checks if loop counter exceeds upper bound.
1662
2
  CGF.EmitBlock(LoopBB);
1663
2
  llvm::Value *CounterVal = CGF.EmitLoadOfScalar(CounterLVal, Loc);
1664
2
  llvm::Value *CmpLoopBound = CGF.Builder.CreateICmpSLT(CounterVal, TeamWidth);
1665
2
  CGF.Builder.CreateCondBr(CmpLoopBound, TestBB, ExitBB);
1666
1667
  // Block tests which single thread should execute the region, and which threads
1668
  // should go straight to the synchronisation point.
1669
2
  CGF.EmitBlock(TestBB);
1670
2
  CounterVal = CGF.EmitLoadOfScalar(CounterLVal, Loc);
1671
2
  llvm::Value *CmpThreadToCounter =
1672
2
      CGF.Builder.CreateICmpEQ(ThreadID, CounterVal);
1673
2
  CGF.Builder.CreateCondBr(CmpThreadToCounter, BodyBB, SyncBB);
1674
1675
  // Block emits the body of the critical region.
1676
2
  CGF.EmitBlock(BodyBB);
1677
1678
  // Output the critical statement.
1679
2
  CGOpenMPRuntime::emitCriticalRegion(CGF, CriticalName, CriticalOpGen, Loc,
1680
2
                                      Hint);
1681
1682
  // After the body surrounded by the critical region, the single executing
1683
  // thread will jump to the synchronisation point.
1684
  // Block waits for all threads in current team to finish then increments the
1685
  // counter variable and returns to the loop.
1686
2
  CGF.EmitBlock(SyncBB);
1687
  // Reconverge active threads in the warp.
1688
2
  (void)CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
1689
2
                                CGM.getModule(), OMPRTL___kmpc_syncwarp),
1690
2
                            Mask);
1691
1692
2
  llvm::Value *IncCounterVal =
1693
2
      CGF.Builder.CreateNSWAdd(CounterVal, CGF.Builder.getInt32(1));
1694
2
  CGF.EmitStoreOfScalar(IncCounterVal, CounterLVal);
1695
2
  CGF.EmitBranch(LoopBB);
1696
1697
  // Block that is reached when all threads in the team complete the region.
1698
2
  CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
1699
2
}
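Put together, the blocks above serialize the critical region across the team. In C-like pseudocode (illustrative, with the runtime calls as emitted above):

  uint64_t Mask = __kmpc_warp_active_thread_mask();
  for (uint32_t Counter = 0; Counter < TeamWidth; ++Counter) {
    if (ThreadID == Counter) {
      // critical body (emitted via CGOpenMPRuntime::emitCriticalRegion)
    }
    __kmpc_syncwarp(Mask); // reconverge before the next iteration
  }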
1700
1701
/// Cast value to the specified type.
1702
static llvm::Value *castValueToType(CodeGenFunction &CGF, llvm::Value *Val,
1703
                                    QualType ValTy, QualType CastTy,
1704
86
                                    SourceLocation Loc) {
1705
86
  assert(!CGF.getContext().getTypeSizeInChars(CastTy).isZero() &&
1706
86
         "Cast type must sized.");
1707
0
  assert(!CGF.getContext().getTypeSizeInChars(ValTy).isZero() &&
1708
86
         "Val type must sized.");
1709
0
  llvm::Type *LLVMCastTy = CGF.ConvertTypeForMem(CastTy);
1710
86
  if (ValTy == CastTy)
1711
56
    return Val;
1712
30
  if (CGF.getContext().getTypeSizeInChars(ValTy) ==
1713
30
      CGF.getContext().getTypeSizeInChars(CastTy))
1714
0
    return CGF.Builder.CreateBitCast(Val, LLVMCastTy);
1715
30
  if (CastTy->isIntegerType() && ValTy->isIntegerType())
1716
30
    return CGF.Builder.CreateIntCast(Val, LLVMCastTy,
1717
30
                                     CastTy->hasSignedIntegerRepresentation());
1718
0
  Address CastItem = CGF.CreateMemTemp(CastTy);
1719
0
  Address ValCastItem = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
1720
0
      CastItem, Val->getType()->getPointerTo(CastItem.getAddressSpace()),
1721
0
      Val->getType());
1722
0
  CGF.EmitStoreOfScalar(Val, ValCastItem, /*Volatile=*/false, ValTy,
1723
0
                        LValueBaseInfo(AlignmentSource::Type),
1724
0
                        TBAAAccessInfo());
1725
0
  return CGF.EmitLoadOfScalar(CastItem, /*Volatile=*/false, CastTy, Loc,
1726
0
                              LValueBaseInfo(AlignmentSource::Type),
1727
0
                              TBAAAccessInfo());
1728
30
}
1729
1730
/// This function creates calls to one of two shuffle functions to copy
1731
/// variables between lanes in a warp.
1732
static llvm::Value *createRuntimeShuffleFunction(CodeGenFunction &CGF,
1733
                                                 llvm::Value *Elem,
1734
                                                 QualType ElemType,
1735
                                                 llvm::Value *Offset,
1736
43
                                                 SourceLocation Loc) {
1737
43
  CodeGenModule &CGM = CGF.CGM;
1738
43
  CGBuilderTy &Bld = CGF.Builder;
1739
43
  CGOpenMPRuntimeGPU &RT =
1740
43
      *(static_cast<CGOpenMPRuntimeGPU *>(&CGM.getOpenMPRuntime()));
1741
43
  llvm::OpenMPIRBuilder &OMPBuilder = RT.getOMPBuilder();
1742
1743
43
  CharUnits Size = CGF.getContext().getTypeSizeInChars(ElemType);
1744
43
  assert(Size.getQuantity() <= 8 &&
1745
43
         "Unsupported bitwidth in shuffle instruction.");
1746
1747
43
  RuntimeFunction ShuffleFn = Size.getQuantity() <= 4
1748
43
                                  ? OMPRTL___kmpc_shuffle_int32
1749
43
                                  : OMPRTL___kmpc_shuffle_int64;
1750
1751
  // Cast all types to 32- or 64-bit values before calling shuffle routines.
1752
43
  QualType CastTy = CGF.getContext().getIntTypeForBitwidth(
1753
43
      Size.getQuantity() <= 4 ? 32 : 64, /*Signed=*/1);
1754
43
  llvm::Value *ElemCast = castValueToType(CGF, Elem, ElemType, CastTy, Loc);
1755
43
  llvm::Value *WarpSize =
1756
43
      Bld.CreateIntCast(RT.getGPUWarpSize(CGF), CGM.Int16Ty, /*isSigned=*/true);
1757
1758
43
  llvm::Value *ShuffledVal = CGF.EmitRuntimeCall(
1759
43
      OMPBuilder.getOrCreateRuntimeFunction(CGM.getModule(), ShuffleFn),
1760
43
      {ElemCast, Offset, WarpSize});
1761
1762
43
  return castValueToType(CGF, ShuffledVal, CastTy, ElemType, Loc);
1763
43
}
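The two runtime entry points selected above behave like a warp-level shuffle: each lane receives the (widened) value held by the lane Offset positions away. Declarations as implied by the call site, so take the exact prototypes as an assumption:

  int32_t __kmpc_shuffle_int32(int32_t Val, int16_t Offset, int16_t WarpSize);
  int64_t __kmpc_shuffle_int64(int64_t Val, int16_t Offset, int16_t WarpSize);

  // e.g. pull the partial reduction from the lane 'Offset' lanes away:
  int32_t Remote = __kmpc_shuffle_int32(Local, Offset, WarpSize);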
1764
1765
static void shuffleAndStore(CodeGenFunction &CGF, Address SrcAddr,
1766
                            Address DestAddr, QualType ElemType,
1767
43
                            llvm::Value *Offset, SourceLocation Loc) {
1768
43
  CGBuilderTy &Bld = CGF.Builder;
1769
1770
43
  CharUnits Size = CGF.getContext().getTypeSizeInChars(ElemType);
1771
  // Create the loop over the big sized data.
1772
  // ptr = (void*)Elem;
1773
  // ptrEnd = (void*) Elem + 1;
1774
  // Step = 8;
1775
  // while (ptr + Step < ptrEnd)
1776
  //   shuffle((int64_t)*ptr);
1777
  // Step = 4;
1778
  // while (ptr + Step < ptrEnd)
1779
  //   shuffle((int32_t)*ptr);
1780
  // ...
1781
43
  Address ElemPtr = DestAddr;
1782
43
  Address Ptr = SrcAddr;
1783
43
  Address PtrEnd = Bld.CreatePointerBitCastOrAddrSpaceCast(
1784
43
      Bld.CreateConstGEP(SrcAddr, 1), CGF.VoidPtrTy, CGF.Int8Ty);
1785
215
  for (int IntSize = 8; IntSize >= 1; IntSize /= 2) {
1786
172
    if (Size < CharUnits::fromQuantity(IntSize))
1787
129
      continue;
1788
43
    QualType IntType = CGF.getContext().getIntTypeForBitwidth(
1789
43
        CGF.getContext().toBits(CharUnits::fromQuantity(IntSize)),
1790
43
        /*Signed=*/1);
1791
43
    llvm::Type *IntTy = CGF.ConvertTypeForMem(IntType);
1792
43
    Ptr = Bld.CreatePointerBitCastOrAddrSpaceCast(Ptr, IntTy->getPointerTo(),
1793
43
                                                  IntTy);
1794
43
    ElemPtr = Bld.CreatePointerBitCastOrAddrSpaceCast(
1795
43
        ElemPtr, IntTy->getPointerTo(), IntTy);
1796
43
    if (Size.getQuantity() / IntSize > 1) {
1797
3
      llvm::BasicBlock *PreCondBB = CGF.createBasicBlock(".shuffle.pre_cond");
1798
3
      llvm::BasicBlock *ThenBB = CGF.createBasicBlock(".shuffle.then");
1799
3
      llvm::BasicBlock *ExitBB = CGF.createBasicBlock(".shuffle.exit");
1800
3
      llvm::BasicBlock *CurrentBB = Bld.GetInsertBlock();
1801
3
      CGF.EmitBlock(PreCondBB);
1802
3
      llvm::PHINode *PhiSrc =
1803
3
          Bld.CreatePHI(Ptr.getType(), /*NumReservedValues=*/2);
1804
3
      PhiSrc->addIncoming(Ptr.getPointer(), CurrentBB);
1805
3
      llvm::PHINode *PhiDest =
1806
3
          Bld.CreatePHI(ElemPtr.getType(), /*NumReservedValues=*/2);
1807
3
      PhiDest->addIncoming(ElemPtr.getPointer(), CurrentBB);
1808
3
      Ptr = Address(PhiSrc, Ptr.getElementType(), Ptr.getAlignment());
1809
3
      ElemPtr =
1810
3
          Address(PhiDest, ElemPtr.getElementType(), ElemPtr.getAlignment());
1811
3
      llvm::Value *PtrDiff = Bld.CreatePtrDiff(
1812
3
          CGF.Int8Ty, PtrEnd.getPointer(),
1813
3
          Bld.CreatePointerBitCastOrAddrSpaceCast(Ptr.getPointer(),
1814
3
                                                  CGF.VoidPtrTy));
1815
3
      Bld.CreateCondBr(Bld.CreateICmpSGT(PtrDiff, Bld.getInt64(IntSize - 1)),
1816
3
                       ThenBB, ExitBB);
1817
3
      CGF.EmitBlock(ThenBB);
1818
3
      llvm::Value *Res = createRuntimeShuffleFunction(
1819
3
          CGF,
1820
3
          CGF.EmitLoadOfScalar(Ptr, /*Volatile=*/false, IntType, Loc,
1821
3
                               LValueBaseInfo(AlignmentSource::Type),
1822
3
                               TBAAAccessInfo()),
1823
3
          IntType, Offset, Loc);
1824
3
      CGF.EmitStoreOfScalar(Res, ElemPtr, /*Volatile=*/false, IntType,
1825
3
                            LValueBaseInfo(AlignmentSource::Type),
1826
3
                            TBAAAccessInfo());
1827
3
      Address LocalPtr = Bld.CreateConstGEP(Ptr, 1);
1828
3
      Address LocalElemPtr = Bld.CreateConstGEP(ElemPtr, 1);
1829
3
      PhiSrc->addIncoming(LocalPtr.getPointer(), ThenBB);
1830
3
      PhiDest->addIncoming(LocalElemPtr.getPointer(), ThenBB);
1831
3
      CGF.EmitBranch(PreCondBB);
1832
3
      CGF.EmitBlock(ExitBB);
1833
40
    } else {
1834
40
      llvm::Value *Res = createRuntimeShuffleFunction(
1835
40
          CGF,
1836
40
          CGF.EmitLoadOfScalar(Ptr, /*Volatile=*/false, IntType, Loc,
1837
40
                               LValueBaseInfo(AlignmentSource::Type),
1838
40
                               TBAAAccessInfo()),
1839
40
          IntType, Offset, Loc);
1840
40
      CGF.EmitStoreOfScalar(Res, ElemPtr, /*Volatile=*/false, IntType,
1841
40
                            LValueBaseInfo(AlignmentSource::Type),
1842
40
                            TBAAAccessInfo());
1843
40
      Ptr = Bld.CreateConstGEP(Ptr, 1);
1844
40
      ElemPtr = Bld.CreateConstGEP(ElemPtr, 1);
1845
40
    }
1846
43
    Size = Size % IntSize;
1847
43
  }
1848
43
}
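A worked example of the size decomposition performed by the loop above, for a 12-byte element (comments only, mirroring the Size = Size % IntSize update):

  // IntSize = 8: 12 >= 8, one 64-bit shuffle; Size becomes 12 % 8 = 4.
  // IntSize = 4:  4 >= 4, one 32-bit shuffle; Size becomes  4 % 4 = 0.
  // IntSize = 2 and 1: Size (now 0) is smaller, both iterations continue.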
1849
1850
namespace {
1851
enum CopyAction : unsigned {
1852
  // RemoteLaneToThread: Copy over a Reduce list from a remote lane in
1853
  // the warp using shuffle instructions.
1854
  RemoteLaneToThread,
1855
  // ThreadCopy: Make a copy of a Reduce list on the thread's stack.
1856
  ThreadCopy,
1857
  // ThreadToScratchpad: Copy a team-reduced array to the scratchpad.
1858
  ThreadToScratchpad,
1859
  // ScratchpadToThread: Copy from a scratchpad array in global memory
1860
  // containing team-reduced data to a thread's stack.
1861
  ScratchpadToThread,
1862
};
1863
} // namespace
1864
1865
struct CopyOptionsTy {
1866
  llvm::Value *RemoteLaneOffset;
1867
  llvm::Value *ScratchpadIndex;
1868
  llvm::Value *ScratchpadWidth;
1869
};
1870
1871
/// Emit instructions to copy a Reduce list, which contains partially
1872
/// aggregated values, in the specified direction.
1873
static void emitReductionListCopy(
1874
    CopyAction Action, CodeGenFunction &CGF, QualType ReductionArrayTy,
1875
    ArrayRef<const Expr *> Privates, Address SrcBase, Address DestBase,
1876
56
    CopyOptionsTy CopyOptions = {nullptr, nullptr, nullptr}) {
1877
1878
56
  CodeGenModule &CGM = CGF.CGM;
1879
56
  ASTContext &C = CGM.getContext();
1880
56
  CGBuilderTy &Bld = CGF.Builder;
1881
1882
56
  llvm::Value *RemoteLaneOffset = CopyOptions.RemoteLaneOffset;
1883
56
  llvm::Value *ScratchpadIndex = CopyOptions.ScratchpadIndex;
1884
56
  llvm::Value *ScratchpadWidth = CopyOptions.ScratchpadWidth;
1885
1886
  // Iterates, element by element, through the source Reduce list and
1887
  // makes a copy.
1888
56
  unsigned Idx = 0;
1889
56
  unsigned Size = Privates.size();
1890
86
  for (const Expr *Private : Privates) {
1891
86
    Address SrcElementAddr = Address::invalid();
1892
86
    Address DestElementAddr = Address::invalid();
1893
86
    Address DestElementPtrAddr = Address::invalid();
1894
    // Should we shuffle in an element from a remote lane?
1895
86
    bool ShuffleInElement = false;
1896
    // Set to true to update the pointer in the dest Reduce list to a
1897
    // newly created element.
1898
86
    bool UpdateDestListPtr = false;
1899
    // Increment the src or dest pointer to the scratchpad, for each
1900
    // new element.
1901
86
    bool IncrScratchpadSrc = false;
1902
86
    bool IncrScratchpadDest = false;
1903
86
    QualType PrivatePtrType = C.getPointerType(Private->getType());
1904
86
    llvm::Type *PrivateLlvmPtrType = CGF.ConvertType(PrivatePtrType);
1905
1906
86
    switch (Action) {
1907
43
    case RemoteLaneToThread: {
1908
      // Step 1.1: Get the address for the src element in the Reduce list.
1909
43
      Address SrcElementPtrAddr = Bld.CreateConstArrayGEP(SrcBase, Idx);
1910
43
      SrcElementAddr =
1911
43
          CGF.EmitLoadOfPointer(CGF.Builder.CreateElementBitCast(
1912
43
                                    SrcElementPtrAddr, PrivateLlvmPtrType),
1913
43
                                PrivatePtrType->castAs<PointerType>());
1914
1915
      // Step 1.2: Create a temporary to store the element in the destination
1916
      // Reduce list.
1917
43
      DestElementPtrAddr = Bld.CreateConstArrayGEP(DestBase, Idx);
1918
43
      DestElementAddr =
1919
43
          CGF.CreateMemTemp(Private->getType(), ".omp.reduction.element");
1920
43
      ShuffleInElement = true;
1921
43
      UpdateDestListPtr = true;
1922
43
      break;
1923
0
    }
1924
43
    case ThreadCopy: {
1925
      // Step 1.1: Get the address for the src element in the Reduce list.
1926
43
      Address SrcElementPtrAddr = Bld.CreateConstArrayGEP(SrcBase, Idx);
1927
43
      SrcElementAddr =
1928
43
          CGF.EmitLoadOfPointer(CGF.Builder.CreateElementBitCast(
1929
43
                                    SrcElementPtrAddr, PrivateLlvmPtrType),
1930
43
                                PrivatePtrType->castAs<PointerType>());
1931
1932
      // Step 1.2: Get the address for dest element.  The destination
1933
      // element has already been created on the thread's stack.
1934
43
      DestElementPtrAddr = Bld.CreateConstArrayGEP(DestBase, Idx);
1935
43
      DestElementAddr =
1936
43
          CGF.EmitLoadOfPointer(CGF.Builder.CreateElementBitCast(
1937
43
                                    DestElementPtrAddr, PrivateLlvmPtrType),
1938
43
                                PrivatePtrType->castAs<PointerType>());
1939
43
      break;
1940
0
    }
1941
0
    case ThreadToScratchpad: {
1942
      // Step 1.1: Get the address for the src element in the Reduce list.
1943
0
      Address SrcElementPtrAddr = Bld.CreateConstArrayGEP(SrcBase, Idx);
1944
0
      SrcElementAddr =
1945
0
          CGF.EmitLoadOfPointer(CGF.Builder.CreateElementBitCast(
1946
0
                                    SrcElementPtrAddr, PrivateLlvmPtrType),
1947
0
                                PrivatePtrType->castAs<PointerType>());
1948
1949
      // Step 1.2: Get the address for dest element:
1950
      // address = base + index * ElementSizeInChars.
1951
0
      llvm::Value *ElementSizeInChars = CGF.getTypeSize(Private->getType());
1952
0
      llvm::Value *CurrentOffset =
1953
0
          Bld.CreateNUWMul(ElementSizeInChars, ScratchpadIndex);
1954
0
      llvm::Value *ScratchPadElemAbsolutePtrVal =
1955
0
          Bld.CreateNUWAdd(DestBase.getPointer(), CurrentOffset);
1956
0
      ScratchPadElemAbsolutePtrVal =
1957
0
          Bld.CreateIntToPtr(ScratchPadElemAbsolutePtrVal, CGF.VoidPtrTy);
1958
0
      DestElementAddr = Address(ScratchPadElemAbsolutePtrVal, CGF.Int8Ty,
1959
0
                                C.getTypeAlignInChars(Private->getType()));
1960
0
      IncrScratchpadDest = true;
1961
0
      break;
1962
0
    }
1963
0
    case ScratchpadToThread: {
1964
      // Step 1.1: Get the address for the src element in the scratchpad.
1965
      // address = base + index * ElementSizeInChars.
1966
0
      llvm::Value *ElementSizeInChars = CGF.getTypeSize(Private->getType());
1967
0
      llvm::Value *CurrentOffset =
1968
0
          Bld.CreateNUWMul(ElementSizeInChars, ScratchpadIndex);
1969
0
      llvm::Value *ScratchPadElemAbsolutePtrVal =
1970
0
          Bld.CreateNUWAdd(SrcBase.getPointer(), CurrentOffset);
1971
0
      ScratchPadElemAbsolutePtrVal =
1972
0
          Bld.CreateIntToPtr(ScratchPadElemAbsolutePtrVal, CGF.VoidPtrTy);
1973
0
      SrcElementAddr = Address(ScratchPadElemAbsolutePtrVal, CGF.Int8Ty,
1974
0
                               C.getTypeAlignInChars(Private->getType()));
1975
0
      IncrScratchpadSrc = true;
1976
1977
      // Step 1.2: Create a temporary to store the element in the destination
1978
      // Reduce list.
1979
0
      DestElementPtrAddr = Bld.CreateConstArrayGEP(DestBase, Idx);
1980
0
      DestElementAddr =
1981
0
          CGF.CreateMemTemp(Private->getType(), ".omp.reduction.element");
1982
0
      UpdateDestListPtr = true;
1983
0
      break;
1984
0
    }
1985
86
    }
1986
1987
    // Regardless of src and dest of copy, we emit the load of src
1988
    // element as this is required in all directions
1989
86
    SrcElementAddr = Bld.CreateElementBitCast(
1990
86
        SrcElementAddr, CGF.ConvertTypeForMem(Private->getType()));
1991
86
    DestElementAddr = Bld.CreateElementBitCast(DestElementAddr,
1992
86
                                               SrcElementAddr.getElementType());
1993
1994
    // Now that all active lanes have read the element in the
1995
    // Reduce list, shuffle over the value from the remote lane.
1996
86
    if (ShuffleInElement) {
1997
43
      shuffleAndStore(CGF, SrcElementAddr, DestElementAddr, Private->getType(),
1998
43
                      RemoteLaneOffset, Private->getExprLoc());
1999
43
    } else {
2000
43
      switch (CGF.getEvaluationKind(Private->getType())) {
2001
37
      case TEK_Scalar: {
2002
37
        llvm::Value *Elem = CGF.EmitLoadOfScalar(
2003
37
            SrcElementAddr, /*Volatile=*/false, Private->getType(),
2004
37
            Private->getExprLoc(), LValueBaseInfo(AlignmentSource::Type),
2005
37
            TBAAAccessInfo());
2006
        // Store the source element value to the dest element address.
2007
37
        CGF.EmitStoreOfScalar(
2008
37
            Elem, DestElementAddr, /*Volatile=*/false, Private->getType(),
2009
37
            LValueBaseInfo(AlignmentSource::Type), TBAAAccessInfo());
2010
37
        break;
2011
0
      }
2012
0
      case TEK_Complex: {
2013
0
        CodeGenFunction::ComplexPairTy Elem = CGF.EmitLoadOfComplex(
2014
0
            CGF.MakeAddrLValue(SrcElementAddr, Private->getType()),
2015
0
            Private->getExprLoc());
2016
0
        CGF.EmitStoreOfComplex(
2017
0
            Elem, CGF.MakeAddrLValue(DestElementAddr, Private->getType()),
2018
0
            /*isInit=*/false);
2019
0
        break;
2020
0
      }
2021
6
      case TEK_Aggregate:
2022
6
        CGF.EmitAggregateCopy(
2023
6
            CGF.MakeAddrLValue(DestElementAddr, Private->getType()),
2024
6
            CGF.MakeAddrLValue(SrcElementAddr, Private->getType()),
2025
6
            Private->getType(), AggValueSlot::DoesNotOverlap);
2026
6
        break;
2027
43
      }
2028
43
    }
2029
2030
    // Step 3.1: Modify reference in dest Reduce list as needed.
2031
    // Modifying the reference in Reduce list to point to the newly
2032
    // created element.  The element is live in the current function
2033
    // scope and that of functions it invokes (i.e., reduce_function).
2034
    // RemoteReduceData[i] = (void*)&RemoteElem
2035
86
    if (UpdateDestListPtr) {
2036
43
      CGF.EmitStoreOfScalar(Bld.CreatePointerBitCastOrAddrSpaceCast(
2037
43
                                DestElementAddr.getPointer(), CGF.VoidPtrTy),
2038
43
                            DestElementPtrAddr, /*Volatile=*/false,
2039
43
                            C.VoidPtrTy);
2040
43
    }
2041
2042
    // Step 4.1: Increment SrcBase/DestBase so that it points to the starting
2043
    // address of the next element in scratchpad memory, unless we're currently
2044
    // processing the last one.  Memory alignment is also taken care of here.
2045
86
    if ((IncrScratchpadDest || IncrScratchpadSrc) && (Idx + 1 < Size)) {
2046
      // FIXME: This code doesn't make any sense, it's trying to perform
2047
      // integer arithmetic on pointers.
2048
0
      llvm::Value *ScratchpadBasePtr =
2049
0
          IncrScratchpadDest ? DestBase.getPointer() : SrcBase.getPointer();
2050
0
      llvm::Value *ElementSizeInChars = CGF.getTypeSize(Private->getType());
2051
0
      ScratchpadBasePtr = Bld.CreateNUWAdd(
2052
0
          ScratchpadBasePtr,
2053
0
          Bld.CreateNUWMul(ScratchpadWidth, ElementSizeInChars));
2054
2055
      // Take care of global memory alignment for performance
2056
0
      ScratchpadBasePtr = Bld.CreateNUWSub(
2057
0
          ScratchpadBasePtr, llvm::ConstantInt::get(CGM.SizeTy, 1));
2058
0
      ScratchpadBasePtr = Bld.CreateUDiv(
2059
0
          ScratchpadBasePtr,
2060
0
          llvm::ConstantInt::get(CGM.SizeTy, GlobalMemoryAlignment));
2061
0
      ScratchpadBasePtr = Bld.CreateNUWAdd(
2062
0
          ScratchpadBasePtr, llvm::ConstantInt::get(CGM.SizeTy, 1));
2063
0
      ScratchpadBasePtr = Bld.CreateNUWMul(
2064
0
          ScratchpadBasePtr,
2065
0
          llvm::ConstantInt::get(CGM.SizeTy, GlobalMemoryAlignment));
2066
2067
0
      if (IncrScratchpadDest)
2068
0
        DestBase =
2069
0
            Address(ScratchpadBasePtr, CGF.VoidPtrTy, CGF.getPointerAlign());
2070
0
      else /* IncrScratchpadSrc = true */
2071
0
        SrcBase =
2072
0
            Address(ScratchpadBasePtr, CGF.VoidPtrTy, CGF.getPointerAlign());
2073
0
    }
2074
2075
86
    ++Idx;
2076
86
  }
2077
56
}
2078
2079
/// This function emits a helper that gathers Reduce lists from the first
2080
/// lane of every active warp to lanes in the first warp.
2081
///
2082
/// void inter_warp_copy_func(void* reduce_data, num_warps)
2083
///   shared smem[warp_size];
2084
///   For all data entries D in reduce_data:
2085
///     sync
2086
///     If (I am the first lane in each warp)
2087
///       Copy my local D to smem[warp_id]
2088
///     sync
2089
///     if (I am the first warp)
2090
///       Copy smem[thread_id] to my local D
2091
static llvm::Value *emitInterWarpCopyFunction(CodeGenModule &CGM,
2092
                                              ArrayRef<const Expr *> Privates,
2093
                                              QualType ReductionArrayTy,
2094
28
                                              SourceLocation Loc) {
2095
28
  ASTContext &C = CGM.getContext();
2096
28
  llvm::Module &M = CGM.getModule();
2097
2098
  // ReduceList: thread local Reduce list.
2099
  // At the stage of the computation when this function is called, partially
2100
  // aggregated values reside in the first lane of every active warp.
2101
28
  ImplicitParamDecl ReduceListArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
2102
28
                                  C.VoidPtrTy, ImplicitParamDecl::Other);
2103
  // NumWarps: number of warps active in the parallel region.  This could
2104
  // be smaller than 32 (max warps in a CTA) for partial block reduction.
2105
28
  ImplicitParamDecl NumWarpsArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
2106
28
                                C.getIntTypeForBitwidth(32, /* Signed */ true),
2107
28
                                ImplicitParamDecl::Other);
2108
28
  FunctionArgList Args;
2109
28
  Args.push_back(&ReduceListArg);
2110
28
  Args.push_back(&NumWarpsArg);
2111
2112
28
  const CGFunctionInfo &CGFI =
2113
28
      CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
2114
28
  auto *Fn = llvm::Function::Create(CGM.getTypes().GetFunctionType(CGFI),
2115
28
                                    llvm::GlobalValue::InternalLinkage,
2116
28
                                    "_omp_reduction_inter_warp_copy_func", &M);
2117
28
  CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, CGFI);
2118
28
  Fn->setDoesNotRecurse();
2119
28
  CodeGenFunction CGF(CGM);
2120
28
  CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, CGFI, Args, Loc, Loc);
2121
2122
28
  CGBuilderTy &Bld = CGF.Builder;
2123
2124
  // This array is used as a medium to transfer, one reduce element at a time,
2125
  // the data from the first lane of every warp to lanes in the first warp
2126
  // in order to perform the final step of a reduction in a parallel region
2127
  // (reduction across warps).  The array is placed in NVPTX __shared__ memory
2128
  // for reduced latency, as well as to have a distinct copy for concurrently
2129
  // executing target regions.  The array is declared with common linkage so
2130
  // as to be shared across compilation units.
2131
28
  StringRef TransferMediumName =
2132
28
      "__openmp_nvptx_data_transfer_temporary_storage";
2133
28
  llvm::GlobalVariable *TransferMedium =
2134
28
      M.getGlobalVariable(TransferMediumName);
2135
28
  unsigned WarpSize = CGF.getTarget().getGridValue().GV_Warp_Size;
2136
28
  if (!TransferMedium) {
2137
10
    auto *Ty = llvm::ArrayType::get(CGM.Int32Ty, WarpSize);
2138
10
    unsigned SharedAddressSpace = C.getTargetAddressSpace(LangAS::cuda_shared);
2139
10
    TransferMedium = new llvm::GlobalVariable(
2140
10
        M, Ty, /*isConstant=*/false, llvm::GlobalVariable::WeakAnyLinkage,
2141
10
        llvm::UndefValue::get(Ty), TransferMediumName,
2142
10
        /*InsertBefore=*/nullptr, llvm::GlobalVariable::NotThreadLocal,
2143
10
        SharedAddressSpace);
2144
10
    CGM.addCompilerUsedGlobal(TransferMedium);
2145
10
  }
2146
2147
28
  auto &RT = static_cast<CGOpenMPRuntimeGPU &>(CGF.CGM.getOpenMPRuntime());
2148
  // Get the CUDA thread id of the current OpenMP thread on the GPU.
2149
28
  llvm::Value *ThreadID = RT.getGPUThreadID(CGF);
2150
  // nvptx_lane_id = nvptx_id % warpsize
2151
28
  llvm::Value *LaneID = getNVPTXLaneID(CGF);
2152
  // nvptx_warp_id = nvptx_id / warpsize
2153
28
  llvm::Value *WarpID = getNVPTXWarpID(CGF);
2154
2155
28
  Address AddrReduceListArg = CGF.GetAddrOfLocalVar(&ReduceListArg);
2156
28
  llvm::Type *ElemTy = CGF.ConvertTypeForMem(ReductionArrayTy);
2157
28
  Address LocalReduceList(
2158
28
      Bld.CreatePointerBitCastOrAddrSpaceCast(
2159
28
          CGF.EmitLoadOfScalar(
2160
28
              AddrReduceListArg, /*Volatile=*/false, C.VoidPtrTy, Loc,
2161
28
              LValueBaseInfo(AlignmentSource::Type), TBAAAccessInfo()),
2162
28
          ElemTy->getPointerTo()),
2163
28
      ElemTy, CGF.getPointerAlign());
2164
2165
28
  unsigned Idx = 0;
2166
43
  for (const Expr *Private : Privates) {
2167
    //
2168
    // Warp master copies reduce element to transfer medium in __shared__
2169
    // memory.
2170
    //
2171
43
    unsigned RealTySize =
2172
43
        C.getTypeSizeInChars(Private->getType())
2173
43
            .alignTo(C.getTypeAlignInChars(Private->getType()))
2174
43
            .getQuantity();
2175
107
    for (unsigned TySize = 4; TySize > 0 && RealTySize > 0; TySize /= 2) {
2176
64
      unsigned NumIters = RealTySize / TySize;
2177
64
      if (NumIters == 0)
2178
21
        continue;
2179
43
      QualType CType = C.getIntTypeForBitwidth(
2180
43
          C.toBits(CharUnits::fromQuantity(TySize)), /*Signed=*/1);
2181
43
      llvm::Type *CopyType = CGF.ConvertTypeForMem(CType);
2182
43
      CharUnits Align = CharUnits::fromQuantity(TySize);
2183
43
      llvm::Value *Cnt = nullptr;
2184
43
      Address CntAddr = Address::invalid();
2185
43
      llvm::BasicBlock *PrecondBB = nullptr;
2186
43
      llvm::BasicBlock *ExitBB = nullptr;
2187
43
      if (NumIters > 1) {
2188
13
        CntAddr = CGF.CreateMemTemp(C.IntTy, ".cnt.addr");
2189
13
        CGF.EmitStoreOfScalar(llvm::Constant::getNullValue(CGM.IntTy), CntAddr,
2190
13
                              /*Volatile=*/false, C.IntTy);
2191
13
        PrecondBB = CGF.createBasicBlock("precond");
2192
13
        ExitBB = CGF.createBasicBlock("exit");
2193
13
        llvm::BasicBlock *BodyBB = CGF.createBasicBlock("body");
2194
        // There is no need to emit line number for unconditional branch.
2195
13
        (void)ApplyDebugLocation::CreateEmpty(CGF);
2196
13
        CGF.EmitBlock(PrecondBB);
2197
13
        Cnt = CGF.EmitLoadOfScalar(CntAddr, /*Volatile=*/false, C.IntTy, Loc);
2198
13
        llvm::Value *Cmp =
2199
13
            Bld.CreateICmpULT(Cnt, llvm::ConstantInt::get(CGM.IntTy, NumIters));
2200
13
        Bld.CreateCondBr(Cmp, BodyBB, ExitBB);
2201
13
        CGF.EmitBlock(BodyBB);
2202
13
      }
2203
      // kmpc_barrier.
2204
43
      CGM.getOpenMPRuntime().emitBarrierCall(CGF, Loc, OMPD_unknown,
2205
43
                                             /*EmitChecks=*/false,
2206
43
                                             /*ForceSimpleCall=*/true);
2207
43
      llvm::BasicBlock *ThenBB = CGF.createBasicBlock("then");
2208
43
      llvm::BasicBlock *ElseBB = CGF.createBasicBlock("else");
2209
43
      llvm::BasicBlock *MergeBB = CGF.createBasicBlock("ifcont");
2210
2211
      // if (lane_id == 0)
2212
43
      llvm::Value *IsWarpMaster = Bld.CreateIsNull(LaneID, "warp_master");
2213
43
      Bld.CreateCondBr(IsWarpMaster, ThenBB, ElseBB);
2214
43
      CGF.EmitBlock(ThenBB);
2215
2216
      // Reduce element = LocalReduceList[i]
2217
43
      Address ElemPtrPtrAddr = Bld.CreateConstArrayGEP(LocalReduceList, Idx);
2218
43
      llvm::Value *ElemPtrPtr = CGF.EmitLoadOfScalar(
2219
43
          ElemPtrPtrAddr, /*Volatile=*/false, C.VoidPtrTy, SourceLocation());
2220
      // elemptr = ((CopyType*)(elemptrptr)) + I
2221
43
      Address ElemPtr(ElemPtrPtr, CGF.Int8Ty, Align);
2222
43
      ElemPtr = Bld.CreateElementBitCast(ElemPtr, CopyType);
2223
43
      if (NumIters > 1)
2224
13
        ElemPtr = Bld.CreateGEP(ElemPtr, Cnt);
2225
2226
      // Get pointer to location in transfer medium.
2227
      // MediumPtr = &medium[warp_id]
2228
43
      llvm::Value *MediumPtrVal = Bld.CreateInBoundsGEP(
2229
43
          TransferMedium->getValueType(), TransferMedium,
2230
43
          {llvm::Constant::getNullValue(CGM.Int64Ty), WarpID});
2231
      // Casting to actual data type.
2232
      // MediumPtr = (CopyType*)MediumPtrAddr;
2233
43
      Address MediumPtr(
2234
43
          Bld.CreateBitCast(
2235
43
              MediumPtrVal,
2236
43
              CopyType->getPointerTo(
2237
43
                  MediumPtrVal->getType()->getPointerAddressSpace())),
2238
43
          CopyType, Align);
2239
2240
      // elem = *elemptr
2241
      //*MediumPtr = elem
2242
43
      llvm::Value *Elem = CGF.EmitLoadOfScalar(
2243
43
          ElemPtr, /*Volatile=*/false, CType, Loc,
2244
43
          LValueBaseInfo(AlignmentSource::Type), TBAAAccessInfo());
2245
      // Store the source element value to the dest element address.
2246
43
      CGF.EmitStoreOfScalar(Elem, MediumPtr, /*Volatile=*/true, CType,
2247
43
                            LValueBaseInfo(AlignmentSource::Type),
2248
43
                            TBAAAccessInfo());
2249
2250
43
      Bld.CreateBr(MergeBB);
2251
2252
43
      CGF.EmitBlock(ElseBB);
2253
43
      Bld.CreateBr(MergeBB);
2254
2255
43
      CGF.EmitBlock(MergeBB);
2256
2257
      // kmpc_barrier.
2258
43
      CGM.getOpenMPRuntime().emitBarrierCall(CGF, Loc, OMPD_unknown,
2259
43
                                             /*EmitChecks=*/false,
2260
43
                                             /*ForceSimpleCall=*/true);
2261
2262
      //
2263
      // Warp 0 copies reduce element from transfer medium.
2264
      //
2265
43
      llvm::BasicBlock *W0ThenBB = CGF.createBasicBlock("then");
2266
43
      llvm::BasicBlock *W0ElseBB = CGF.createBasicBlock("else");
2267
43
      llvm::BasicBlock *W0MergeBB = CGF.createBasicBlock("ifcont");
2268
2269
43
      Address AddrNumWarpsArg = CGF.GetAddrOfLocalVar(&NumWarpsArg);
2270
43
      llvm::Value *NumWarpsVal = CGF.EmitLoadOfScalar(
2271
43
          AddrNumWarpsArg, /*Volatile=*/false, C.IntTy, Loc);
2272
2273
      // Up to 32 threads in warp 0 are active.
2274
43
      llvm::Value *IsActiveThread =
2275
43
          Bld.CreateICmpULT(ThreadID, NumWarpsVal, "is_active_thread");
2276
43
      Bld.CreateCondBr(IsActiveThread, W0ThenBB, W0ElseBB);
2277
2278
43
      CGF.EmitBlock(W0ThenBB);
2279
2280
      // SrcMediumPtr = &medium[tid]
2281
43
      llvm::Value *SrcMediumPtrVal = Bld.CreateInBoundsGEP(
2282
43
          TransferMedium->getValueType(), TransferMedium,
2283
43
          {llvm::Constant::getNullValue(CGM.Int64Ty), ThreadID});
2284
      // SrcMediumVal = *SrcMediumPtr;
2285
43
      Address SrcMediumPtr(
2286
43
          Bld.CreateBitCast(
2287
43
              SrcMediumPtrVal,
2288
43
              CopyType->getPointerTo(
2289
43
                  SrcMediumPtrVal->getType()->getPointerAddressSpace())),
2290
43
          CopyType, Align);
2291
2292
      // TargetElemPtr = (CopyType*)(SrcDataAddr[i]) + I
2293
43
      Address TargetElemPtrPtr = Bld.CreateConstArrayGEP(LocalReduceList, Idx);
2294
43
      llvm::Value *TargetElemPtrVal = CGF.EmitLoadOfScalar(
2295
43
          TargetElemPtrPtr, /*Volatile=*/false, C.VoidPtrTy, Loc);
2296
43
      Address TargetElemPtr(TargetElemPtrVal, CGF.Int8Ty, Align);
2297
43
      TargetElemPtr = Bld.CreateElementBitCast(TargetElemPtr, CopyType);
2298
43
      if (NumIters > 1)
2299
13
        TargetElemPtr = Bld.CreateGEP(TargetElemPtr, Cnt);
2300
2301
      // *TargetElemPtr = SrcMediumVal;
2302
43
      llvm::Value *SrcMediumValue =
2303
43
          CGF.EmitLoadOfScalar(SrcMediumPtr, /*Volatile=*/true, CType, Loc);
2304
43
      CGF.EmitStoreOfScalar(SrcMediumValue, TargetElemPtr, /*Volatile=*/false,
2305
43
                            CType);
2306
43
      Bld.CreateBr(W0MergeBB);
2307
2308
43
      CGF.EmitBlock(W0ElseBB);
2309
43
      Bld.CreateBr(W0MergeBB);
2310
2311
43
      CGF.EmitBlock(W0MergeBB);
2312
2313
43
      if (NumIters > 1) {
2314
13
        Cnt = Bld.CreateNSWAdd(Cnt, llvm::ConstantInt::get(CGM.IntTy, /*V=*/1));
2315
13
        CGF.EmitStoreOfScalar(Cnt, CntAddr, /*Volatile=*/false, C.IntTy);
2316
13
        CGF.EmitBranch(PrecondBB);
2317
13
        (void)ApplyDebugLocation::CreateEmpty(CGF);
2318
13
        CGF.EmitBlock(ExitBB);
2319
13
      }
2320
43
      RealTySize %= TySize;
2321
43
    }
2322
43
    ++Idx;
2323
43
  }
2324
2325
28
  CGF.FinishFunction();
2326
28
  return Fn;
2327
28
}
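Condensed, the function built above implements the doc comment's pseudocode with one 32-bit shared-memory slot per warp; sketched below (pseudocode, not compilable):

  // shared int32_t transfer_medium[warp_size];
  // for each (up to) 32-bit chunk C of each entry in reduce_data:
  //   __kmpc_barrier(...);
  //   if (lane_id == 0) transfer_medium[warp_id] = C;   // warp master publishes
  //   __kmpc_barrier(...);
  //   if (thread_id < num_warps)                        // first warp only
  //     C' = transfer_medium[thread_id];                // gather into warp 0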
2328
2329
/// Emit a helper that reduces data across two OpenMP threads (lanes)
2330
/// in the same warp.  It uses shuffle instructions to copy over data from
2331
/// a remote lane's stack.  The reduction algorithm performed is specified
2332
/// by the fourth parameter.
2333
///
2334
/// Algorithm Versions.
2335
/// Full Warp Reduce (argument value 0):
2336
///   This algorithm assumes that all 32 lanes are active and gathers
2337
///   data from these 32 lanes, producing a single resultant value.
2338
/// Contiguous Partial Warp Reduce (argument value 1):
2339
///   This algorithm assumes that only a *contiguous* subset of lanes
2340
///   are active.  This happens for the last warp in a parallel region
2341
///   when the user specified num_threads is not an integer multiple of
2342
///   32.  This contiguous subset always starts with the zeroth lane.
2343
/// Partial Warp Reduce (argument value 2):
2344
///   This algorithm gathers data from any number of lanes at any position.
2345
/// All reduced values are stored in the lowest possible lane.  The set
2346
/// of problems every algorithm addresses is a super set of those
2347
/// addressable by algorithms with a lower version number.  Overhead
2348
/// increases as algorithm version increases.
2349
///
2350
/// Terminology
2351
/// Reduce element:
2352
///   Reduce element refers to the individual data field with primitive
2353
///   data types to be combined and reduced across threads.
2354
/// Reduce list:
2355
///   Reduce list refers to a collection of local, thread-private
2356
///   reduce elements.
2357
/// Remote Reduce list:
2358
///   Remote Reduce list refers to a collection of remote (relative to
2359
///   the current thread) reduce elements.
2360
///
2361
/// We distinguish between three states of threads that are important to
2362
/// the implementation of this function.
2363
/// Alive threads:
2364
///   Threads in a warp executing the SIMT instruction, as distinguished from
2365
///   threads that are inactive due to divergent control flow.
2366
/// Active threads:
2367
///   The minimal set of threads that has to be alive upon entry to this
2368
///   function.  The computation is correct iff active threads are alive.
2369
///   Some threads are alive but they are not active because they do not
2370
///   contribute to the computation in any useful manner.  Turning them off
2371
///   may introduce control flow overheads without any tangible benefits.
2372
/// Effective threads:
2373
///   In order to comply with the argument requirements of the shuffle
2374
///   function, we must keep all lanes holding data alive.  But at most
2375
///   half of them perform value aggregation; we refer to this half of
2376
///   threads as effective. The other half is simply handing off their
2377
///   data.
2378
///
2379
/// Procedure
2380
/// Value shuffle:
2381
///   In this step active threads transfer data from higher lane positions
2382
///   in the warp to lower lane positions, creating Remote Reduce list.
2383
/// Value aggregation:
2384
///   In this step, effective threads combine their thread local Reduce list
2385
///   with Remote Reduce list and store the result in the thread local
2386
///   Reduce list.
2387
/// Value copy:
2388
///   In this step, we deal with the assumption made by algorithm 2
2389
///   (i.e. contiguity assumption).  When we have an odd number of lanes
2390
///   active, say 2k+1, only k threads will be effective and therefore k
2391
///   new values will be produced.  However, the Reduce list owned by the
2392
///   (2k+1)th thread is ignored in the value aggregation.  Therefore
2393
///   we copy the Reduce list from the (2k+1)th lane to (k+1)th lane so
2394
///   that the contiguity assumption still holds.
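///
/// Worked example of the value copy step (illustrative): with five active
/// lanes (2k+1 = 5) and Offset = 2, lanes 0 and 1 aggregate the values
/// shuffled down from lanes 2 and 3, and lane 4's Reduce list is copied
/// down to lane 2 so that the next round (Offset = 1) again sees a
/// contiguous set of active lanes.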
2395
static llvm::Function *emitShuffleAndReduceFunction(
2396
    CodeGenModule &CGM, ArrayRef<const Expr *> Privates,
2397
28
    QualType ReductionArrayTy, llvm::Function *ReduceFn, SourceLocation Loc) {
2398
28
  ASTContext &C = CGM.getContext();
2399
2400
  // Thread local Reduce list used to host the values of data to be reduced.
2401
28
  ImplicitParamDecl ReduceListArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
2402
28
                                  C.VoidPtrTy, ImplicitParamDecl::Other);
2403
  // Current lane id; could be logical.
2404
28
  ImplicitParamDecl LaneIDArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.ShortTy,
2405
28
                              ImplicitParamDecl::Other);
2406
  // Offset of the remote source lane relative to the current lane.
2407
28
  ImplicitParamDecl RemoteLaneOffsetArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
2408
28
                                        C.ShortTy, ImplicitParamDecl::Other);
2409
  // Algorithm version.  This is expected to be known at compile time.
2410
28
  ImplicitParamDecl AlgoVerArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
2411
28
                               C.ShortTy, ImplicitParamDecl::Other);
2412
28
  FunctionArgList Args;
2413
28
  Args.push_back(&ReduceListArg);
2414
28
  Args.push_back(&LaneIDArg);
2415
28
  Args.push_back(&RemoteLaneOffsetArg);
2416
28
  Args.push_back(&AlgoVerArg);
2417
2418
28
  const CGFunctionInfo &CGFI =
2419
28
      CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
2420
28
  auto *Fn = llvm::Function::Create(
2421
28
      CGM.getTypes().GetFunctionType(CGFI), llvm::GlobalValue::InternalLinkage,
2422
28
      "_omp_reduction_shuffle_and_reduce_func", &CGM.getModule());
2423
28
  CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, CGFI);
2424
28
  Fn->setDoesNotRecurse();
2425
2426
28
  CodeGenFunction CGF(CGM);
2427
28
  CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, CGFI, Args, Loc, Loc);
2428
2429
28
  CGBuilderTy &Bld = CGF.Builder;
2430
2431
28
  Address AddrReduceListArg = CGF.GetAddrOfLocalVar(&ReduceListArg);
2432
28
  llvm::Type *ElemTy = CGF.ConvertTypeForMem(ReductionArrayTy);
2433
28
  Address LocalReduceList(
2434
28
      Bld.CreatePointerBitCastOrAddrSpaceCast(
2435
28
          CGF.EmitLoadOfScalar(AddrReduceListArg, /*Volatile=*/false,
2436
28
                               C.VoidPtrTy, SourceLocation()),
2437
28
          ElemTy->getPointerTo()),
2438
28
      ElemTy, CGF.getPointerAlign());
2439
2440
28
  Address AddrLaneIDArg = CGF.GetAddrOfLocalVar(&LaneIDArg);
2441
28
  llvm::Value *LaneIDArgVal = CGF.EmitLoadOfScalar(
2442
28
      AddrLaneIDArg, /*Volatile=*/false, C.ShortTy, SourceLocation());
2443
2444
28
  Address AddrRemoteLaneOffsetArg = CGF.GetAddrOfLocalVar(&RemoteLaneOffsetArg);
2445
28
  llvm::Value *RemoteLaneOffsetArgVal = CGF.EmitLoadOfScalar(
2446
28
      AddrRemoteLaneOffsetArg, /*Volatile=*/false, C.ShortTy, SourceLocation());
2447
2448
28
  Address AddrAlgoVerArg = CGF.GetAddrOfLocalVar(&AlgoVerArg);
2449
28
  llvm::Value *AlgoVerArgVal = CGF.EmitLoadOfScalar(
2450
28
      AddrAlgoVerArg, /*Volatile=*/false, C.ShortTy, SourceLocation());
2451
2452
  // Create a local thread-private variable to host the Reduce list
2453
  // from a remote lane.
2454
28
  Address RemoteReduceList =
2455
28
      CGF.CreateMemTemp(ReductionArrayTy, ".omp.reduction.remote_reduce_list");
2456
2457
  // This loop iterates through the list of reduce elements and copies,
2458
  // element by element, from a remote lane in the warp to RemoteReduceList,
2459
  // hosted on the thread's stack.
2460
28
  emitReductionListCopy(RemoteLaneToThread, CGF, ReductionArrayTy, Privates,
2461
28
                        LocalReduceList, RemoteReduceList,
2462
28
                        {/*RemoteLaneOffset=*/RemoteLaneOffsetArgVal,
2463
28
                         /*ScratchpadIndex=*/nullptr,
2464
28
                         /*ScratchpadWidth=*/nullptr});
2465
2466
  // The action to be performed on the Remote Reduce list depends
2467
  // on the algorithm version.
2468
  //
2469
  //  if (AlgoVer==0) || (AlgoVer==1 && (LaneId < Offset)) || (AlgoVer==2 &&
2470
  //  LaneId % 2 == 0 && Offset > 0):
2471
  //    do the reduction value aggregation
2472
  //
2473
  //  The thread local variable Reduce list is mutated in place to host the
2474
  //  reduced data, which is the aggregated value produced from local and
2475
  //  remote lanes.
2476
  //
2477
  //  Note that AlgoVer is expected to be a constant integer known at compile
2478
  //  time.
2479
  //  When AlgoVer==0, the first conjunction evaluates to true, making
2480
  //    the entire predicate true at compile time.
2481
  //  When AlgoVer==1, the second conjunction has only the second part to be
2482
  //    evaluated at runtime.  The other conjunctions evaluate to false
2483
  //    at compile time.
2484
  //  When AlgoVer==2, the third conjunction has only the second part to be
2485
  //    evaluated at runtime.  The other conjunctions evaluate to false
2486
  //    at compile time.
2487
28
  llvm::Value *CondAlgo0 = Bld.CreateIsNull(AlgoVerArgVal);
2488
2489
28
  llvm::Value *Algo1 = Bld.CreateICmpEQ(AlgoVerArgVal, Bld.getInt16(1));
2490
28
  llvm::Value *CondAlgo1 = Bld.CreateAnd(
2491
28
      Algo1, Bld.CreateICmpULT(LaneIDArgVal, RemoteLaneOffsetArgVal));
2492
2493
28
  llvm::Value *Algo2 = Bld.CreateICmpEQ(AlgoVerArgVal, Bld.getInt16(2));
2494
28
  llvm::Value *CondAlgo2 = Bld.CreateAnd(
2495
28
      Algo2, Bld.CreateIsNull(Bld.CreateAnd(LaneIDArgVal, Bld.getInt16(1))));
2496
28
  CondAlgo2 = Bld.CreateAnd(
2497
28
      CondAlgo2, Bld.CreateICmpSGT(RemoteLaneOffsetArgVal, Bld.getInt16(0)));
2498
2499
28
  llvm::Value *CondReduce = Bld.CreateOr(CondAlgo0, CondAlgo1);
2500
28
  CondReduce = Bld.CreateOr(CondReduce, CondAlgo2);
2501
2502
28
  llvm::BasicBlock *ThenBB = CGF.createBasicBlock("then");
2503
28
  llvm::BasicBlock *ElseBB = CGF.createBasicBlock("else");
2504
28
  llvm::BasicBlock *MergeBB = CGF.createBasicBlock("ifcont");
2505
28
  Bld.CreateCondBr(CondReduce, ThenBB, ElseBB);
2506
2507
28
  CGF.EmitBlock(ThenBB);
2508
  // reduce_function(LocalReduceList, RemoteReduceList)
2509
28
  llvm::Value *LocalReduceListPtr = Bld.CreatePointerBitCastOrAddrSpaceCast(
2510
28
      LocalReduceList.getPointer(), CGF.VoidPtrTy);
2511
28
  llvm::Value *RemoteReduceListPtr = Bld.CreatePointerBitCastOrAddrSpaceCast(
2512
28
      RemoteReduceList.getPointer(), CGF.VoidPtrTy);
2513
28
  CGM.getOpenMPRuntime().emitOutlinedFunctionCall(
2514
28
      CGF, Loc, ReduceFn, {LocalReduceListPtr, RemoteReduceListPtr});
2515
28
  Bld.CreateBr(MergeBB);
2516
2517
28
  CGF.EmitBlock(ElseBB);
2518
28
  Bld.CreateBr(MergeBB);
2519
2520
28
  CGF.EmitBlock(MergeBB);
2521
2522
  // if (AlgoVer==1 && (LaneId >= Offset)) copy Remote Reduce list to local
2523
  // Reduce list.
2524
28
  Algo1 = Bld.CreateICmpEQ(AlgoVerArgVal, Bld.getInt16(1));
2525
28
  llvm::Value *CondCopy = Bld.CreateAnd(
2526
28
      Algo1, Bld.CreateICmpUGE(LaneIDArgVal, RemoteLaneOffsetArgVal));
2527
2528
28
  llvm::BasicBlock *CpyThenBB = CGF.createBasicBlock("then");
2529
28
  llvm::BasicBlock *CpyElseBB = CGF.createBasicBlock("else");
2530
28
  llvm::BasicBlock *CpyMergeBB = CGF.createBasicBlock("ifcont");
2531
28
  Bld.CreateCondBr(CondCopy, CpyThenBB, CpyElseBB);
2532
2533
28
  CGF.EmitBlock(CpyThenBB);
2534
28
  emitReductionListCopy(ThreadCopy, CGF, ReductionArrayTy, Privates,
2535
28
                        RemoteReduceList, LocalReduceList);
2536
28
  Bld.CreateBr(CpyMergeBB);
2537
2538
28
  CGF.EmitBlock(CpyElseBB);
2539
28
  Bld.CreateBr(CpyMergeBB);
2540
2541
28
  CGF.EmitBlock(CpyMergeBB);
2542
2543
28
  CGF.FinishFunction();
2544
28
  return Fn;
2545
28
}
2546
2547
/// This function emits a helper that copies all the reduction variables from
2548
/// the team into the provided global buffer for the reduction variables.
2549
///
2550
/// void list_to_global_copy_func(void *buffer, int Idx, void *reduce_data)
2551
///   For all data entries D in reduce_data:
2552
///     Copy local D to buffer.D[Idx]
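///
/// For a hypothetical reduction over an 'int a' and a 'double b', with a
/// buffer of 'BufNum' records, the generated helper is morally equivalent
/// to the following (all names here are illustrative only):
///
///   struct Buffer { int a[BufNum]; double b[BufNum]; };
///   void list_to_global_copy(void *buffer, int Idx, void *reduce_data) {
///     Buffer *B = (Buffer *)buffer;
///     void **RL = (void **)reduce_data;  // thread-local Reduce list
///     B->a[Idx] = *(int *)RL[0];
///     B->b[Idx] = *(double *)RL[1];
///   }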
2553
static llvm::Value *emitListToGlobalCopyFunction(
2554
    CodeGenModule &CGM, ArrayRef<const Expr *> Privates,
2555
    QualType ReductionArrayTy, SourceLocation Loc,
2556
    const RecordDecl *TeamReductionRec,
2557
    const llvm::SmallDenseMap<const ValueDecl *, const FieldDecl *>
2558
9
        &VarFieldMap) {
2559
9
  ASTContext &C = CGM.getContext();
2560
2561
  // Buffer: global reduction buffer.
2562
9
  ImplicitParamDecl BufferArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
2563
9
                              C.VoidPtrTy, ImplicitParamDecl::Other);
2564
  // Idx: index of the buffer.
2565
9
  ImplicitParamDecl IdxArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.IntTy,
2566
9
                           ImplicitParamDecl::Other);
2567
  // ReduceList: thread local Reduce list.
2568
9
  ImplicitParamDecl ReduceListArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
2569
9
                                  C.VoidPtrTy, ImplicitParamDecl::Other);
2570
9
  FunctionArgList Args;
2571
9
  Args.push_back(&BufferArg);
2572
9
  Args.push_back(&IdxArg);
2573
9
  Args.push_back(&ReduceListArg);
2574
2575
9
  const CGFunctionInfo &CGFI =
2576
9
      CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
2577
9
  auto *Fn = llvm::Function::Create(
2578
9
      CGM.getTypes().GetFunctionType(CGFI), llvm::GlobalValue::InternalLinkage,
2579
9
      "_omp_reduction_list_to_global_copy_func", &CGM.getModule());
2580
9
  CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, CGFI);
2581
9
  Fn->setDoesNotRecurse();
2582
9
  CodeGenFunction CGF(CGM);
2583
9
  CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, CGFI, Args, Loc, Loc);
2584
2585
9
  CGBuilderTy &Bld = CGF.Builder;
2586
2587
9
  Address AddrReduceListArg = CGF.GetAddrOfLocalVar(&ReduceListArg);
2588
9
  Address AddrBufferArg = CGF.GetAddrOfLocalVar(&BufferArg);
2589
9
  llvm::Type *ElemTy = CGF.ConvertTypeForMem(ReductionArrayTy);
2590
9
  Address LocalReduceList(
2591
9
      Bld.CreatePointerBitCastOrAddrSpaceCast(
2592
9
          CGF.EmitLoadOfScalar(AddrReduceListArg, /*Volatile=*/false,
2593
9
                               C.VoidPtrTy, Loc),
2594
9
          ElemTy->getPointerTo()),
2595
9
      ElemTy, CGF.getPointerAlign());
2596
9
  QualType StaticTy = C.getRecordType(TeamReductionRec);
2597
9
  llvm::Type *LLVMReductionsBufferTy =
2598
9
      CGM.getTypes().ConvertTypeForMem(StaticTy);
2599
9
  llvm::Value *BufferArrPtr = Bld.CreatePointerBitCastOrAddrSpaceCast(
2600
9
      CGF.EmitLoadOfScalar(AddrBufferArg, /*Volatile=*/false, C.VoidPtrTy, Loc),
2601
9
      LLVMReductionsBufferTy->getPointerTo());
2602
9
  llvm::Value *Idxs[] = {llvm::ConstantInt::getNullValue(CGF.Int32Ty),
2603
9
                         CGF.EmitLoadOfScalar(CGF.GetAddrOfLocalVar(&IdxArg),
2604
9
                                              /*Volatile=*/false, C.IntTy,
2605
9
                                              Loc)};
2606
9
  unsigned Idx = 0;
2607
15
  for (const Expr *Private : Privates) {
2608
    // Reduce element = LocalReduceList[i]
2609
15
    Address ElemPtrPtrAddr = Bld.CreateConstArrayGEP(LocalReduceList, Idx);
2610
15
    llvm::Value *ElemPtrPtr = CGF.EmitLoadOfScalar(
2611
15
        ElemPtrPtrAddr, /*Volatile=*/false, C.VoidPtrTy, SourceLocation());
2612
    // elemptr = ((CopyType*)(elemptrptr)) + I
2613
15
    ElemTy = CGF.ConvertTypeForMem(Private->getType());
2614
15
    ElemPtrPtr = Bld.CreatePointerBitCastOrAddrSpaceCast(
2615
15
        ElemPtrPtr, ElemTy->getPointerTo());
2616
15
    Address ElemPtr =
2617
15
        Address(ElemPtrPtr, ElemTy, C.getTypeAlignInChars(Private->getType()));
2618
15
    const ValueDecl *VD = cast<DeclRefExpr>(Private)->getDecl();
2619
    // Global = Buffer.VD[Idx];
2620
15
    const FieldDecl *FD = VarFieldMap.lookup(VD);
2621
15
    LValue GlobLVal = CGF.EmitLValueForField(
2622
15
        CGF.MakeNaturalAlignAddrLValue(BufferArrPtr, StaticTy), FD);
2623
15
    Address GlobAddr = GlobLVal.getAddress(CGF);
2624
15
    llvm::Value *BufferPtr = Bld.CreateInBoundsGEP(GlobAddr.getElementType(),
2625
15
                                                   GlobAddr.getPointer(), Idxs);
2626
15
    GlobLVal.setAddress(Address(BufferPtr,
2627
15
                                CGF.ConvertTypeForMem(Private->getType()),
2628
15
                                GlobAddr.getAlignment()));
2629
15
    switch (CGF.getEvaluationKind(Private->getType())) {
2630
15
    case TEK_Scalar: {
2631
15
      llvm::Value *V = CGF.EmitLoadOfScalar(
2632
15
          ElemPtr, /*Volatile=*/false, Private->getType(), Loc,
2633
15
          LValueBaseInfo(AlignmentSource::Type), TBAAAccessInfo());
2634
15
      CGF.EmitStoreOfScalar(V, GlobLVal);
2635
15
      break;
2636
0
    }
2637
0
    case TEK_Complex: {
2638
0
      CodeGenFunction::ComplexPairTy V = CGF.EmitLoadOfComplex(
2639
0
          CGF.MakeAddrLValue(ElemPtr, Private->getType()), Loc);
2640
0
      CGF.EmitStoreOfComplex(V, GlobLVal, /*isInit=*/false);
2641
0
      break;
2642
0
    }
2643
0
    case TEK_Aggregate:
2644
0
      CGF.EmitAggregateCopy(GlobLVal,
2645
0
                            CGF.MakeAddrLValue(ElemPtr, Private->getType()),
2646
0
                            Private->getType(), AggValueSlot::DoesNotOverlap);
2647
0
      break;
2648
15
    }
2649
15
    ++Idx;
2650
15
  }
2651
2652
9
  CGF.FinishFunction();
2653
9
  return Fn;
2654
9
}
2655
2656
/// This function emits a helper that reduces all the reduction variables from
2657
/// the team into the provided global buffer for the reduction variables.
2658
///
2659
/// void list_to_global_reduce_func(void *buffer, int Idx, void *reduce_data)
2660
///  void *GlobPtrs[];
2661
///  GlobPtrs[0] = (void*)&buffer.D0[Idx];
2662
///  ...
2663
///  GlobPtrs[N] = (void*)&buffer.DN[Idx];
2664
///  reduce_function(GlobPtrs, reduce_data);
2665
static llvm::Value *emitListToGlobalReduceFunction(
2666
    CodeGenModule &CGM, ArrayRef<const Expr *> Privates,
2667
    QualType ReductionArrayTy, SourceLocation Loc,
2668
    const RecordDecl *TeamReductionRec,
2669
    const llvm::SmallDenseMap<const ValueDecl *, const FieldDecl *>
2670
        &VarFieldMap,
2671
9
    llvm::Function *ReduceFn) {
2672
9
  ASTContext &C = CGM.getContext();
2673
2674
  // Buffer: global reduction buffer.
2675
9
  ImplicitParamDecl BufferArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
2676
9
                              C.VoidPtrTy, ImplicitParamDecl::Other);
2677
  // Idx: index of the buffer.
2678
9
  ImplicitParamDecl IdxArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.IntTy,
2679
9
                           ImplicitParamDecl::Other);
2680
  // ReduceList: thread local Reduce list.
2681
9
  ImplicitParamDecl ReduceListArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
2682
9
                                  C.VoidPtrTy, ImplicitParamDecl::Other);
2683
9
  FunctionArgList Args;
2684
9
  Args.push_back(&BufferArg);
2685
9
  Args.push_back(&IdxArg);
2686
9
  Args.push_back(&ReduceListArg);
2687
2688
9
  const CGFunctionInfo &CGFI =
2689
9
      CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
2690
9
  auto *Fn = llvm::Function::Create(
2691
9
      CGM.getTypes().GetFunctionType(CGFI), llvm::GlobalValue::InternalLinkage,
2692
9
      "_omp_reduction_list_to_global_reduce_func", &CGM.getModule());
2693
9
  CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, CGFI);
2694
9
  Fn->setDoesNotRecurse();
2695
9
  CodeGenFunction CGF(CGM);
2696
9
  CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, CGFI, Args, Loc, Loc);
2697
2698
9
  CGBuilderTy &Bld = CGF.Builder;
2699
2700
9
  Address AddrBufferArg = CGF.GetAddrOfLocalVar(&BufferArg);
2701
9
  QualType StaticTy = C.getRecordType(TeamReductionRec);
2702
9
  llvm::Type *LLVMReductionsBufferTy =
2703
9
      CGM.getTypes().ConvertTypeForMem(StaticTy);
2704
9
  llvm::Value *BufferArrPtr = Bld.CreatePointerBitCastOrAddrSpaceCast(
2705
9
      CGF.EmitLoadOfScalar(AddrBufferArg, /*Volatile=*/false, C.VoidPtrTy, Loc),
2706
9
      LLVMReductionsBufferTy->getPointerTo());
2707
2708
  // 1. Build a list of reduction variables.
2709
  // void *RedList[<n>] = {<ReductionVars>[0], ..., <ReductionVars>[<n>-1]};
2710
9
  Address ReductionList =
2711
9
      CGF.CreateMemTemp(ReductionArrayTy, ".omp.reduction.red_list");
2712
9
  auto IPriv = Privates.begin();
2713
9
  llvm::Value *Idxs[] = {llvm::ConstantInt::getNullValue(CGF.Int32Ty),
2714
9
                         CGF.EmitLoadOfScalar(CGF.GetAddrOfLocalVar(&IdxArg),
2715
9
                                              /*Volatile=*/false, C.IntTy,
2716
9
                                              Loc)};
2717
9
  unsigned Idx = 0;
2718
24
  for (unsigned I = 0, E = Privates.size(); I < E; ++I, ++IPriv, ++Idx) {
2719
15
    Address Elem = CGF.Builder.CreateConstArrayGEP(ReductionList, Idx);
2720
    // Global = Buffer.VD[Idx];
2721
15
    const ValueDecl *VD = cast<DeclRefExpr>(*IPriv)->getDecl();
2722
15
    const FieldDecl *FD = VarFieldMap.lookup(VD);
2723
15
    LValue GlobLVal = CGF.EmitLValueForField(
2724
15
        CGF.MakeNaturalAlignAddrLValue(BufferArrPtr, StaticTy), FD);
2725
15
    Address GlobAddr = GlobLVal.getAddress(CGF);
2726
15
    llvm::Value *BufferPtr = Bld.CreateInBoundsGEP(
2727
15
        GlobAddr.getElementType(), GlobAddr.getPointer(), Idxs);
2728
15
    llvm::Value *Ptr = CGF.EmitCastToVoidPtr(BufferPtr);
2729
15
    CGF.EmitStoreOfScalar(Ptr, Elem, /*Volatile=*/false, C.VoidPtrTy);
2730
15
    if ((*IPriv)->getType()->isVariablyModifiedType()) {
2731
      // Store array size.
2732
0
      ++Idx;
2733
0
      Elem = CGF.Builder.CreateConstArrayGEP(ReductionList, Idx);
2734
0
      llvm::Value *Size = CGF.Builder.CreateIntCast(
2735
0
          CGF.getVLASize(
2736
0
                 CGF.getContext().getAsVariableArrayType((*IPriv)->getType()))
2737
0
              .NumElts,
2738
0
          CGF.SizeTy, /*isSigned=*/false);
2739
0
      CGF.Builder.CreateStore(CGF.Builder.CreateIntToPtr(Size, CGF.VoidPtrTy),
2740
0
                              Elem);
2741
0
    }
2742
15
  }
2743
2744
  // Call reduce_function(GlobalReduceList, ReduceList)
2745
9
  llvm::Value *GlobalReduceList =
2746
9
      CGF.EmitCastToVoidPtr(ReductionList.getPointer());
2747
9
  Address AddrReduceListArg = CGF.GetAddrOfLocalVar(&ReduceListArg);
2748
9
  llvm::Value *ReducedPtr = CGF.EmitLoadOfScalar(
2749
9
      AddrReduceListArg, /*Volatile=*/false, C.VoidPtrTy, Loc);
2750
9
  CGM.getOpenMPRuntime().emitOutlinedFunctionCall(
2751
9
      CGF, Loc, ReduceFn, {GlobalReduceList, ReducedPtr});
2752
9
  CGF.FinishFunction();
2753
9
  return Fn;
2754
9
}
2755
2756
/// This function emits a helper that copies all the reduction variables back
2757
/// from the provided global buffer into the team's thread-local reduce list.
2758
///
2759
/// void global_to_list_copy_func(void *buffer, int Idx, void *reduce_data)
2760
///   For all data entries D in reduce_data:
2761
///     Copy buffer.D[Idx] to local D;
2762
static llvm::Value *emitGlobalToListCopyFunction(
2763
    CodeGenModule &CGM, ArrayRef<const Expr *> Privates,
2764
    QualType ReductionArrayTy, SourceLocation Loc,
2765
    const RecordDecl *TeamReductionRec,
2766
    const llvm::SmallDenseMap<const ValueDecl *, const FieldDecl *>
2767
9
        &VarFieldMap) {
2768
9
  ASTContext &C = CGM.getContext();
2769
2770
  // Buffer: global reduction buffer.
2771
9
  ImplicitParamDecl BufferArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
2772
9
                              C.VoidPtrTy, ImplicitParamDecl::Other);
2773
  // Idx: index of the buffer.
2774
9
  ImplicitParamDecl IdxArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.IntTy,
2775
9
                           ImplicitParamDecl::Other);
2776
  // ReduceList: thread local Reduce list.
2777
9
  ImplicitParamDecl ReduceListArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
2778
9
                                  C.VoidPtrTy, ImplicitParamDecl::Other);
2779
9
  FunctionArgList Args;
2780
9
  Args.push_back(&BufferArg);
2781
9
  Args.push_back(&IdxArg);
2782
9
  Args.push_back(&ReduceListArg);
2783
2784
9
  const CGFunctionInfo &CGFI =
2785
9
      CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
2786
9
  auto *Fn = llvm::Function::Create(
2787
9
      CGM.getTypes().GetFunctionType(CGFI), llvm::GlobalValue::InternalLinkage,
2788
9
      "_omp_reduction_global_to_list_copy_func", &CGM.getModule());
2789
9
  CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, CGFI);
2790
9
  Fn->setDoesNotRecurse();
2791
9
  CodeGenFunction CGF(CGM);
2792
9
  CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, CGFI, Args, Loc, Loc);
2793
2794
9
  CGBuilderTy &Bld = CGF.Builder;
2795
2796
9
  Address AddrReduceListArg = CGF.GetAddrOfLocalVar(&ReduceListArg);
2797
9
  Address AddrBufferArg = CGF.GetAddrOfLocalVar(&BufferArg);
2798
9
  llvm::Type *ElemTy = CGF.ConvertTypeForMem(ReductionArrayTy);
2799
9
  Address LocalReduceList(
2800
9
      Bld.CreatePointerBitCastOrAddrSpaceCast(
2801
9
          CGF.EmitLoadOfScalar(AddrReduceListArg, /*Volatile=*/false,
2802
9
                               C.VoidPtrTy, Loc),
2803
9
          ElemTy->getPointerTo()),
2804
9
      ElemTy, CGF.getPointerAlign());
2805
9
  QualType StaticTy = C.getRecordType(TeamReductionRec);
2806
9
  llvm::Type *LLVMReductionsBufferTy =
2807
9
      CGM.getTypes().ConvertTypeForMem(StaticTy);
2808
9
  llvm::Value *BufferArrPtr = Bld.CreatePointerBitCastOrAddrSpaceCast(
2809
9
      CGF.EmitLoadOfScalar(AddrBufferArg, /*Volatile=*/false, C.VoidPtrTy, Loc),
2810
9
      LLVMReductionsBufferTy->getPointerTo());
2811
2812
9
  llvm::Value *Idxs[] = {llvm::ConstantInt::getNullValue(CGF.Int32Ty),
2813
9
                         CGF.EmitLoadOfScalar(CGF.GetAddrOfLocalVar(&IdxArg),
2814
9
                                              /*Volatile=*/false, C.IntTy,
2815
9
                                              Loc)};
2816
9
  unsigned Idx = 0;
2817
15
  for (const Expr *Private : Privates) {
2818
    // Reduce element = LocalReduceList[i]
2819
15
    Address ElemPtrPtrAddr = Bld.CreateConstArrayGEP(LocalReduceList, Idx);
2820
15
    llvm::Value *ElemPtrPtr = CGF.EmitLoadOfScalar(
2821
15
        ElemPtrPtrAddr, /*Volatile=*/false, C.VoidPtrTy, SourceLocation());
2822
    // elemptr = ((CopyType*)(elemptrptr)) + I
2823
15
    ElemTy = CGF.ConvertTypeForMem(Private->getType());
2824
15
    ElemPtrPtr = Bld.CreatePointerBitCastOrAddrSpaceCast(
2825
15
        ElemPtrPtr, ElemTy->getPointerTo());
2826
15
    Address ElemPtr =
2827
15
        Address(ElemPtrPtr, ElemTy, C.getTypeAlignInChars(Private->getType()));
2828
15
    const ValueDecl *VD = cast<DeclRefExpr>(Private)->getDecl();
2829
    // Global = Buffer.VD[Idx];
2830
15
    const FieldDecl *FD = VarFieldMap.lookup(VD);
2831
15
    LValue GlobLVal = CGF.EmitLValueForField(
2832
15
        CGF.MakeNaturalAlignAddrLValue(BufferArrPtr, StaticTy), FD);
2833
15
    Address GlobAddr = GlobLVal.getAddress(CGF);
2834
15
    llvm::Value *BufferPtr = Bld.CreateInBoundsGEP(GlobAddr.getElementType(),
2835
15
                                                   GlobAddr.getPointer(), Idxs);
2836
15
    GlobLVal.setAddress(Address(BufferPtr,
2837
15
                                CGF.ConvertTypeForMem(Private->getType()),
2838
15
                                GlobAddr.getAlignment()));
2839
15
    switch (CGF.getEvaluationKind(Private->getType())) {
2840
15
    case TEK_Scalar: {
2841
15
      llvm::Value *V = CGF.EmitLoadOfScalar(GlobLVal, Loc);
2842
15
      CGF.EmitStoreOfScalar(V, ElemPtr, /*Volatile=*/false, Private->getType(),
2843
15
                            LValueBaseInfo(AlignmentSource::Type),
2844
15
                            TBAAAccessInfo());
2845
15
      break;
2846
0
    }
2847
0
    case TEK_Complex: {
2848
0
      CodeGenFunction::ComplexPairTy V = CGF.EmitLoadOfComplex(GlobLVal, Loc);
2849
0
      CGF.EmitStoreOfComplex(V, CGF.MakeAddrLValue(ElemPtr, Private->getType()),
2850
0
                             /*isInit=*/false);
2851
0
      break;
2852
0
    }
2853
0
    case TEK_Aggregate:
2854
0
      CGF.EmitAggregateCopy(CGF.MakeAddrLValue(ElemPtr, Private->getType()),
2855
0
                            GlobLVal, Private->getType(),
2856
0
                            AggValueSlot::DoesNotOverlap);
2857
0
      break;
2858
15
    }
2859
15
    ++Idx;
2860
15
  }
2861
2862
9
  CGF.FinishFunction();
2863
9
  return Fn;
2864
9
}
2865
2866
/// This function emits a helper that reduces all the reduction variables in
2867
/// the provided global buffer into the team's thread-local reduce list.
2868
///
2869
/// void global_to_list_reduce_func(void *buffer, int Idx, void *reduce_data)
2870
///  void *GlobPtrs[];
2871
///  GlobPtrs[0] = (void*)&buffer.D0[Idx];
2872
///  ...
2873
///  GlobPtrs[N] = (void*)&buffer.DN[Idx];
2874
///  reduce_function(reduce_data, GlobPtrs);
2875
static llvm::Value *emitGlobalToListReduceFunction(
2876
    CodeGenModule &CGM, ArrayRef<const Expr *> Privates,
2877
    QualType ReductionArrayTy, SourceLocation Loc,
2878
    const RecordDecl *TeamReductionRec,
2879
    const llvm::SmallDenseMap<const ValueDecl *, const FieldDecl *>
2880
        &VarFieldMap,
2881
9
    llvm::Function *ReduceFn) {
2882
9
  ASTContext &C = CGM.getContext();
2883
2884
  // Buffer: global reduction buffer.
2885
9
  ImplicitParamDecl BufferArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
2886
9
                              C.VoidPtrTy, ImplicitParamDecl::Other);
2887
  // Idx: index of the buffer.
2888
9
  ImplicitParamDecl IdxArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.IntTy,
2889
9
                           ImplicitParamDecl::Other);
2890
  // ReduceList: thread local Reduce list.
2891
9
  ImplicitParamDecl ReduceListArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
2892
9
                                  C.VoidPtrTy, ImplicitParamDecl::Other);
2893
9
  FunctionArgList Args;
2894
9
  Args.push_back(&BufferArg);
2895
9
  Args.push_back(&IdxArg);
2896
9
  Args.push_back(&ReduceListArg);
2897
2898
9
  const CGFunctionInfo &CGFI =
2899
9
      CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
2900
9
  auto *Fn = llvm::Function::Create(
2901
9
      CGM.getTypes().GetFunctionType(CGFI), llvm::GlobalValue::InternalLinkage,
2902
9
      "_omp_reduction_global_to_list_reduce_func", &CGM.getModule());
2903
9
  CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, CGFI);
2904
9
  Fn->setDoesNotRecurse();
2905
9
  CodeGenFunction CGF(CGM);
2906
9
  CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, CGFI, Args, Loc, Loc);
2907
2908
9
  CGBuilderTy &Bld = CGF.Builder;
2909
2910
9
  Address AddrBufferArg = CGF.GetAddrOfLocalVar(&BufferArg);
2911
9
  QualType StaticTy = C.getRecordType(TeamReductionRec);
2912
9
  llvm::Type *LLVMReductionsBufferTy =
2913
9
      CGM.getTypes().ConvertTypeForMem(StaticTy);
2914
9
  llvm::Value *BufferArrPtr = Bld.CreatePointerBitCastOrAddrSpaceCast(
2915
9
      CGF.EmitLoadOfScalar(AddrBufferArg, /*Volatile=*/false, C.VoidPtrTy, Loc),
2916
9
      LLVMReductionsBufferTy->getPointerTo());
2917
2918
  // 1. Build a list of reduction variables.
2919
  // void *RedList[<n>] = {<ReductionVars>[0], ..., <ReductionVars>[<n>-1]};
2920
9
  Address ReductionList =
2921
9
      CGF.CreateMemTemp(ReductionArrayTy, ".omp.reduction.red_list");
2922
9
  auto IPriv = Privates.begin();
2923
9
  llvm::Value *Idxs[] = {llvm::ConstantInt::getNullValue(CGF.Int32Ty),
2924
9
                         CGF.EmitLoadOfScalar(CGF.GetAddrOfLocalVar(&IdxArg),
2925
9
                                              /*Volatile=*/false, C.IntTy,
2926
9
                                              Loc)};
2927
9
  unsigned Idx = 0;
2928
24
  for (unsigned I = 0, E = Privates.size(); I < E; ++I, ++IPriv, ++Idx) {
2929
15
    Address Elem = CGF.Builder.CreateConstArrayGEP(ReductionList, Idx);
2930
    // Global = Buffer.VD[Idx];
2931
15
    const ValueDecl *VD = cast<DeclRefExpr>(*IPriv)->getDecl();
2932
15
    const FieldDecl *FD = VarFieldMap.lookup(VD);
2933
15
    LValue GlobLVal = CGF.EmitLValueForField(
2934
15
        CGF.MakeNaturalAlignAddrLValue(BufferArrPtr, StaticTy), FD);
2935
15
    Address GlobAddr = GlobLVal.getAddress(CGF);
2936
15
    llvm::Value *BufferPtr = Bld.CreateInBoundsGEP(
2937
15
        GlobAddr.getElementType(), GlobAddr.getPointer(), Idxs);
2938
15
    llvm::Value *Ptr = CGF.EmitCastToVoidPtr(BufferPtr);
2939
15
    CGF.EmitStoreOfScalar(Ptr, Elem, /*Volatile=*/false, C.VoidPtrTy);
2940
15
    if ((*IPriv)->getType()->isVariablyModifiedType()) {
2941
      // Store array size.
2942
0
      ++Idx;
2943
0
      Elem = CGF.Builder.CreateConstArrayGEP(ReductionList, Idx);
2944
0
      llvm::Value *Size = CGF.Builder.CreateIntCast(
2945
0
          CGF.getVLASize(
2946
0
                 CGF.getContext().getAsVariableArrayType((*IPriv)->getType()))
2947
0
              .NumElts,
2948
0
          CGF.SizeTy, /*isSigned=*/false);
2949
0
      CGF.Builder.CreateStore(CGF.Builder.CreateIntToPtr(Size, CGF.VoidPtrTy),
2950
0
                              Elem);
2951
0
    }
2952
15
  }
2953
2954
  // Call reduce_function(ReduceList, GlobalReduceList)
2955
9
  llvm::Value *GlobalReduceList =
2956
9
      CGF.EmitCastToVoidPtr(ReductionList.getPointer());
2957
9
  Address AddrReduceListArg = CGF.GetAddrOfLocalVar(&ReduceListArg);
2958
9
  llvm::Value *ReducedPtr = CGF.EmitLoadOfScalar(
2959
9
      AddrReduceListArg, /*Volatile=*/false, C.VoidPtrTy, Loc);
2960
9
  CGM.getOpenMPRuntime().emitOutlinedFunctionCall(
2961
9
      CGF, Loc, ReduceFn, {ReducedPtr, GlobalReduceList});
2962
9
  CGF.FinishFunction();
2963
9
  return Fn;
2964
9
}
2965
2966
///
2967
/// Design of OpenMP reductions on the GPU
2968
///
2969
/// Consider a typical OpenMP program with one or more reduction
2970
/// clauses:
2971
///
2972
/// float foo;
2973
/// double bar;
2974
/// #pragma omp target teams distribute parallel for \
2975
///             reduction(+:foo) reduction(*:bar)
2976
/// for (int i = 0; i < N; i++) {
2977
///   foo += A[i]; bar *= B[i];
2978
/// }
2979
///
2980
/// where 'foo' and 'bar' are reduced across all OpenMP threads in
2981
/// all teams.  In our OpenMP implementation on the NVPTX device an
2982
/// OpenMP team is mapped to a CUDA threadblock and OpenMP threads
2983
/// within a team are mapped to CUDA threads within a threadblock.
2984
/// Our goal is to efficiently aggregate values across all OpenMP
2985
/// threads such that:
2986
///
2987
///   - the compiler and runtime are logically concise, and
2988
///   - the reduction is performed efficiently in a hierarchical
2989
///     manner as follows: within OpenMP threads in the same warp,
2990
///     across warps in a threadblock, and finally across teams on
2991
///     the NVPTX device.
2992
///
2993
/// Introduction to Decoupling
2994
///
2995
/// We would like to decouple the compiler and the runtime so that the
2996
/// latter is ignorant of the reduction variables (number, data types)
2997
/// and the reduction operators.  This allows a simpler interface
2998
/// and implementation while still attaining good performance.
2999
///
3000
/// Pseudocode for the aforementioned OpenMP program generated by the
3001
/// compiler is as follows:
3002
///
3003
/// 1. Create private copies of reduction variables on each OpenMP
3004
///    thread: 'foo_private', 'bar_private'
3005
/// 2. Each OpenMP thread reduces the chunk of 'A' and 'B' assigned
3006
///    to it and writes the result in 'foo_private' and 'bar_private'
3007
///    respectively.
3008
/// 3. Call the OpenMP runtime on the GPU to reduce within a team
3009
///    and store the result on the team master:
3010
///
3011
///     __kmpc_nvptx_parallel_reduce_nowait_v2(...,
3012
///        reduceData, shuffleReduceFn, interWarpCpyFn)
3013
///
3014
///     where:
3015
///       struct ReduceData {
3016
///         double *foo;
3017
///         double *bar;
3018
///       } reduceData
3019
///       reduceData.foo = &foo_private
3020
///       reduceData.bar = &bar_private
3021
///
3022
///     'shuffleReduceFn' and 'interWarpCpyFn' are pointers to two
3023
///     auxiliary functions generated by the compiler that operate on
3024
///     variables of type 'ReduceData'.  They help the runtime perform
3025
///     algorithmic steps in a data-agnostic manner.
3026
///
3027
///     'shuffleReduceFn' is a pointer to a function that reduces data
3028
///     of type 'ReduceData' across two OpenMP threads (lanes) in the
3029
///     same warp.  It takes the following arguments as input:
3030
///
3031
///     a. variable of type 'ReduceData' on the calling lane,
3032
///     b. its lane_id,
3033
///     c. an offset relative to the current lane_id to generate a
3034
///        remote_lane_id.  The remote lane contains the second
3035
///        variable of type 'ReduceData' that is to be reduced.
3036
///     d. an algorithm version parameter determining which reduction
3037
///        algorithm to use.
3038
///
3039
///     'shuffleReduceFn' retrieves data from the remote lane using
3040
///     efficient GPU shuffle intrinsics and reduces, using the
3041
///     algorithm specified by the 4th parameter, the two operands
3042
///     element-wise.  The result is written to the first operand.
3043
///
3044
///     Different reduction algorithms are implemented in different
3045
///     runtime functions, all calling 'shuffleReduceFn' to perform
3046
///     the essential reduction step.  Therefore, based on the 4th
3047
///     parameter, this function behaves slightly differently to
3048
///     cooperate with the runtime to ensure correctness under
3049
///     different circumstances.
3050
///
3051
///     'InterWarpCpyFn' is a pointer to a function that transfers
3052
///     reduced variables across warps.  It tunnels, through CUDA
3053
///     shared memory, the thread-private data of type 'ReduceData'
3054
///     from lane 0 of each warp to a lane in the first warp.
3055
/// 4. Call the OpenMP runtime on the GPU to reduce across teams.
3056
///    The last team writes the global reduced value to memory.
3057
///
3058
///     ret = __kmpc_nvptx_teams_reduce_nowait(...,
3059
///             reduceData, shuffleReduceFn, interWarpCpyFn,
3060
///             scratchpadCopyFn, loadAndReduceFn)
3061
///
3062
///     'scratchpadCopyFn' is a helper that stores reduced
3063
///     data from the team master to a scratchpad array in
3064
///     global memory.
3065
///
3066
///     'loadAndReduceFn' is a helper that loads data from
3067
///     the scratchpad array and reduces it with the input
3068
///     operand.
3069
///
3070
///     These compiler generated functions hide address
3071
///     calculation and alignment information from the runtime.
3072
/// 5. if ret == 1:
3073
///     The team master of the last team stores the reduced
3074
///     result to the globals in memory.
3075
///     foo += reduceData.foo; bar *= reduceData.bar
3076
///
3077
///
3078
/// Warp Reduction Algorithms
3079
///
3080
/// On the warp level, we have three algorithms implemented in the
3081
/// OpenMP runtime depending on the number of active lanes:
3082
///
3083
/// Full Warp Reduction
3084
///
3085
/// The reduce algorithm within a warp where all lanes are active
3086
/// is implemented in the runtime as follows:
3087
///
3088
/// full_warp_reduce(void *reduce_data,
3089
///                  kmp_ShuffleReductFctPtr ShuffleReduceFn) {
3090
///   for (int offset = WARPSIZE/2; offset > 0; offset /= 2)
3091
///     ShuffleReduceFn(reduce_data, 0, offset, 0);
3092
/// }
3093
///
3094
/// The algorithm completes in log(2, WARPSIZE) steps.
3095
///
3096
/// 'ShuffleReduceFn' is used here with lane_id set to 0 because it is
3097
/// not used; we therefore save instructions by not retrieving lane_id
3098
/// from the corresponding special registers.  The 4th parameter, which
3099
/// represents the version of the algorithm being used, is set to 0 to
3100
/// signify full warp reduction.
3101
///
3102
/// In this version, 'ShuffleReduceFn' behaves, per element, as follows:
3103
///
3104
/// #reduce_elem refers to an element in the local lane's data structure
3105
/// #remote_elem is retrieved from a remote lane
3106
/// remote_elem = shuffle_down(reduce_elem, offset, WARPSIZE);
3107
/// reduce_elem = reduce_elem REDUCE_OP remote_elem;
3108
///
3109
/// Contiguous Partial Warp Reduction
3110
///
3111
/// This reduce algorithm is used within a warp where only the first
3112
/// 'n' (n <= WARPSIZE) lanes are active.  It is typically used when the
3113
/// number of OpenMP threads in a parallel region is not a multiple of
3114
/// WARPSIZE.  The algorithm is implemented in the runtime as follows:
3115
///
3116
/// void
3117
/// contiguous_partial_reduce(void *reduce_data,
3118
///                           kmp_ShuffleReductFctPtr ShuffleReduceFn,
3119
///                           int size, int lane_id) {
3120
///   int curr_size;
3121
///   int offset;
3122
///   curr_size = size;
3123
///   offset = curr_size/2;
3124
///   while (offset>0) {
3125
///     ShuffleReduceFn(reduce_data, lane_id, offset, 1);
3126
///     curr_size = (curr_size+1)/2;
3127
///     offset = curr_size/2;
3128
///   }
3129
/// }
3130
///
3131
/// In this version, 'ShuffleReduceFn' behaves, per element, as follows:
3132
///
3133
/// remote_elem = shuffle_down(reduce_elem, offset, WARPSIZE);
3134
/// if (lane_id < offset)
3135
///     reduce_elem = reduce_elem REDUCE_OP remote_elem
3136
/// else
3137
///     reduce_elem = remote_elem
3138
///
3139
/// This algorithm assumes that the data to be reduced are located in a
3140
/// contiguous subset of lanes starting from the first.  When there is
3141
/// an odd number of active lanes, the data in the last lane is not
3142
/// aggregated with any other lane's data but is instead copied over.
3143
///
3144
/// Dispersed Partial Warp Reduction
3145
///
3146
/// This algorithm is used within a warp when any discontiguous subset of
3147
/// lanes is active.  It is used to implement the reduction operation
3148
/// across lanes in an OpenMP simd region or in a nested parallel region.
3149
///
3150
/// void
3151
/// dispersed_partial_reduce(void *reduce_data,
3152
///                          kmp_ShuffleReductFctPtr ShuffleReduceFn) {
3153
///   int size, remote_id;
3154
///   int logical_lane_id = number_of_active_lanes_before_me() * 2;
3155
///   do {
3156
///       remote_id = next_active_lane_id_right_after_me();
3157
///       # the above function returns 0 if no active lane
3158
///       # is present right after the current lane.
3159
///       size = number_of_active_lanes_in_this_warp();
3160
///       logical_lane_id /= 2;
3161
///       ShuffleReduceFn(reduce_data, logical_lane_id,
3162
///                       remote_id-1-threadIdx.x, 2);
3163
///   } while (logical_lane_id % 2 == 0 && size > 1);
3164
/// }
3165
///
3166
/// There is no assumption made about the initial state of the reduction.
3167
/// Any number of lanes (>=1) could be active at any position.  The reduction
3168
/// result is returned in the first active lane.
3169
///
3170
/// In this version, 'ShuffleReduceFn' behaves, per element, as follows:
3171
///
3172
/// remote_elem = shuffle_down(reduce_elem, offset, WARPSIZE);
3173
/// if (lane_id % 2 == 0 && offset > 0)
3174
///     reduce_elem = reduce_elem REDUCE_OP remote_elem
3175
/// else
3176
///     reduce_elem = remote_elem
3177
///
3178
///
3179
/// Intra-Team Reduction
3180
///
3181
/// This function, as implemented in the runtime call
3182
/// '__kmpc_nvptx_parallel_reduce_nowait_v2', aggregates data across OpenMP
3183
/// threads in a team.  It first reduces within a warp using the
3184
/// aforementioned algorithms.  We then proceed to gather all such
3185
/// reduced values at the first warp.
3186
///
3187
/// The runtime makes use of the function 'InterWarpCpyFn', which copies
3188
/// data from each of the "warp master" (zeroth lane of each warp, where
3189
/// warp-reduced data is held) to the zeroth warp.  This step reduces (in
3190
/// a mathematical sense) the problem of reduction across warp masters in
3191
/// a block to the problem of warp reduction.
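///
/// With illustrative names, the runtime side of this step is roughly:
///
///   gpu_warp_reduce(reduce_data, ShuffleReduceFn);   // one of the three
///                                                    // algorithms above
///   InterWarpCpyFn(reduce_data, num_warps);          // warp masters write
///                                                    // to shared memory;
///                                                    // warp 0 reads back
///   if (warp_id == 0)
///     gpu_warp_reduce(reduce_data, ShuffleReduceFn); // reduce warp masters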
3192
///
3193
///
3194
/// Inter-Team Reduction
3195
///
3196
/// Once a team has reduced its data to a single value, it is stored in
3197
/// a global scratchpad array.  Since each team has a distinct slot, this
3198
/// can be done without locking.
3199
///
3200
/// The last team to write to the scratchpad array proceeds to reduce the
3201
/// scratchpad array.  One or more workers in the last team use the helper
3202
/// 'loadAndReduceFn' to load and reduce values from the array, i.e.,
3203
/// the k'th worker reduces every k'th element.
3204
///
3205
/// Finally, a call is made to '__kmpc_nvptx_parallel_reduce_nowait_v2' to
3206
/// reduce across workers and compute a globally reduced value.
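///
/// Sketched with illustrative names, the scratchpad phase is roughly:
///
///   scratchpadCopyFn(scratchpad, team_id, reduce_data); // distinct slots,
///                                                       // so no locking
///   if (atomic_inc(&teams_done) == num_teams - 1) {     // last team
///     for (int i = thread_id; i < num_teams; i += num_threads)
///       loadAndReduceFn(reduce_data, scratchpad, i);    // k'th worker takes
///                                                       // every k'th slot
///     // then reduce across this team's workers as described above
///   }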
3207
///
3208
void CGOpenMPRuntimeGPU::emitReduction(
3209
    CodeGenFunction &CGF, SourceLocation Loc, ArrayRef<const Expr *> Privates,
3210
    ArrayRef<const Expr *> LHSExprs, ArrayRef<const Expr *> RHSExprs,
3211
34
    ArrayRef<const Expr *> ReductionOps, ReductionOptionsTy Options) {
3212
34
  if (!CGF.HaveInsertPoint())
3213
0
    return;
3214
3215
34
  bool ParallelReduction = isOpenMPParallelDirective(Options.ReductionKind);
3216
34
#ifndef NDEBUG
3217
34
  bool TeamsReduction = isOpenMPTeamsDirective(Options.ReductionKind);
3218
34
#endif
3219
3220
34
  if (Options.SimpleReduction) {
3221
6
    assert(!TeamsReduction && !ParallelReduction &&
3222
6
           "Invalid reduction selection in emitReduction.");
3223
0
    CGOpenMPRuntime::emitReduction(CGF, Loc, Privates, LHSExprs, RHSExprs,
3224
6
                                   ReductionOps, Options);
3225
6
    return;
3226
6
  }
3227
3228
28
  assert((TeamsReduction || ParallelReduction) &&
3229
28
         "Invalid reduction selection in emitReduction.");
3230
3231
  // Build res = __kmpc_nvptx_parallel_reduce_nowait_v2(<loc>, <gtid>, <n>,
3232
  // sizeof(RedList), RedList, shuffle_reduce_func, interwarp_copy_func);
3233
  // or, for teams reductions,
3234
  // res = __kmpc_nvptx_teams_reduce_nowait_v2(<loc>, <gtid>, <buffer>, ...);
3235
0
  llvm::Value *RTLoc = emitUpdateLocation(CGF, Loc);
3236
28
  llvm::Value *ThreadId = getThreadID(CGF, Loc);
3237
3238
28
  llvm::Value *Res;
3239
28
  ASTContext &C = CGM.getContext();
3240
  // 1. Build a list of reduction variables.
3241
  // void *RedList[<n>] = {<ReductionVars>[0], ..., <ReductionVars>[<n>-1]};
3242
28
  auto Size = RHSExprs.size();
3243
43
  for (const Expr *E : Privates) {
3244
43
    if (E->getType()->isVariablyModifiedType())
3245
      // Reserve place for array size.
3246
0
      ++Size;
3247
43
  }
3248
28
  llvm::APInt ArraySize(/*unsigned int numBits=*/32, Size);
3249
28
  QualType ReductionArrayTy =
3250
28
      C.getConstantArrayType(C.VoidPtrTy, ArraySize, nullptr, ArrayType::Normal,
3251
28
                             /*IndexTypeQuals=*/0);
3252
28
  Address ReductionList =
3253
28
      CGF.CreateMemTemp(ReductionArrayTy, ".omp.reduction.red_list");
3254
28
  auto IPriv = Privates.begin();
3255
28
  unsigned Idx = 0;
3256
71
  for (unsigned I = 0, E = RHSExprs.size(); I < E; ++I, ++IPriv, ++Idx) {
3257
43
    Address Elem = CGF.Builder.CreateConstArrayGEP(ReductionList, Idx);
3258
43
    CGF.Builder.CreateStore(
3259
43
        CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
3260
43
            CGF.EmitLValue(RHSExprs[I]).getPointer(CGF), CGF.VoidPtrTy),
3261
43
        Elem);
3262
43
    if ((*IPriv)->getType()->isVariablyModifiedType()) {
3263
      // Store array size.
3264
0
      ++Idx;
3265
0
      Elem = CGF.Builder.CreateConstArrayGEP(ReductionList, Idx);
3266
0
      llvm::Value *Size = CGF.Builder.CreateIntCast(
3267
0
          CGF.getVLASize(
3268
0
                 CGF.getContext().getAsVariableArrayType((*IPriv)->getType()))
3269
0
              .NumElts,
3270
0
          CGF.SizeTy, /*isSigned=*/false);
3271
0
      CGF.Builder.CreateStore(CGF.Builder.CreateIntToPtr(Size, CGF.VoidPtrTy),
3272
0
                              Elem);
3273
0
    }
3274
43
  }
3275
3276
28
  llvm::Value *RL = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
3277
28
      ReductionList.getPointer(), CGF.VoidPtrTy);
3278
28
  llvm::Function *ReductionFn =
3279
28
      emitReductionFunction(Loc, CGF.ConvertTypeForMem(ReductionArrayTy),
3280
28
                            Privates, LHSExprs, RHSExprs, ReductionOps);
3281
28
  llvm::Value *ReductionArrayTySize = CGF.getTypeSize(ReductionArrayTy);
3282
28
  llvm::Function *ShuffleAndReduceFn = emitShuffleAndReduceFunction(
3283
28
      CGM, Privates, ReductionArrayTy, ReductionFn, Loc);
3284
28
  llvm::Value *InterWarpCopyFn =
3285
28
      emitInterWarpCopyFunction(CGM, Privates, ReductionArrayTy, Loc);
3286
3287
28
  if (ParallelReduction) {
3288
19
    llvm::Value *Args[] = {RTLoc,
3289
19
                           ThreadId,
3290
19
                           CGF.Builder.getInt32(RHSExprs.size()),
3291
19
                           ReductionArrayTySize,
3292
19
                           RL,
3293
19
                           ShuffleAndReduceFn,
3294
19
                           InterWarpCopyFn};
3295
3296
19
    Res = CGF.EmitRuntimeCall(
3297
19
        OMPBuilder.getOrCreateRuntimeFunction(
3298
19
            CGM.getModule(), OMPRTL___kmpc_nvptx_parallel_reduce_nowait_v2),
3299
19
        Args);
3300
19
  } else {
3301
9
    assert(TeamsReduction && "expected teams reduction.");
3302
0
    llvm::SmallDenseMap<const ValueDecl *, const FieldDecl *> VarFieldMap;
3303
9
    llvm::SmallVector<const ValueDecl *, 4> PrivatesReductions(Privates.size());
3304
9
    int Cnt = 0;
3305
15
    for (const Expr *DRE : Privates) {
3306
15
      PrivatesReductions[Cnt] = cast<DeclRefExpr>(DRE)->getDecl();
3307
15
      ++Cnt;
3308
15
    }
3309
9
    const RecordDecl *TeamReductionRec = ::buildRecordForGlobalizedVars(
3310
9
        CGM.getContext(), PrivatesReductions, llvm::None, VarFieldMap,
3311
9
        C.getLangOpts().OpenMPCUDAReductionBufNum);
3312
9
    TeamsReductions.push_back(TeamReductionRec);
3313
9
    if (!KernelTeamsReductionPtr) {
3314
3
      KernelTeamsReductionPtr = new llvm::GlobalVariable(
3315
3
          CGM.getModule(), CGM.VoidPtrTy, /*isConstant=*/true,
3316
3
          llvm::GlobalValue::InternalLinkage, nullptr,
3317
3
          "_openmp_teams_reductions_buffer_$_$ptr");
3318
3
    }
3319
9
    llvm::Value *GlobalBufferPtr = CGF.EmitLoadOfScalar(
3320
9
        Address(KernelTeamsReductionPtr, CGF.VoidPtrTy, CGM.getPointerAlign()),
3321
9
        /*Volatile=*/false, C.getPointerType(C.VoidPtrTy), Loc);
3322
9
    llvm::Value *GlobalToBufferCpyFn = ::emitListToGlobalCopyFunction(
3323
9
        CGM, Privates, ReductionArrayTy, Loc, TeamReductionRec, VarFieldMap);
3324
9
    llvm::Value *GlobalToBufferRedFn = ::emitListToGlobalReduceFunction(
3325
9
        CGM, Privates, ReductionArrayTy, Loc, TeamReductionRec, VarFieldMap,
3326
9
        ReductionFn);
3327
9
    llvm::Value *BufferToGlobalCpyFn = ::emitGlobalToListCopyFunction(
3328
9
        CGM, Privates, ReductionArrayTy, Loc, TeamReductionRec, VarFieldMap);
3329
9
    llvm::Value *BufferToGlobalRedFn = ::emitGlobalToListReduceFunction(
3330
9
        CGM, Privates, ReductionArrayTy, Loc, TeamReductionRec, VarFieldMap,
3331
9
        ReductionFn);
3332
3333
9
    llvm::Value *Args[] = {
3334
9
        RTLoc,
3335
9
        ThreadId,
3336
9
        GlobalBufferPtr,
3337
9
        CGF.Builder.getInt32(C.getLangOpts().OpenMPCUDAReductionBufNum),
3338
9
        RL,
3339
9
        ShuffleAndReduceFn,
3340
9
        InterWarpCopyFn,
3341
9
        GlobalToBufferCpyFn,
3342
9
        GlobalToBufferRedFn,
3343
9
        BufferToGlobalCpyFn,
3344
9
        BufferToGlobalRedFn};
3345
3346
9
    Res = CGF.EmitRuntimeCall(
3347
9
        OMPBuilder.getOrCreateRuntimeFunction(
3348
9
            CGM.getModule(), OMPRTL___kmpc_nvptx_teams_reduce_nowait_v2),
3349
9
        Args);
3350
9
  }
3351
3352
  // 5. Build if (res == 1)
3353
0
  llvm::BasicBlock *ExitBB = CGF.createBasicBlock(".omp.reduction.done");
3354
28
  llvm::BasicBlock *ThenBB = CGF.createBasicBlock(".omp.reduction.then");
3355
28
  llvm::Value *Cond = CGF.Builder.CreateICmpEQ(
3356
28
      Res, llvm::ConstantInt::get(CGM.Int32Ty, /*V=*/1));
3357
28
  CGF.Builder.CreateCondBr(Cond, ThenBB, ExitBB);
3358
3359
  // 6. Build then branch: where we have reduced values in the master
3360
  //    thread in each team.
3361
  //    __kmpc_end_reduce{_nowait}(<gtid>);
3362
  //    break;
3363
28
  CGF.EmitBlock(ThenBB);
3364
3365
  // Add emission of __kmpc_end_reduce{_nowait}(<gtid>);
3366
28
  auto &&CodeGen = [Privates, LHSExprs, RHSExprs, ReductionOps,
3367
28
                    this](CodeGenFunction &CGF, PrePostActionTy &Action) {
3368
28
    auto IPriv = Privates.begin();
3369
28
    auto ILHS = LHSExprs.begin();
3370
28
    auto IRHS = RHSExprs.begin();
3371
43
    for (const Expr *E : ReductionOps) {
3372
43
      emitSingleReductionCombiner(CGF, E, *IPriv, cast<DeclRefExpr>(*ILHS),
3373
43
                                  cast<DeclRefExpr>(*IRHS));
3374
43
      ++IPriv;
3375
43
      ++ILHS;
3376
43
      ++IRHS;
3377
43
    }
3378
28
  };
3379
28
  llvm::Value *EndArgs[] = {ThreadId};
3380
28
  RegionCodeGenTy RCG(CodeGen);
3381
28
  NVPTXActionTy Action(
3382
28
      nullptr, llvm::None,
3383
28
      OMPBuilder.getOrCreateRuntimeFunction(
3384
28
          CGM.getModule(), OMPRTL___kmpc_nvptx_end_reduce_nowait),
3385
28
      EndArgs);
3386
28
  RCG.setAction(Action);
3387
28
  RCG(CGF);
3388
  // There is no need to emit line number for unconditional branch.
3389
28
  (void)ApplyDebugLocation::CreateEmpty(CGF);
3390
28
  CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
3391
28
}
3392
3393
const VarDecl *
3394
CGOpenMPRuntimeGPU::translateParameter(const FieldDecl *FD,
3395
83
                                       const VarDecl *NativeParam) const {
3396
83
  if (!NativeParam->getType()->isReferenceType())
3397
25
    return NativeParam;
3398
58
  QualType ArgType = NativeParam->getType();
3399
58
  QualifierCollector QC;
3400
58
  const Type *NonQualTy = QC.strip(ArgType);
3401
58
  QualType PointeeTy = cast<ReferenceType>(NonQualTy)->getPointeeType();
3402
58
  if (const auto *Attr = FD->getAttr<OMPCaptureKindAttr>()) {
3403
58
    if (Attr->getCaptureKind() == OMPC_map) {
3404
38
      PointeeTy = CGM.getContext().getAddrSpaceQualType(PointeeTy,
3405
38
                                                        LangAS::opencl_global);
3406
38
    }
3407
58
  }
3408
58
  ArgType = CGM.getContext().getPointerType(PointeeTy);
3409
58
  QC.addRestrict();
3410
58
  enum { NVPTX_local_addr = 5 };
3411
58
  QC.addAddressSpace(getLangASFromTargetAS(NVPTX_local_addr));
3412
58
  ArgType = QC.apply(CGM.getContext(), ArgType);
3413
58
  if (isa<ImplicitParamDecl>(NativeParam))
3414
0
    return ImplicitParamDecl::Create(
3415
0
        CGM.getContext(), /*DC=*/nullptr, NativeParam->getLocation(),
3416
0
        NativeParam->getIdentifier(), ArgType, ImplicitParamDecl::Other);
3417
58
  return ParmVarDecl::Create(
3418
58
      CGM.getContext(),
3419
58
      const_cast<DeclContext *>(NativeParam->getDeclContext()),
3420
58
      NativeParam->getBeginLoc(), NativeParam->getLocation(),
3421
58
      NativeParam->getIdentifier(), ArgType,
3422
58
      /*TInfo=*/nullptr, SC_None, /*DefArg=*/nullptr);
3423
58
}
3424
3425
Address
3426
CGOpenMPRuntimeGPU::getParameterAddress(CodeGenFunction &CGF,
3427
                                          const VarDecl *NativeParam,
3428
58
                                          const VarDecl *TargetParam) const {
3429
58
  assert(NativeParam != TargetParam &&
3430
58
         NativeParam->getType()->isReferenceType() &&
3431
58
         "Native arg must not be the same as target arg.");
3432
0
  Address LocalAddr = CGF.GetAddrOfLocalVar(TargetParam);
3433
58
  QualType NativeParamType = NativeParam->getType();
3434
58
  QualifierCollector QC;
3435
58
  const Type *NonQualTy = QC.strip(NativeParamType);
3436
58
  QualType NativePointeeTy = cast<ReferenceType>(NonQualTy)->getPointeeType();
3437
58
  unsigned NativePointeeAddrSpace =
3438
58
      CGF.getContext().getTargetAddressSpace(NativePointeeTy);
3439
58
  QualType TargetTy = TargetParam->getType();
3440
58
  llvm::Value *TargetAddr = CGF.EmitLoadOfScalar(
3441
58
      LocalAddr, /*Volatile=*/false, TargetTy, SourceLocation());
3442
  // First cast to generic.
3443
58
  TargetAddr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
3444
58
      TargetAddr, llvm::PointerType::getWithSamePointeeType(
3445
58
          cast<llvm::PointerType>(TargetAddr->getType()), /*AddrSpace=*/0));
3446
  // Cast from generic to native address space.
3447
58
  TargetAddr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
3448
58
      TargetAddr, llvm::PointerType::getWithSamePointeeType(
3449
58
          cast<llvm::PointerType>(TargetAddr->getType()),
3450
58
                                  NativePointeeAddrSpace));
3451
58
  Address NativeParamAddr = CGF.CreateMemTemp(NativeParamType);
3452
58
  CGF.EmitStoreOfScalar(TargetAddr, NativeParamAddr, /*Volatile=*/false,
3453
58
                        NativeParamType);
3454
58
  return NativeParamAddr;
3455
58
}
3456
3457
void CGOpenMPRuntimeGPU::emitOutlinedFunctionCall(
3458
    CodeGenFunction &CGF, SourceLocation Loc, llvm::FunctionCallee OutlinedFn,
3459
460
    ArrayRef<llvm::Value *> Args) const {
3460
460
  SmallVector<llvm::Value *, 4> TargetArgs;
3461
460
  TargetArgs.reserve(Args.size());
3462
460
  auto *FnType = OutlinedFn.getFunctionType();
3463
1.66k
  for (unsigned I = 0, E = Args.size(); I < E; ++I) {
3464
1.20k
    if (FnType->isVarArg() && FnType->getNumParams() <= I) {
3465
0
      TargetArgs.append(std::next(Args.begin(), I), Args.end());
3466
0
      break;
3467
0
    }
3468
1.20k
    llvm::Type *TargetType = FnType->getParamType(I);
3469
1.20k
    llvm::Value *NativeArg = Args[I];
3470
1.20k
    if (!TargetType->isPointerTy()) {
3471
121
      TargetArgs.emplace_back(NativeArg);
3472
121
      continue;
3473
121
    }
3474
1.08k
    llvm::Value *TargetArg = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
3475
1.08k
        NativeArg, llvm::PointerType::getWithSamePointeeType(
3476
1.08k
            cast<llvm::PointerType>(NativeArg->getType()), /*AddrSpace*/ 0));
3477
1.08k
    TargetArgs.emplace_back(
3478
1.08k
        CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(TargetArg, TargetType));
3479
1.08k
  }
3480
460
  CGOpenMPRuntime::emitOutlinedFunctionCall(CGF, Loc, OutlinedFn, TargetArgs);
3481
460
}
3482
3483
/// Emits a function which wraps the outlined parallel region
3484
/// and controls the arguments that are passed to it.
3485
/// The wrapper ensures that the outlined function is called
3486
/// with the correct arguments when data is shared.
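///
/// In rough pseudocode (the bound and capture loads below only appear when
/// the directive requires them), the generated wrapper has this shape:
///
///   void outlined_wrapper(uint16_t parallel_level, uint32_t tid) {
///     int32_t zero = 0;
///     void **shared_args;
///     __kmpc_get_shared_variables(&shared_args);
///     outlined(&tid, &zero, shared_args[0], ..., shared_args[n - 1]);
///   }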
3487
llvm::Function *CGOpenMPRuntimeGPU::createParallelDataSharingWrapper(
3488
57
    llvm::Function *OutlinedParallelFn, const OMPExecutableDirective &D) {
3489
57
  ASTContext &Ctx = CGM.getContext();
3490
57
  const auto &CS = *D.getCapturedStmt(OMPD_parallel);
3491
3492
  // Create a function that takes as argument the source thread.
3493
57
  FunctionArgList WrapperArgs;
3494
57
  QualType Int16QTy =
3495
57
      Ctx.getIntTypeForBitwidth(/*DestWidth=*/16, /*Signed=*/false);
3496
57
  QualType Int32QTy =
3497
57
      Ctx.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/false);
3498
57
  ImplicitParamDecl ParallelLevelArg(Ctx, /*DC=*/nullptr, D.getBeginLoc(),
3499
57
                                     /*Id=*/nullptr, Int16QTy,
3500
57
                                     ImplicitParamDecl::Other);
3501
57
  ImplicitParamDecl WrapperArg(Ctx, /*DC=*/nullptr, D.getBeginLoc(),
3502
57
                               /*Id=*/nullptr, Int32QTy,
3503
57
                               ImplicitParamDecl::Other);
3504
57
  WrapperArgs.emplace_back(&ParallelLevelArg);
3505
57
  WrapperArgs.emplace_back(&WrapperArg);
3506
3507
57
  const CGFunctionInfo &CGFI =
3508
57
      CGM.getTypes().arrangeBuiltinFunctionDeclaration(Ctx.VoidTy, WrapperArgs);
3509
3510
57
  auto *Fn = llvm::Function::Create(
3511
57
      CGM.getTypes().GetFunctionType(CGFI), llvm::GlobalValue::InternalLinkage,
3512
57
      Twine(OutlinedParallelFn->getName(), "_wrapper"), &CGM.getModule());
3513
3514
  // Ensure we do not inline the function. This is trivially true for the ones
3515
  // passed to __kmpc_fork_call but the ones called in serialized regions
3516
  // could be inlined. This is not perfect, but it is closer to the invariant
3517
  // we want, namely, every data environment starts with a new function.
3518
  // TODO: We should pass the if condition to the runtime function and do the
3519
  //       handling there. Much cleaner code.
3520
57
  Fn->addFnAttr(llvm::Attribute::NoInline);
3521
3522
57
  CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, CGFI);
3523
57
  Fn->setLinkage(llvm::GlobalValue::InternalLinkage);
3524
57
  Fn->setDoesNotRecurse();
3525
3526
57
  CodeGenFunction CGF(CGM, /*suppressNewContext=*/true);
3527
57
  CGF.StartFunction(GlobalDecl(), Ctx.VoidTy, Fn, CGFI, WrapperArgs,
3528
57
                    D.getBeginLoc(), D.getBeginLoc());
3529
3530
57
  const auto *RD = CS.getCapturedRecordDecl();
3531
57
  auto CurField = RD->field_begin();
3532
3533
57
  Address ZeroAddr = CGF.CreateDefaultAlignTempAlloca(CGF.Int32Ty,
3534
57
                                                      /*Name=*/".zero.addr");
3535
57
  CGF.Builder.CreateStore(CGF.Builder.getInt32(/*C*/ 0), ZeroAddr);
3536
  // Get the array of arguments.
3537
57
  SmallVector<llvm::Value *, 8> Args;
3538
3539
57
  Args.emplace_back(CGF.GetAddrOfLocalVar(&WrapperArg).getPointer());
3540
57
  Args.emplace_back(ZeroAddr.getPointer());
3541
3542
57
  CGBuilderTy &Bld = CGF.Builder;
3543
57
  auto CI = CS.capture_begin();
3544
3545
  // Use global memory for data sharing.
3546
  // Handle passing of global args to workers.
3547
57
  Address GlobalArgs =
3548
57
      CGF.CreateDefaultAlignTempAlloca(CGF.VoidPtrPtrTy, "global_args");
3549
57
  llvm::Value *GlobalArgsPtr = GlobalArgs.getPointer();
3550
57
  llvm::Value *DataSharingArgs[] = {GlobalArgsPtr};
3551
57
  CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
3552
57
                          CGM.getModule(), OMPRTL___kmpc_get_shared_variables),
3553
57
                      DataSharingArgs);
3554
3555
  // Retrieve the shared variables from the list of references returned
3556
  // by the runtime. Pass the variables to the outlined function.
3557
57
  Address SharedArgListAddress = Address::invalid();
3558
57
  if (CS.capture_size() > 0 ||
3559
57
      isOpenMPLoopBoundSharingDirective(D.getDirectiveKind())) {
3560
26
    SharedArgListAddress = CGF.EmitLoadOfPointer(
3561
26
        GlobalArgs, CGF.getContext()
3562
26
                        .getPointerType(CGF.getContext().VoidPtrTy)
3563
26
                        .castAs<PointerType>());
3564
26
  }
3565
57
  unsigned Idx = 0;
3566
57
  if (isOpenMPLoopBoundSharingDirective(D.getDirectiveKind())) {
3567
3
    Address Src = Bld.CreateConstInBoundsGEP(SharedArgListAddress, Idx);
3568
3
    Address TypedAddress = Bld.CreatePointerBitCastOrAddrSpaceCast(
3569
3
        Src, CGF.SizeTy->getPointerTo(), CGF.SizeTy);
3570
3
    llvm::Value *LB = CGF.EmitLoadOfScalar(
3571
3
        TypedAddress,
3572
3
        /*Volatile=*/false,
3573
3
        CGF.getContext().getPointerType(CGF.getContext().getSizeType()),
3574
3
        cast<OMPLoopDirective>(D).getLowerBoundVariable()->getExprLoc());
3575
3
    Args.emplace_back(LB);
3576
3
    ++Idx;
3577
3
    Src = Bld.CreateConstInBoundsGEP(SharedArgListAddress, Idx);
3578
3
    TypedAddress = Bld.CreatePointerBitCastOrAddrSpaceCast(
3579
3
        Src, CGF.SizeTy->getPointerTo(), CGF.SizeTy);
3580
3
    llvm::Value *UB = CGF.EmitLoadOfScalar(
3581
3
        TypedAddress,
3582
3
        /*Volatile=*/false,
3583
3
        CGF.getContext().getPointerType(CGF.getContext().getSizeType()),
3584
3
        cast<OMPLoopDirective>(D).getUpperBoundVariable()->getExprLoc());
3585
3
    Args.emplace_back(UB);
3586
3
    ++Idx;
3587
3
  }
3588
57
  if (CS.capture_size() > 0) {
3589
23
    ASTContext &CGFContext = CGF.getContext();
3590
63
    for (unsigned I = 0, E = CS.capture_size(); I < E; ++I, ++CI, ++CurField) {
3591
40
      QualType ElemTy = CurField->getType();
3592
40
      Address Src = Bld.CreateConstInBoundsGEP(SharedArgListAddress, I + Idx);
3593
40
      Address TypedAddress = Bld.CreatePointerBitCastOrAddrSpaceCast(
3594
40
          Src, CGF.ConvertTypeForMem(CGFContext.getPointerType(ElemTy)),
3595
40
          CGF.ConvertTypeForMem(ElemTy));
3596
40
      llvm::Value *Arg = CGF.EmitLoadOfScalar(TypedAddress,
3597
40
                                              /*Volatile=*/false,
3598
40
                                              CGFContext.getPointerType(ElemTy),
3599
40
                                              CI->getLocation());
3600
40
      if (CI->capturesVariableByCopy() &&
3601
40
          !CI->getCapturedVar()->getType()->isAnyPointerType()) {
3602
0
        Arg = castValueToType(CGF, Arg, ElemTy, CGFContext.getUIntPtrType(),
3603
0
                              CI->getLocation());
3604
0
      }
3605
40
      Args.emplace_back(Arg);
3606
40
    }
3607
23
  }
3608
3609
57
  emitOutlinedFunctionCall(CGF, D.getBeginLoc(), OutlinedParallelFn, Args);
3610
57
  CGF.FinishFunction();
3611
57
  return Fn;
3612
57
}
3613
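For illustration, a minimal OpenMP source that exercises this wrapper path might look like the hedged sketch below (the function name and variables are hypothetical, not from this translation unit). The parallel region captures 'Res' and 'N', so the emitted "_wrapper" function retrieves both through __kmpc_get_shared_variables before forwarding them to the outlined parallel function.

// Hedged sketch, assuming a GPU offload target; not part of this file.
void add_one(int *Res, int N) {
#pragma omp target parallel for map(tofrom : Res[0 : N])
  for (int I = 0; I < N; ++I)
    Res[I] += 1;
}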
3614
void CGOpenMPRuntimeGPU::emitFunctionProlog(CodeGenFunction &CGF,
3615
2.10k
                                              const Decl *D) {
3616
2.10k
  if (getDataSharingMode(CGM) != CGOpenMPRuntimeGPU::Generic)
3617
340
    return;
3618
3619
1.76k
  assert(D && "Expected function or captured|block decl.");
3620
0
  assert(FunctionGlobalizedDecls.count(CGF.CurFn) == 0 &&
3621
1.76k
         "Function is registered already.");
3622
0
  assert((!TeamAndReductions.first || TeamAndReductions.first == D) &&
3623
1.76k
         "Team is set but not processed.");
3624
0
  const Stmt *Body = nullptr;
3625
1.76k
  bool NeedToDelayGlobalization = false;
3626
1.76k
  if (const auto *FD = dyn_cast<FunctionDecl>(D)) {
3627
475
    Body = FD->getBody();
3628
1.28k
  } else if (const auto *BD = dyn_cast<BlockDecl>(D)) {
3629
0
    Body = BD->getBody();
3630
1.28k
  } else if (const auto *CD = dyn_cast<CapturedDecl>(D)) {
3631
1.28k
    Body = CD->getBody();
3632
1.28k
    NeedToDelayGlobalization = CGF.CapturedStmtInfo->getKind() == CR_OpenMP;
3633
1.28k
    if (NeedToDelayGlobalization &&
3634
1.28k
        getExecutionMode() == CGOpenMPRuntimeGPU::EM_SPMD)
3635
1.03k
      return;
3636
1.28k
  }
3637
729
  if (!Body)
3638
0
    return;
3639
729
  CheckVarsEscapingDeclContext VarChecker(CGF, TeamAndReductions.second);
3640
729
  VarChecker.Visit(Body);
3641
729
  const RecordDecl *GlobalizedVarsRecord =
3642
729
      VarChecker.getGlobalizedRecord(IsInTTDRegion);
3643
729
  TeamAndReductions.first = nullptr;
3644
729
  TeamAndReductions.second.clear();
3645
729
  ArrayRef<const ValueDecl *> EscapedVariableLengthDecls =
3646
729
      VarChecker.getEscapedVariableLengthDecls();
3647
729
  if (!GlobalizedVarsRecord && EscapedVariableLengthDecls.empty())
3648
680
    return;
3649
49
  auto I = FunctionGlobalizedDecls.try_emplace(CGF.CurFn).first;
3650
49
  I->getSecond().MappedParams =
3651
49
      std::make_unique<CodeGenFunction::OMPMapVars>();
3652
49
  I->getSecond().EscapedParameters.insert(
3653
49
      VarChecker.getEscapedParameters().begin(),
3654
49
      VarChecker.getEscapedParameters().end());
3655
49
  I->getSecond().EscapedVariableLengthDecls.append(
3656
49
      EscapedVariableLengthDecls.begin(), EscapedVariableLengthDecls.end());
3657
49
  DeclToAddrMapTy &Data = I->getSecond().LocalVarData;
3658
71
  for (const ValueDecl *VD : VarChecker.getEscapedDecls()) {
3659
71
    assert(VD->isCanonicalDecl() && "Expected canonical declaration");
3660
0
    Data.insert(std::make_pair(VD, MappedVarData()));
3661
71
  }
3662
49
  if (!IsInTTDRegion && !NeedToDelayGlobalization && !IsInParallelRegion) {
3663
13
    CheckVarsEscapingDeclContext VarChecker(CGF, llvm::None);
3664
13
    VarChecker.Visit(Body);
3665
13
    I->getSecond().SecondaryLocalVarData.emplace();
3666
13
    DeclToAddrMapTy &Data = I->getSecond().SecondaryLocalVarData.getValue();
3667
13
    for (const ValueDecl *VD : VarChecker.getEscapedDecls()) {
3668
13
      assert(VD->isCanonicalDecl() && "Expected canonical declaration");
3669
0
      Data.insert(std::make_pair(VD, MappedVarData()));
3670
13
    }
3671
13
  }
3672
49
  if (!NeedToDelayGlobalization) {
3673
13
    emitGenericVarsProlog(CGF, D->getBeginLoc(), /*WithSPMDCheck=*/true);
3674
13
    struct GlobalizationScope final : EHScopeStack::Cleanup {
3675
13
      GlobalizationScope() = default;
3676
3677
13
      void Emit(CodeGenFunction &CGF, Flags flags) override {
3678
13
        static_cast<CGOpenMPRuntimeGPU &>(CGF.CGM.getOpenMPRuntime())
3679
13
            .emitGenericVarsEpilog(CGF, /*WithSPMDCheck=*/true);
3680
13
      }
3681
13
    };
3682
13
    CGF.EHStack.pushCleanup<GlobalizationScope>(NormalAndEHCleanup);
3683
13
  }
3684
49
}
3685
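As a hedged illustration of when this prolog triggers globalization: in the hypothetical generic-mode kernel below, 'Partial' is declared in the team context but escapes into the nested parallel region, so it is recorded in FunctionGlobalizedDecls and moved out of a private stack slot.

// Hedged sketch, assuming generic (non-SPMD) mode; not part of this file.
void escape_demo(int *Out) {
#pragma omp target teams num_teams(1) map(tofrom : Out[0 : 1])
  {
    int Partial = 0; // escapes into the parallel region below
#pragma omp parallel
#pragma omp atomic
    Partial += 1;
    *Out = Partial;
  }
}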
3686
Address CGOpenMPRuntimeGPU::getAddressOfLocalVariable(CodeGenFunction &CGF,
3687
9.39k
                                                        const VarDecl *VD) {
3688
9.39k
  if (VD && VD->hasAttr<OMPAllocateDeclAttr>()) {
3689
6
    const auto *A = VD->getAttr<OMPAllocateDeclAttr>();
3690
6
    auto AS = LangAS::Default;
3691
6
    switch (A->getAllocatorType()) {
3692
      // Use the default allocator here as by default local vars are
3693
      // threadlocal.
3694
0
    case OMPAllocateDeclAttr::OMPNullMemAlloc:
3695
1
    case OMPAllocateDeclAttr::OMPDefaultMemAlloc:
3696
2
    case OMPAllocateDeclAttr::OMPThreadMemAlloc:
3697
2
    case OMPAllocateDeclAttr::OMPHighBWMemAlloc:
3698
2
    case OMPAllocateDeclAttr::OMPLowLatMemAlloc:
3699
      // Follow the user decision - use default allocation.
3700
2
      return Address::invalid();
3701
0
    case OMPAllocateDeclAttr::OMPUserDefinedMemAlloc:
3702
      // TODO: implement support for user-defined allocators.
3703
0
      return Address::invalid();
3704
0
    case OMPAllocateDeclAttr::OMPConstMemAlloc:
3705
0
      AS = LangAS::cuda_constant;
3706
0
      break;
3707
1
    case OMPAllocateDeclAttr::OMPPTeamMemAlloc:
3708
1
      AS = LangAS::cuda_shared;
3709
1
      break;
3710
0
    case OMPAllocateDeclAttr::OMPLargeCapMemAlloc:
3711
3
    case OMPAllocateDeclAttr::OMPCGroupMemAlloc:
3712
3
      break;
3713
6
    }
3714
4
    llvm::Type *VarTy = CGF.ConvertTypeForMem(VD->getType());
3715
4
    auto *GV = new llvm::GlobalVariable(
3716
4
        CGM.getModule(), VarTy, /*isConstant=*/false,
3717
4
        llvm::GlobalValue::InternalLinkage, llvm::Constant::getNullValue(VarTy),
3718
4
        VD->getName(),
3719
4
        /*InsertBefore=*/nullptr, llvm::GlobalValue::NotThreadLocal,
3720
4
        CGM.getContext().getTargetAddressSpace(AS));
3721
4
    CharUnits Align = CGM.getContext().getDeclAlign(VD);
3722
4
    GV->setAlignment(Align.getAsAlign());
3723
4
    return Address(
3724
4
        CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
3725
4
            GV, VarTy->getPointerTo(CGM.getContext().getTargetAddressSpace(
3726
4
                    VD->getType().getAddressSpace()))),
3727
4
        VarTy, Align);
3728
6
  }
3729
3730
9.39k
  if (getDataSharingMode(CGM) != CGOpenMPRuntimeGPU::Generic)
3731
2.13k
    return Address::invalid();
3732
3733
7.25k
  VD = VD->getCanonicalDecl();
3734
7.25k
  auto I = FunctionGlobalizedDecls.find(CGF.CurFn);
3735
7.25k
  if (I == FunctionGlobalizedDecls.end())
3736
6.94k
    return Address::invalid();
3737
305
  auto VDI = I->getSecond().LocalVarData.find(VD);
3738
305
  if (VDI != I->getSecond().LocalVarData.end())
3739
58
    return VDI->second.PrivateAddr;
3740
247
  if (VD->hasAttrs()) {
3741
37
    for (specific_attr_iterator<OMPReferencedVarAttr> IT(VD->attr_begin()),
3742
37
         E(VD->attr_end());
3743
61
         IT != E; ++IT) {
3744
37
      auto VDI = I->getSecond().LocalVarData.find(
3745
37
          cast<VarDecl>(cast<DeclRefExpr>(IT->getRef())->getDecl())
3746
37
              ->getCanonicalDecl());
3747
37
      if (VDI != I->getSecond().LocalVarData.end())
3748
13
        return VDI->second.PrivateAddr;
3749
37
    }
3750
37
  }
3751
3752
234
  return Address::invalid();
3753
247
}
3754
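The allocator-to-address-space mapping above can be exercised from user code with an allocate directive; in the hedged sketch below (hypothetical names), omp_pteam_mem_alloc places 'Scratch' in CUDA shared memory (LangAS::cuda_shared) as a module-level global instead of a stack alloca.

// Hedged sketch; not part of this file.
#include <omp.h>
void team_scratch() {
  int Scratch = 0;
#pragma omp allocate(Scratch) allocator(omp_pteam_mem_alloc)
  // ... uses of Scratch ...
}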
3755
2.34k
void CGOpenMPRuntimeGPU::functionFinished(CodeGenFunction &CGF) {
3756
2.34k
  FunctionGlobalizedDecls.erase(CGF.CurFn);
3757
2.34k
  CGOpenMPRuntime::functionFinished(CGF);
3758
2.34k
}
3759
3760
void CGOpenMPRuntimeGPU::getDefaultDistScheduleAndChunk(
3761
    CodeGenFunction &CGF, const OMPLoopDirective &S,
3762
    OpenMPDistScheduleClauseKind &ScheduleKind,
3763
278
    llvm::Value *&Chunk) const {
3764
278
  auto &RT = static_cast<CGOpenMPRuntimeGPU &>(CGF.CGM.getOpenMPRuntime());
3765
278
  if (getExecutionMode() == CGOpenMPRuntimeGPU::EM_SPMD) {
3766
266
    ScheduleKind = OMPC_DIST_SCHEDULE_static;
3767
266
    Chunk = CGF.EmitScalarConversion(
3768
266
        RT.getGPUNumThreads(CGF),
3769
266
        CGF.getContext().getIntTypeForBitwidth(32, /*Signed=*/0),
3770
266
        S.getIterationVariable()->getType(), S.getBeginLoc());
3771
266
    return;
3772
266
  }
3773
12
  CGOpenMPRuntime::getDefaultDistScheduleAndChunk(
3774
12
      CGF, S, ScheduleKind, Chunk);
3775
12
}
3776
3777
void CGOpenMPRuntimeGPU::getDefaultScheduleAndChunk(
3778
    CodeGenFunction &CGF, const OMPLoopDirective &S,
3779
    OpenMPScheduleClauseKind &ScheduleKind,
3780
102
    const Expr *&ChunkExpr) const {
3781
102
  ScheduleKind = OMPC_SCHEDULE_static;
3782
  // Chunk size is 1 in this case.
3783
102
  llvm::APInt ChunkSize(32, 1);
3784
102
  ChunkExpr = IntegerLiteral::Create(CGF.getContext(), ChunkSize,
3785
102
      CGF.getContext().getIntTypeForBitwidth(32, /*Signed=*/0),
3786
102
      SourceLocation());
3787
102
}
3788
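Taken together, the two hooks above mean that in SPMD mode a loop such as the hedged sketch below (hypothetical names) behaves as if dist_schedule(static, <threads-per-block>) and schedule(static, 1) had been written explicitly, keeping consecutive iterations on consecutive threads.

// Hedged sketch; not part of this file.
void saxpy(float A, const float *X, float *Y, int N) {
#pragma omp target teams distribute parallel for map(to : X[0 : N]) map(tofrom : Y[0 : N])
  for (int I = 0; I < N; ++I)
    Y[I] = A * X[I] + Y[I];
}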
3789
void CGOpenMPRuntimeGPU::adjustTargetSpecificDataForLambdas(
3790
835
    CodeGenFunction &CGF, const OMPExecutableDirective &D) const {
3791
835
  assert(isOpenMPTargetExecutionDirective(D.getDirectiveKind()) &&
3792
835
         " Expected target-based directive.");
3793
0
  const CapturedStmt *CS = D.getCapturedStmt(OMPD_target);
3794
835
  for (const CapturedStmt::Capture &C : CS->captures()) {
3795
    // Capture variables captured by reference in lambdas for target-based
3796
    // directives.
3797
667
    if (!C.capturesVariable())
3798
339
      continue;
3799
328
    const VarDecl *VD = C.getCapturedVar();
3800
328
    const auto *RD = VD->getType()
3801
328
                         .getCanonicalType()
3802
328
                         .getNonReferenceType()
3803
328
                         ->getAsCXXRecordDecl();
3804
328
    if (!RD || !RD->isLambda())
3805
328
      continue;
3806
0
    Address VDAddr = CGF.GetAddrOfLocalVar(VD);
3807
0
    LValue VDLVal;
3808
0
    if (VD->getType().getCanonicalType()->isReferenceType())
3809
0
      VDLVal = CGF.EmitLoadOfReferenceLValue(VDAddr, VD->getType());
3810
0
    else
3811
0
      VDLVal = CGF.MakeAddrLValue(
3812
0
          VDAddr, VD->getType().getCanonicalType().getNonReferenceType());
3813
0
    llvm::DenseMap<const VarDecl *, FieldDecl *> Captures;
3814
0
    FieldDecl *ThisCapture = nullptr;
3815
0
    RD->getCaptureFields(Captures, ThisCapture);
3816
0
    if (ThisCapture && CGF.CapturedStmtInfo->isCXXThisExprCaptured()) {
3817
0
      LValue ThisLVal =
3818
0
          CGF.EmitLValueForFieldInitialization(VDLVal, ThisCapture);
3819
0
      llvm::Value *CXXThis = CGF.LoadCXXThis();
3820
0
      CGF.EmitStoreOfScalar(CXXThis, ThisLVal);
3821
0
    }
3822
0
    for (const LambdaCapture &LC : RD->captures()) {
3823
0
      if (LC.getCaptureKind() != LCK_ByRef)
3824
0
        continue;
3825
0
      const VarDecl *VD = LC.getCapturedVar();
3826
0
      if (!CS->capturesVariable(VD))
3827
0
        continue;
3828
0
      auto It = Captures.find(VD);
3829
0
      assert(It != Captures.end() && "Found lambda capture without field.");
3830
0
      LValue VarLVal = CGF.EmitLValueForFieldInitialization(VDLVal, It->second);
3831
0
      Address VDAddr = CGF.GetAddrOfLocalVar(VD);
3832
0
      if (VD->getType().getCanonicalType()->isReferenceType())
3833
0
        VDAddr = CGF.EmitLoadOfReferenceLValue(VDAddr,
3834
0
                                               VD->getType().getCanonicalType())
3835
0
                     .getAddress(CGF);
3836
0
      CGF.EmitStoreOfScalar(VDAddr.getPointer(), VarLVal);
3837
0
    }
3838
0
  }
3839
835
}
3840
3841
bool CGOpenMPRuntimeGPU::hasAllocateAttributeForGlobalVar(const VarDecl *VD,
3842
99
                                                            LangAS &AS) {
3843
99
  if (!VD || !VD->hasAttr<OMPAllocateDeclAttr>())
3844
86
    return false;
3845
13
  const auto *A = VD->getAttr<OMPAllocateDeclAttr>();
3846
13
  switch(A->getAllocatorType()) {
3847
0
  case OMPAllocateDeclAttr::OMPNullMemAlloc:
3848
1
  case OMPAllocateDeclAttr::OMPDefaultMemAlloc:
3849
  // Not supported, fall back to the default mem space.
3850
2
  case OMPAllocateDeclAttr::OMPThreadMemAlloc:
3851
4
  case OMPAllocateDeclAttr::OMPLargeCapMemAlloc:
3852
4
  case OMPAllocateDeclAttr::OMPCGroupMemAlloc:
3853
8
  case OMPAllocateDeclAttr::OMPHighBWMemAlloc:
3854
9
  case OMPAllocateDeclAttr::OMPLowLatMemAlloc:
3855
9
    AS = LangAS::Default;
3856
9
    return true;
3857
2
  case OMPAllocateDeclAttr::OMPConstMemAlloc:
3858
2
    AS = LangAS::cuda_constant;
3859
2
    return true;
3860
2
  case OMPAllocateDeclAttr::OMPPTeamMemAlloc:
3861
2
    AS = LangAS::cuda_shared;
3862
2
    return true;
3863
0
  case OMPAllocateDeclAttr::OMPUserDefinedMemAlloc:
3864
0
    llvm_unreachable("Expected predefined allocator for the variables with the "
3865
13
                     "static storage.");
3866
13
  }
3867
0
  return false;
3868
13
}
3869
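For a variable with static storage duration, the same attribute check can be reached with a declaration like the hedged sketch below (hypothetical names); omp_const_mem_alloc maps it to LangAS::cuda_constant.

// Hedged sketch; not part of this file.
#include <omp.h>
#pragma omp declare target
static float Coeffs[4] = {0.5f, 0.25f, 0.125f, 0.0625f};
#pragma omp allocate(Coeffs) allocator(omp_const_mem_alloc)
#pragma omp end declare target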
3870
// Get the current CudaArch and ignore any unknown values.
3871
17
static CudaArch getCudaArch(CodeGenModule &CGM) {
3872
17
  if (!CGM.getTarget().hasFeature("ptx"))
3873
0
    return CudaArch::UNKNOWN;
3874
33
  for (const auto &Feature : CGM.getTarget().getTargetOpts().FeatureMap) {
3875
33
    if (Feature.getValue()) {
3876
33
      CudaArch Arch = StringToCudaArch(Feature.getKey());
3877
33
      if (Arch != CudaArch::UNKNOWN)
3878
17
        return Arch;
3879
33
    }
3880
33
  }
3881
0
  return CudaArch::UNKNOWN;
3882
17
}
3883
3884
/// Check to see if the target architecture supports unified addressing, which
3885
/// is required by the OpenMP requires clause "unified_shared_memory".
3886
void CGOpenMPRuntimeGPU::processRequiresDirective(
3887
17
    const OMPRequiresDecl *D) {
3888
17
  for (const OMPClause *Clause : D->clauselists()) {
3889
17
    if (Clause->getClauseKind() == OMPC_unified_shared_memory) {
3890
17
      CudaArch Arch = getCudaArch(CGM);
3891
17
      switch (Arch) {
3892
1
      case CudaArch::SM_20:
3893
2
      case CudaArch::SM_21:
3894
3
      case CudaArch::SM_30:
3895
4
      case CudaArch::SM_32:
3896
5
      case CudaArch::SM_35:
3897
6
      case CudaArch::SM_37:
3898
7
      case CudaArch::SM_50:
3899
8
      case CudaArch::SM_52:
3900
9
      case CudaArch::SM_53: {
3901
9
        SmallString<256> Buffer;
3902
9
        llvm::raw_svector_ostream Out(Buffer);
3903
9
        Out << "Target architecture " << CudaArchToString(Arch)
3904
9
            << " does not support unified addressing";
3905
9
        CGM.Error(Clause->getBeginLoc(), Out.str());
3906
9
        return;
3907
8
      }
3908
1
      case CudaArch::SM_60:
3909
2
      case CudaArch::SM_61:
3910
3
      case CudaArch::SM_62:
3911
6
      case CudaArch::SM_70:
3912
7
      case CudaArch::SM_72:
3913
8
      case CudaArch::SM_75:
3914
8
      case CudaArch::SM_80:
3915
8
      case CudaArch::SM_86:
3916
8
      case CudaArch::GFX600:
3917
8
      case CudaArch::GFX601:
3918
8
      case CudaArch::GFX602:
3919
8
      case CudaArch::GFX700:
3920
8
      case CudaArch::GFX701:
3921
8
      case CudaArch::GFX702:
3922
8
      case CudaArch::GFX703:
3923
8
      case CudaArch::GFX704:
3924
8
      case CudaArch::GFX705:
3925
8
      case CudaArch::GFX801:
3926
8
      case CudaArch::GFX802:
3927
8
      case CudaArch::GFX803:
3928
8
      case CudaArch::GFX805:
3929
8
      case CudaArch::GFX810:
3930
8
      case CudaArch::GFX900:
3931
8
      case CudaArch::GFX902:
3932
8
      case CudaArch::GFX904:
3933
8
      case CudaArch::GFX906:
3934
8
      case CudaArch::GFX908:
3935
8
      case CudaArch::GFX909:
3936
8
      case CudaArch::GFX90a:
3937
8
      case CudaArch::GFX90c:
3938
8
      case CudaArch::GFX940:
3939
8
      case CudaArch::GFX1010:
3940
8
      case CudaArch::GFX1011:
3941
8
      case CudaArch::GFX1012:
3942
8
      case CudaArch::GFX1013:
3943
8
      case CudaArch::GFX1030:
3944
8
      case CudaArch::GFX1031:
3945
8
      case CudaArch::GFX1032:
3946
8
      case CudaArch::GFX1033:
3947
8
      case CudaArch::GFX1034:
3948
8
      case CudaArch::GFX1035:
3949
8
      case CudaArch::GFX1036:
3950
8
      case CudaArch::GFX1100:
3951
8
      case CudaArch::GFX1101:
3952
8
      case CudaArch::GFX1102:
3953
8
      case CudaArch::GFX1103:
3954
8
      case CudaArch::Generic:
3955
8
      case CudaArch::UNUSED:
3956
8
      case CudaArch::UNKNOWN:
3957
8
        break;
3958
0
      case CudaArch::LAST:
3959
0
        llvm_unreachable("Unexpected Cuda arch.");
3960
17
      }
3961
17
    }
3962
17
  }
3963
8
  CGOpenMPRuntime::processRequiresDirective(D);
3964
8
}
3965
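A translation unit needs only the single line below (a hedged sketch, not part of this file) to reach this check; compiling it for one of the pre-sm_60 architectures in the first group produces the "does not support unified addressing" error emitted above, while sm_60 and newer, and the listed AMD GPU targets, fall through to the base implementation.

// Hedged sketch; not part of this file.
#pragma omp requires unified_shared_memory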
3966
169
void CGOpenMPRuntimeGPU::clear() {
3967
3968
169
  if (!TeamsReductions.empty()) {
3969
3
    ASTContext &C = CGM.getContext();
3970
3
    RecordDecl *StaticRD = C.buildImplicitRecord(
3971
3
        "_openmp_teams_reduction_type_$_", RecordDecl::TagKind::TTK_Union);
3972
3
    StaticRD->startDefinition();
3973
9
    for (const RecordDecl *TeamReductionRec : TeamsReductions) {
3974
9
      QualType RecTy = C.getRecordType(TeamReductionRec);
3975
9
      auto *Field = FieldDecl::Create(
3976
9
          C, StaticRD, SourceLocation(), SourceLocation(), nullptr, RecTy,
3977
9
          C.getTrivialTypeSourceInfo(RecTy, SourceLocation()),
3978
9
          /*BW=*/nullptr, /*Mutable=*/false,
3979
9
          /*InitStyle=*/ICIS_NoInit);
3980
9
      Field->setAccess(AS_public);
3981
9
      StaticRD->addDecl(Field);
3982
9
    }
3983
3
    StaticRD->completeDefinition();
3984
3
    QualType StaticTy = C.getRecordType(StaticRD);
3985
3
    llvm::Type *LLVMReductionsBufferTy =
3986
3
        CGM.getTypes().ConvertTypeForMem(StaticTy);
3987
    // FIXME: nvlink does not handle weak linkage correctly (objects with
3988
    // different sizes are reported as erroneous).
3989
    // Restore CommonLinkage as soon as nvlink is fixed.
3990
3
    auto *GV = new llvm::GlobalVariable(
3991
3
        CGM.getModule(), LLVMReductionsBufferTy,
3992
3
        /*isConstant=*/false, llvm::GlobalValue::InternalLinkage,
3993
3
        llvm::Constant::getNullValue(LLVMReductionsBufferTy),
3994
3
        "_openmp_teams_reductions_buffer_$_");
3995
3
    KernelTeamsReductionPtr->setInitializer(
3996
3
        llvm::ConstantExpr::getPointerBitCastOrAddrSpaceCast(GV,
3997
3
                                                             CGM.VoidPtrTy));
3998
3
  }
3999
169
  CGOpenMPRuntime::clear();
4000
169
}
4001
4002
268
llvm::Value *CGOpenMPRuntimeGPU::getGPUNumThreads(CodeGenFunction &CGF) {
4003
268
  CGBuilderTy &Bld = CGF.Builder;
4004
268
  llvm::Module *M = &CGF.CGM.getModule();
4005
268
  const char *LocSize = "__kmpc_get_hardware_num_threads_in_block";
4006
268
  llvm::Function *F = M->getFunction(LocSize);
4007
268
  if (!F) {
4008
33
    F = llvm::Function::Create(
4009
33
        llvm::FunctionType::get(CGF.Int32Ty, llvm::None, false),
4010
33
        llvm::GlobalVariable::ExternalLinkage, LocSize, &CGF.CGM.getModule());
4011
33
  }
4012
268
  return Bld.CreateCall(F, llvm::None, "nvptx_num_threads");
4013
268
}
4014
4015
86
llvm::Value *CGOpenMPRuntimeGPU::getGPUThreadID(CodeGenFunction &CGF) {
4016
86
  ArrayRef<llvm::Value *> Args{};
4017
86
  return CGF.EmitRuntimeCall(
4018
86
      OMPBuilder.getOrCreateRuntimeFunction(
4019
86
          CGM.getModule(), OMPRTL___kmpc_get_hardware_thread_id_in_block),
4020
86
      Args);
4021
86
}
4022
4023
43
llvm::Value *CGOpenMPRuntimeGPU::getGPUWarpSize(CodeGenFunction &CGF) {
4024
43
  ArrayRef<llvm::Value *> Args{};
4025
43
  return CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
4026
43
                                 CGM.getModule(), OMPRTL___kmpc_get_warp_size),
4027
43
                             Args);
4028
43
}