Coverage Report

Created: 2020-09-19 12:23

/Users/buildslave/jenkins/workspace/coverage/llvm-project/clang/lib/CodeGen/CGStmtOpenMP.cpp
Line
Count
Source (jump to first uncovered line)
1
//===--- CGStmtOpenMP.cpp - Emit LLVM Code from Statements ----------------===//
2
//
3
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4
// See https://llvm.org/LICENSE.txt for license information.
5
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6
//
7
//===----------------------------------------------------------------------===//
8
//
9
// This contains code to emit OpenMP nodes as LLVM code.
10
//
11
//===----------------------------------------------------------------------===//
12
13
#include "CGCleanup.h"
14
#include "CGOpenMPRuntime.h"
15
#include "CodeGenFunction.h"
16
#include "CodeGenModule.h"
17
#include "TargetInfo.h"
18
#include "clang/AST/ASTContext.h"
19
#include "clang/AST/Attr.h"
20
#include "clang/AST/DeclOpenMP.h"
21
#include "clang/AST/OpenMPClause.h"
22
#include "clang/AST/Stmt.h"
23
#include "clang/AST/StmtOpenMP.h"
24
#include "clang/AST/StmtVisitor.h"
25
#include "clang/Basic/OpenMPKinds.h"
26
#include "clang/Basic/PrettyStackTrace.h"
27
#include "llvm/Frontend/OpenMP/OMPConstants.h"
28
#include "llvm/Frontend/OpenMP/OMPIRBuilder.h"
29
#include "llvm/IR/Constants.h"
30
#include "llvm/IR/Instructions.h"
31
#include "llvm/Support/AtomicOrdering.h"
32
using namespace clang;
33
using namespace CodeGen;
34
using namespace llvm::omp;
35
36
static const VarDecl *getBaseDecl(const Expr *Ref);
37
38
namespace {
39
/// Lexical scope for OpenMP executable constructs, that handles correct codegen
40
/// for captured expressions.
41
class OMPLexicalScope : public CodeGenFunction::LexicalScope {
42
14.2k
  void emitPreInitStmt(CodeGenFunction &CGF, const OMPExecutableDirective &S) {
43
17.4k
    for (const auto *C : S.clauses()) {
44
17.4k
      if (const auto *CPI = OMPClauseWithPreInit::get(C)) {
45
10.2k
        if (const auto *PreInit =
46
1.02k
                cast_or_null<DeclStmt>(CPI->getPreInitStmt())) {
47
1.09k
          for (const auto *I : PreInit->decls()) {
48
1.09k
            if (!I->hasAttr<OMPCaptureNoInitAttr>()) {
49
1.08k
              CGF.EmitVarDecl(cast<VarDecl>(*I));
50
18
            } else {
51
18
              CodeGenFunction::AutoVarEmission Emission =
52
18
                  CGF.EmitAutoVarAlloca(cast<VarDecl>(*I));
53
18
              CGF.EmitAutoVarCleanups(Emission);
54
18
            }
55
1.09k
          }
56
1.02k
        }
57
10.2k
      }
58
17.4k
    }
59
14.2k
  }
60
  CodeGenFunction::OMPPrivateScope InlinedShareds;
61
62
16.3k
  static bool isCapturedVar(CodeGenFunction &CGF, const VarDecl *VD) {
63
16.3k
    return CGF.LambdaCaptureFields.lookup(VD) ||
64
15.8k
           (CGF.CapturedStmtInfo && 
CGF.CapturedStmtInfo->lookup(VD)3.94k
) ||
65
12.4k
           (CGF.CurCodeDecl && isa<BlockDecl>(CGF.CurCodeDecl) &&
66
8
            cast<BlockDecl>(CGF.CurCodeDecl)->capturesVariable(VD));
67
16.3k
  }
68
69
public:
70
  OMPLexicalScope(
71
      CodeGenFunction &CGF, const OMPExecutableDirective &S,
72
      const llvm::Optional<OpenMPDirectiveKind> CapturedRegion = llvm::None,
73
      const bool EmitPreInitStmt = true)
74
      : CodeGenFunction::LexicalScope(CGF, S.getSourceRange()),
75
23.3k
        InlinedShareds(CGF) {
76
23.3k
    if (EmitPreInitStmt)
77
14.2k
      emitPreInitStmt(CGF, S);
78
23.3k
    if (!CapturedRegion.hasValue())
79
11.7k
      return;
80
11.5k
    assert(S.hasAssociatedStmt() &&
81
11.5k
           "Expected associated statement for inlined directive.");
82
11.5k
    const CapturedStmt *CS = S.getCapturedStmt(*CapturedRegion);
83
18.5k
    for (const auto &C : CS->captures()) {
84
18.5k
      if (C.capturesVariable() || 
C.capturesVariableByCopy()10.7k
) {
85
16.3k
        auto *VD = C.getCapturedVar();
86
16.3k
        assert(VD == VD->getCanonicalDecl() &&
87
16.3k
               "Canonical decl must be captured.");
88
16.3k
        DeclRefExpr DRE(
89
16.3k
            CGF.getContext(), const_cast<VarDecl *>(VD),
90
16.3k
            isCapturedVar(CGF, VD) || 
(12.4k
CGF.CapturedStmtInfo12.4k
&&
91
542
                                       InlinedShareds.isGlobalVarCaptured(VD)),
92
16.3k
            VD->getType().getNonReferenceType(), VK_LValue, C.getLocation());
93
16.3k
        InlinedShareds.addPrivate(VD, [&CGF, &DRE]() -> Address {
94
16.3k
          return CGF.EmitLValue(&DRE).getAddress(CGF);
95
16.3k
        });
96
16.3k
      }
97
18.5k
    }
98
11.5k
    (void)InlinedShareds.Privatize();
99
11.5k
  }
100
};
101
102
/// Lexical scope for OpenMP parallel construct, that handles correct codegen
103
/// for captured expressions.
104
class OMPParallelScope final : public OMPLexicalScope {
105
5.80k
  bool EmitPreInitStmt(const OMPExecutableDirective &S) {
106
5.80k
    OpenMPDirectiveKind Kind = S.getDirectiveKind();
107
5.80k
    return !(isOpenMPTargetExecutionDirective(Kind) ||
108
2.55k
             isOpenMPLoopBoundSharingDirective(Kind)) &&
109
1.33k
           isOpenMPParallelDirective(Kind);
110
5.80k
  }
111
112
public:
113
  OMPParallelScope(CodeGenFunction &CGF, const OMPExecutableDirective &S)
114
      : OMPLexicalScope(CGF, S, /*CapturedRegion=*/llvm::None,
115
5.80k
                        EmitPreInitStmt(S)) {}
116
};
117
118
/// Lexical scope for OpenMP teams construct, that handles correct codegen
119
/// for captured expressions.
120
class OMPTeamsScope final : public OMPLexicalScope {
121
5.32k
  bool EmitPreInitStmt(const OMPExecutableDirective &S) {
122
5.32k
    OpenMPDirectiveKind Kind = S.getDirectiveKind();
123
5.32k
    return !isOpenMPTargetExecutionDirective(Kind) &&
124
1.74k
           isOpenMPTeamsDirective(Kind);
125
5.32k
  }
126
127
public:
128
  OMPTeamsScope(CodeGenFunction &CGF, const OMPExecutableDirective &S)
129
      : OMPLexicalScope(CGF, S, /*CapturedRegion=*/llvm::None,
130
5.32k
                        EmitPreInitStmt(S)) {}
131
};
132
133
/// Private scope for OpenMP loop-based directives, that supports capturing
134
/// of used expression from loop statement.
135
class OMPLoopScope : public CodeGenFunction::RunCleanupsScope {
136
16.2k
  void emitPreInitStmt(CodeGenFunction &CGF, const OMPLoopDirective &S) {
137
16.2k
    CodeGenFunction::OMPMapVars PreCondVars;
138
16.2k
    llvm::DenseSet<const VarDecl *> EmittedAsPrivate;
139
16.9k
    for (const auto *E : S.counters()) {
140
16.9k
      const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
141
16.9k
      EmittedAsPrivate.insert(VD->getCanonicalDecl());
142
16.9k
      (void)PreCondVars.setVarAddr(
143
16.9k
          CGF, VD, CGF.CreateMemTemp(VD->getType().getNonReferenceType()));
144
16.9k
    }
145
    // Mark private vars as undefs.
146
770
    for (const auto *C : S.getClausesOfKind<OMPPrivateClause>()) {
147
2.90k
      for (const Expr *IRef : C->varlists()) {
148
2.90k
        const auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(IRef)->getDecl());
149
2.90k
        if (EmittedAsPrivate.insert(OrigVD->getCanonicalDecl()).second) {
150
2.57k
          (void)PreCondVars.setVarAddr(
151
2.57k
              CGF, OrigVD,
152
2.57k
              Address(llvm::UndefValue::get(
153
2.57k
                          CGF.ConvertTypeForMem(CGF.getContext().getPointerType(
154
2.57k
                              OrigVD->getType().getNonReferenceType()))),
155
2.57k
                      CGF.getContext().getDeclAlign(OrigVD)));
156
2.57k
        }
157
2.90k
      }
158
770
    }
159
16.2k
    (void)PreCondVars.apply(CGF);
160
    // Emit init, __range and __end variables for C++ range loops.
161
16.2k
    const Stmt *Body =
162
16.2k
        S.getInnermostCapturedStmt()->getCapturedStmt()->IgnoreContainers();
163
33.1k
    for (unsigned Cnt = 0; Cnt < S.getCollapsedNumber(); 
++Cnt16.9k
) {
164
16.9k
      Body = OMPLoopDirective::tryToFindNextInnerLoop(
165
16.9k
          Body, /*TryImperfectlyNestedLoops=*/true);
166
16.9k
      if (auto *For = dyn_cast<ForStmt>(Body)) {
167
16.9k
        Body = For->getBody();
168
6
      } else {
169
6
        assert(isa<CXXForRangeStmt>(Body) &&
170
6
               "Expected canonical for loop or range-based for loop.");
171
6
        auto *CXXFor = cast<CXXForRangeStmt>(Body);
172
6
        if (const Stmt *Init = CXXFor->getInit())
173
0
          CGF.EmitStmt(Init);
174
6
        CGF.EmitStmt(CXXFor->getRangeStmt());
175
6
        CGF.EmitStmt(CXXFor->getEndStmt());
176
6
        Body = CXXFor->getBody();
177
6
      }
178
16.9k
    }
179
16.2k
    if (const auto *PreInits = cast_or_null<DeclStmt>(S.getPreInits())) {
180
3.15k
      for (const auto *I : PreInits->decls())
181
6.85k
        CGF.EmitVarDecl(cast<VarDecl>(*I));
182
3.15k
    }
183
16.2k
    PreCondVars.restore(CGF);
184
16.2k
  }
185
186
public:
187
  OMPLoopScope(CodeGenFunction &CGF, const OMPLoopDirective &S)
188
16.2k
      : CodeGenFunction::RunCleanupsScope(CGF) {
189
16.2k
    emitPreInitStmt(CGF, S);
190
16.2k
  }
191
};
192
193
class OMPSimdLexicalScope : public CodeGenFunction::LexicalScope {
194
  CodeGenFunction::OMPPrivateScope InlinedShareds;
195
196
41.0k
  static bool isCapturedVar(CodeGenFunction &CGF, const VarDecl *VD) {
197
41.0k
    return CGF.LambdaCaptureFields.lookup(VD) ||
198
40.0k
           (CGF.CapturedStmtInfo && 
CGF.CapturedStmtInfo->lookup(VD)6.45k
) ||
199
40.0k
           (CGF.CurCodeDecl && isa<BlockDecl>(CGF.CurCodeDecl) &&
200
78
            cast<BlockDecl>(CGF.CurCodeDecl)->capturesVariable(VD));
201
41.0k
  }
202
203
public:
204
  OMPSimdLexicalScope(CodeGenFunction &CGF, const OMPExecutableDirective &S)
205
      : CodeGenFunction::LexicalScope(CGF, S.getSourceRange()),
206
12.6k
        InlinedShareds(CGF) {
207
16.9k
    for (const auto *C : S.clauses()) {
208
16.9k
      if (const auto *CPI = OMPClauseWithPreInit::get(C)) {
209
11.2k
        if (const auto *PreInit =
210
1.25k
                cast_or_null<DeclStmt>(CPI->getPreInitStmt())) {
211
1.31k
          for (const auto *I : PreInit->decls()) {
212
1.31k
            if (!I->hasAttr<OMPCaptureNoInitAttr>()) {
213
1.29k
              CGF.EmitVarDecl(cast<VarDecl>(*I));
214
18
            } else {
215
18
              CodeGenFunction::AutoVarEmission Emission =
216
18
                  CGF.EmitAutoVarAlloca(cast<VarDecl>(*I));
217
18
              CGF.EmitAutoVarCleanups(Emission);
218
18
            }
219
1.31k
          }
220
1.25k
        }
221
5.69k
      } else if (const auto *UDP = dyn_cast<OMPUseDevicePtrClause>(C)) {
222
82
        for (const Expr *E : UDP->varlists()) {
223
82
          const Decl *D = cast<DeclRefExpr>(E)->getDecl();
224
82
          if (const auto *OED = dyn_cast<OMPCapturedExprDecl>(D))
225
20
            CGF.EmitVarDecl(*OED);
226
82
        }
227
5.62k
      } else if (const auto *UDP = dyn_cast<OMPUseDeviceAddrClause>(C)) {
228
24
        for (const Expr *E : UDP->varlists()) {
229
24
          const Decl *D = getBaseDecl(E);
230
24
          if (const auto *OED = dyn_cast<OMPCapturedExprDecl>(D))
231
10
            CGF.EmitVarDecl(*OED);
232
24
        }
233
6
      }
234
16.9k
    }
235
12.6k
    if (!isOpenMPSimdDirective(S.getDirectiveKind()))
236
9.34k
      CGF.EmitOMPPrivateClause(S, InlinedShareds);
237
12.6k
    if (const auto *TG = dyn_cast<OMPTaskgroupDirective>(&S)) {
238
37
      if (const Expr *E = TG->getReductionRef())
239
26
        CGF.EmitVarDecl(*cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl()));
240
37
    }
241
12.6k
    const auto *CS = cast_or_null<CapturedStmt>(S.getAssociatedStmt());
242
39.3k
    while (CS) {
243
47.1k
      for (auto &C : CS->captures()) {
244
47.1k
        if (C.capturesVariable() || 
C.capturesVariableByCopy()30.6k
) {
245
41.0k
          auto *VD = C.getCapturedVar();
246
41.0k
          assert(VD == VD->getCanonicalDecl() &&
247
41.0k
                 "Canonical decl must be captured.");
248
41.0k
          DeclRefExpr DRE(CGF.getContext(), const_cast<VarDecl *>(VD),
249
41.0k
                          isCapturedVar(CGF, VD) ||
250
40.0k
                              (CGF.CapturedStmtInfo &&
251
6.44k
                               InlinedShareds.isGlobalVarCaptured(VD)),
252
41.0k
                          VD->getType().getNonReferenceType(), VK_LValue,
253
41.0k
                          C.getLocation());
254
41.0k
          InlinedShareds.addPrivate(VD, [&CGF, &DRE]() -> Address {
255
41.0k
            return CGF.EmitLValue(&DRE).getAddress(CGF);
256
41.0k
          });
257
41.0k
        }
258
47.1k
      }
259
26.7k
      CS = dyn_cast<CapturedStmt>(CS->getCapturedStmt());
260
26.7k
    }
261
12.6k
    (void)InlinedShareds.Privatize();
262
12.6k
  }
263
};
264
265
} // namespace
266
267
static void emitCommonOMPTargetDirective(CodeGenFunction &CGF,
268
                                         const OMPExecutableDirective &S,
269
                                         const RegionCodeGenTy &CodeGen);
270
271
12.1k
LValue CodeGenFunction::EmitOMPSharedLValue(const Expr *E) {
272
12.1k
  if (const auto *OrigDRE = dyn_cast<DeclRefExpr>(E)) {
273
8.95k
    if (const auto *OrigVD = dyn_cast<VarDecl>(OrigDRE->getDecl())) {
274
8.95k
      OrigVD = OrigVD->getCanonicalDecl();
275
8.95k
      bool IsCaptured =
276
8.95k
          LambdaCaptureFields.lookup(OrigVD) ||
277
8.83k
          (CapturedStmtInfo && 
CapturedStmtInfo->lookup(OrigVD)964
) ||
278
8.16k
          (CurCodeDecl && 
isa<BlockDecl>(CurCodeDecl)8.11k
);
279
8.95k
      DeclRefExpr DRE(getContext(), const_cast<VarDecl *>(OrigVD), IsCaptured,
280
8.95k
                      OrigDRE->getType(), VK_LValue, OrigDRE->getExprLoc());
281
8.95k
      return EmitLValue(&DRE);
282
8.95k
    }
283
3.20k
  }
284
3.20k
  return EmitLValue(E);
285
3.20k
}
286
287
16.6k
llvm::Value *CodeGenFunction::getTypeSize(QualType Ty) {
288
16.6k
  ASTContext &C = getContext();
289
16.6k
  llvm::Value *Size = nullptr;
290
16.6k
  auto SizeInChars = C.getTypeSizeInChars(Ty);
291
16.6k
  if (SizeInChars.isZero()) {
292
    // getTypeSizeInChars() returns 0 for a VLA.
293
2.32k
    while (const VariableArrayType *VAT = C.getAsVariableArrayType(Ty)) {
294
1.16k
      VlaSizePair VlaSize = getVLASize(VAT);
295
1.16k
      Ty = VlaSize.Type;
296
0
      Size = Size ? Builder.CreateNUWMul(Size, VlaSize.NumElts)
297
1.16k
                  : VlaSize.NumElts;
298
1.16k
    }
299
1.16k
    SizeInChars = C.getTypeSizeInChars(Ty);
300
1.16k
    if (SizeInChars.isZero())
301
0
      return llvm::ConstantInt::get(SizeTy, /*V=*/0);
302
1.16k
    return Builder.CreateNUWMul(Size, CGM.getSize(SizeInChars));
303
1.16k
  }
304
15.5k
  return CGM.getSize(SizeInChars);
305
15.5k
}
306
307
void CodeGenFunction::GenerateOpenMPCapturedVars(
308
19.8k
    const CapturedStmt &S, SmallVectorImpl<llvm::Value *> &CapturedVars) {
309
19.8k
  const RecordDecl *RD = S.getCapturedRecordDecl();
310
19.8k
  auto CurField = RD->field_begin();
311
19.8k
  auto CurCap = S.captures().begin();
312
19.8k
  for (CapturedStmt::const_capture_init_iterator I = S.capture_init_begin(),
313
19.8k
                                                 E = S.capture_init_end();
314
51.5k
       I != E; 
++I, ++CurField, ++CurCap31.6k
) {
315
31.6k
    if (CurField->hasCapturedVLAType()) {
316
2.60k
      const VariableArrayType *VAT = CurField->getCapturedVLAType();
317
2.60k
      llvm::Value *Val = VLASizeMap[VAT->getSizeExpr()];
318
2.60k
      CapturedVars.push_back(Val);
319
29.0k
    } else if (CurCap->capturesThis()) {
320
1.69k
      CapturedVars.push_back(CXXThisValue);
321
27.3k
    } else if (CurCap->capturesVariableByCopy()) {
322
14.5k
      llvm::Value *CV = EmitLoadOfScalar(EmitLValue(*I), CurCap->getLocation());
323
14.5k
324
      // If the field is not a pointer, we need to save the actual value
325
      // and load it as a void pointer.
326
14.5k
      if (!CurField->getType()->isAnyPointerType()) {
327
12.7k
        ASTContext &Ctx = getContext();
328
12.7k
        Address DstAddr = CreateMemTemp(
329
12.7k
            Ctx.getUIntPtrType(),
330
12.7k
            Twine(CurCap->getCapturedVar()->getName(), ".casted"));
331
12.7k
        LValue DstLV = MakeAddrLValue(DstAddr, Ctx.getUIntPtrType());
332
12.7k
333
12.7k
        llvm::Value *SrcAddrVal = EmitScalarConversion(
334
12.7k
            DstAddr.getPointer(), Ctx.getPointerType(Ctx.getUIntPtrType()),
335
12.7k
            Ctx.getPointerType(CurField->getType()), CurCap->getLocation());
336
12.7k
        LValue SrcLV =
337
12.7k
            MakeNaturalAlignAddrLValue(SrcAddrVal, CurField->getType());
338
12.7k
339
        // Store the value using the source type pointer.
340
12.7k
        EmitStoreThroughLValue(RValue::get(CV), SrcLV);
341
12.7k
342
        // Load the value using the destination type pointer.
343
12.7k
        CV = EmitLoadOfScalar(DstLV, CurCap->getLocation());
344
12.7k
      }
345
14.5k
      CapturedVars.push_back(CV);
346
12.8k
    } else {
347
12.8k
      assert(CurCap->capturesVariable() && "Expected capture by reference.");
348
12.8k
      CapturedVars.push_back(EmitLValue(*I).getAddress(*this).getPointer());
349
12.8k
    }
350
31.6k
  }
351
19.8k
}
352
353
static Address castValueFromUintptr(CodeGenFunction &CGF, SourceLocation Loc,
354
                                    QualType DstType, StringRef Name,
355
18.1k
                                    LValue AddrLV) {
356
18.1k
  ASTContext &Ctx = CGF.getContext();
357
18.1k
358
18.1k
  llvm::Value *CastedPtr = CGF.EmitScalarConversion(
359
18.1k
      AddrLV.getAddress(CGF).getPointer(), Ctx.getUIntPtrType(),
360
18.1k
      Ctx.getPointerType(DstType), Loc);
361
18.1k
  Address TmpAddr =
362
18.1k
      CGF.MakeNaturalAlignAddrLValue(CastedPtr, Ctx.getPointerType(DstType))
363
18.1k
          .getAddress(CGF);
364
18.1k
  return TmpAddr;
365
18.1k
}
366
367
7.11k
static QualType getCanonicalParamType(ASTContext &C, QualType T) {
368
7.11k
  if (T->isLValueReferenceType())
369
2.08k
    return C.getLValueReferenceType(
370
2.08k
        getCanonicalParamType(C, T.getNonReferenceType()),
371
2.08k
        /*SpelledAsLValue=*/false);
372
5.03k
  if (T->isPointerType())
373
27
    return C.getPointerType(getCanonicalParamType(C, T->getPointeeType()));
374
5.00k
  if (const ArrayType *A = T->getAsArrayTypeUnsafe()) {
375
2.96k
    if (const auto *VLA = dyn_cast<VariableArrayType>(A))
376
2.90k
      return getCanonicalParamType(C, VLA->getElementType());
377
60
    if (!A->isVariablyModifiedType())
378
60
      return C.getCanonicalType(T);
379
2.04k
  }
380
2.04k
  return C.getCanonicalParamType(T);
381
2.04k
}
382
383
namespace {
384
/// Contains required data for proper outlined function codegen.
385
struct FunctionOptions {
386
  /// Captured statement for which the function is generated.
387
  const CapturedStmt *S = nullptr;
388
  /// true if cast to/from  UIntPtr is required for variables captured by
389
  /// value.
390
  const bool UIntPtrCastRequired = true;
391
  /// true if only casted arguments must be registered as local args or VLA
392
  /// sizes.
393
  const bool RegisterCastedArgsOnly = false;
394
  /// Name of the generated function.
395
  const StringRef FunctionName;
396
  /// Location of the non-debug version of the outlined function.
397
  SourceLocation Loc;
398
  explicit FunctionOptions(const CapturedStmt *S, bool UIntPtrCastRequired,
399
                           bool RegisterCastedArgsOnly, StringRef FunctionName,
400
                           SourceLocation Loc)
401
      : S(S), UIntPtrCastRequired(UIntPtrCastRequired),
402
        RegisterCastedArgsOnly(UIntPtrCastRequired && RegisterCastedArgsOnly),
403
22.4k
        FunctionName(FunctionName), Loc(Loc) {}
404
};
405
} // namespace
406
407
static llvm::Function *emitOutlinedFunctionPrologue(
408
    CodeGenFunction &CGF, FunctionArgList &Args,
409
    llvm::MapVector<const Decl *, std::pair<const VarDecl *, Address>>
410
        &LocalAddrs,
411
    llvm::DenseMap<const Decl *, std::pair<const Expr *, llvm::Value *>>
412
        &VLASizes,
413
22.4k
    llvm::Value *&CXXThisValue, const FunctionOptions &FO) {
414
22.4k
  const CapturedDecl *CD = FO.S->getCapturedDecl();
415
22.4k
  const RecordDecl *RD = FO.S->getCapturedRecordDecl();
416
22.4k
  assert(CD->hasBody() && "missing CapturedDecl body");
417
22.4k
418
22.4k
  CXXThisValue = nullptr;
419
  // Build the argument list.
420
22.4k
  CodeGenModule &CGM = CGF.CGM;
421
22.4k
  ASTContext &Ctx = CGM.getContext();
422
22.4k
  FunctionArgList TargetArgs;
423
22.4k
  Args.append(CD->param_begin(),
424
22.4k
              std::next(CD->param_begin(), CD->getContextParamPosition()));
425
22.4k
  TargetArgs.append(
426
22.4k
      CD->param_begin(),
427
22.4k
      std::next(CD->param_begin(), CD->getContextParamPosition()));
428
22.4k
  auto I = FO.S->captures().begin();
429
22.4k
  FunctionDecl *DebugFunctionDecl = nullptr;
430
22.4k
  if (!FO.UIntPtrCastRequired) {
431
73
    FunctionProtoType::ExtProtoInfo EPI;
432
73
    QualType FunctionTy = Ctx.getFunctionType(Ctx.VoidTy, llvm::None, EPI);
433
73
    DebugFunctionDecl = FunctionDecl::Create(
434
73
        Ctx, Ctx.getTranslationUnitDecl(), FO.S->getBeginLoc(),
435
73
        SourceLocation(), DeclarationName(), FunctionTy,
436
73
        Ctx.getTrivialTypeSourceInfo(FunctionTy), SC_Static,
437
73
        /*isInlineSpecified=*/false, /*hasWrittenPrototype=*/false);
438
73
  }
439
36.1k
  for (const FieldDecl *FD : RD->fields()) {
440
36.1k
    QualType ArgType = FD->getType();
441
36.1k
    IdentifierInfo *II = nullptr;
442
36.1k
    VarDecl *CapVar = nullptr;
443
36.1k
444
    // If this is a capture by copy and the type is not a pointer, the outlined
445
    // function argument type should be uintptr and the value properly casted to
446
    // uintptr. This is necessary given that the runtime library is only able to
447
    // deal with pointers. We can pass in the same way the VLA type sizes to the
448
    // outlined function.
449
36.1k
    if (FO.UIntPtrCastRequired &&
450
35.9k
        ((I->capturesVariableByCopy() && 
!ArgType->isAnyPointerType()17.0k
) ||
451
20.8k
         I->capturesVariableArrayType()))
452
18.1k
      ArgType = Ctx.getUIntPtrType();
453
36.1k
454
36.1k
    if (I->capturesVariable() || 
I->capturesVariableByCopy()21.9k
) {
455
31.2k
      CapVar = I->getCapturedVar();
456
31.2k
      II = CapVar->getIdentifier();
457
4.86k
    } else if (I->capturesThis()) {
458
1.85k
      II = &Ctx.Idents.get("this");
459
3.01k
    } else {
460
3.01k
      assert(I->capturesVariableArrayType());
461
3.01k
      II = &Ctx.Idents.get("vla");
462
3.01k
    }
463
36.1k
    if (ArgType->isVariablyModifiedType())
464
2.10k
      ArgType = getCanonicalParamType(Ctx, ArgType);
465
36.1k
    VarDecl *Arg;
466
36.1k
    if (DebugFunctionDecl && 
(185
CapVar185
||
I->capturesThis()15
)) {
467
176
      Arg = ParmVarDecl::Create(
468
176
          Ctx, DebugFunctionDecl,
469
170
          CapVar ? CapVar->getBeginLoc() : 
FD->getBeginLoc()6
,
470
170
          CapVar ? CapVar->getLocation() : 
FD->getLocation()6
, II, ArgType,
471
176
          /*TInfo=*/nullptr, SC_None, /*DefArg=*/nullptr);
472
35.9k
    } else {
473
35.9k
      Arg = ImplicitParamDecl::Create(Ctx, /*DC=*/nullptr, FD->getLocation(),
474
35.9k
                                      II, ArgType, ImplicitParamDecl::Other);
475
35.9k
    }
476
36.1k
    Args.emplace_back(Arg);
477
    // Do not cast arguments if we emit function with non-original types.
478
36.1k
    TargetArgs.emplace_back(
479
36.1k
        FO.UIntPtrCastRequired
480
35.9k
            ? Arg
481
185
            : CGM.getOpenMPRuntime().translateParameter(FD, Arg));
482
36.1k
    ++I;
483
36.1k
  }
484
22.4k
  Args.append(
485
22.4k
      std::next(CD->param_begin(), CD->getContextParamPosition() + 1),
486
22.4k
      CD->param_end());
487
22.4k
  TargetArgs.append(
488
22.4k
      std::next(CD->param_begin(), CD->getContextParamPosition() + 1),
489
22.4k
      CD->param_end());
490
22.4k
491
  // Create the function declaration.
492
22.4k
  const CGFunctionInfo &FuncInfo =
493
22.4k
      CGM.getTypes().arrangeBuiltinFunctionDeclaration(Ctx.VoidTy, TargetArgs);
494
22.4k
  llvm::FunctionType *FuncLLVMTy = CGM.getTypes().GetFunctionType(FuncInfo);
495
22.4k
496
22.4k
  auto *F =
497
22.4k
      llvm::Function::Create(FuncLLVMTy, llvm::GlobalValue::InternalLinkage,
498
22.4k
                             FO.FunctionName, &CGM.getModule());
499
22.4k
  CGM.SetInternalFunctionAttributes(CD, F, FuncInfo);
500
22.4k
  if (CD->isNothrow())
501
22.4k
    F->setDoesNotThrow();
502
22.4k
  F->setDoesNotRecurse();
503
22.4k
504
  // Generate the function.
505
22.4k
  CGF.StartFunction(CD, Ctx.VoidTy, F, FuncInfo, TargetArgs,
506
22.4k
                    FO.UIntPtrCastRequired ? FO.Loc : 
FO.S->getBeginLoc()73
,
507
22.4k
                    FO.UIntPtrCastRequired ? FO.Loc
508
73
                                           : CD->getBody()->getBeginLoc());
509
22.4k
  unsigned Cnt = CD->getContextParamPosition();
510
22.4k
  I = FO.S->captures().begin();
511
36.1k
  for (const FieldDecl *FD : RD->fields()) {
512
    // Do not map arguments if we emit function with non-original types.
513
36.1k
    Address LocalAddr(Address::invalid());
514
36.1k
    if (!FO.UIntPtrCastRequired && 
Args[Cnt] != TargetArgs[Cnt]185
) {
515
54
      LocalAddr = CGM.getOpenMPRuntime().getParameterAddress(CGF, Args[Cnt],
516
54
                                                             TargetArgs[Cnt]);
517
36.0k
    } else {
518
36.0k
      LocalAddr = CGF.GetAddrOfLocalVar(Args[Cnt]);
519
36.0k
    }
520
    // If we are capturing a pointer by copy we don't need to do anything, just
521
    // use the value that we get from the arguments.
522
36.1k
    if (I->capturesVariableByCopy() && 
FD->getType()->isAnyPointerType()17.0k
) {
523
1.91k
      const VarDecl *CurVD = I->getCapturedVar();
524
1.91k
      if (!FO.RegisterCastedArgsOnly)
525
1.90k
        LocalAddrs.insert({Args[Cnt], {CurVD, LocalAddr}});
526
1.91k
      ++Cnt;
527
1.91k
      ++I;
528
1.91k
      continue;
529
1.91k
    }
530
34.2k
531
34.2k
    LValue ArgLVal = CGF.MakeAddrLValue(LocalAddr, Args[Cnt]->getType(),
532
34.2k
                                        AlignmentSource::Decl);
533
34.2k
    if (FD->hasCapturedVLAType()) {
534
3.01k
      if (FO.UIntPtrCastRequired) {
535
3.00k
        ArgLVal = CGF.MakeAddrLValue(
536
3.00k
            castValueFromUintptr(CGF, I->getLocation(), FD->getType(),
537
3.00k
                                 Args[Cnt]->getName(), ArgLVal),
538
3.00k
            FD->getType(), AlignmentSource::Decl);
539
3.00k
      }
540
3.01k
      llvm::Value *ExprArg = CGF.EmitLoadOfScalar(ArgLVal, I->getLocation());
541
3.01k
      const VariableArrayType *VAT = FD->getCapturedVLAType();
542
3.01k
      VLASizes.try_emplace(Args[Cnt], VAT->getSizeExpr(), ExprArg);
543
31.2k
    } else if (I->capturesVariable()) {
544
14.1k
      const VarDecl *Var = I->getCapturedVar();
545
14.1k
      QualType VarTy = Var->getType();
546
14.1k
      Address ArgAddr = ArgLVal.getAddress(CGF);
547
14.1k
      if (ArgLVal.getType()->isLValueReferenceType()) {
548
14.1k
        ArgAddr = CGF.EmitLoadOfReference(ArgLVal);
549
0
      } else if (!VarTy->isVariablyModifiedType() || !VarTy->isPointerType()) {
550
0
        assert(ArgLVal.getType()->isPointerType());
551
0
        ArgAddr = CGF.EmitLoadOfPointer(
552
0
            ArgAddr, ArgLVal.getType()->castAs<PointerType>());
553
0
      }
554
14.1k
      if (!FO.RegisterCastedArgsOnly) {
555
14.0k
        LocalAddrs.insert(
556
14.0k
            {Args[Cnt],
557
14.0k
             {Var, Address(ArgAddr.getPointer(), Ctx.getDeclAlign(Var))}});
558
14.0k
      }
559
17.0k
    } else if (I->capturesVariableByCopy()) {
560
15.1k
      assert(!FD->getType()->isAnyPointerType() &&
561
15.1k
             "Not expecting a captured pointer.");
562
15.1k
      const VarDecl *Var = I->getCapturedVar();
563
15.1k
      LocalAddrs.insert({Args[Cnt],
564
15.1k
                         {Var, FO.UIntPtrCastRequired
565
15.1k
                                   ? castValueFromUintptr(
566
15.1k
                                         CGF, I->getLocation(), FD->getType(),
567
15.1k
                                         Args[Cnt]->getName(), ArgLVal)
568
22
                                   : ArgLVal.getAddress(CGF)}});
569
1.85k
    } else {
570
      // If 'this' is captured, load it into CXXThisValue.
571
1.85k
      assert(I->capturesThis());
572
1.85k
      CXXThisValue = CGF.EmitLoadOfScalar(ArgLVal, I->getLocation());
573
1.85k
      LocalAddrs.insert({Args[Cnt], {nullptr, ArgLVal.getAddress(CGF)}});
574
1.85k
    }
575
34.2k
    ++Cnt;
576
34.2k
    ++I;
577
34.2k
  }
578
22.4k
579
22.4k
  return F;
580
22.4k
}
581
582
llvm::Function *
583
CodeGenFunction::GenerateOpenMPCapturedStmtFunction(const CapturedStmt &S,
584
22.4k
                                                    SourceLocation Loc) {
585
22.4k
  assert(
586
22.4k
      CapturedStmtInfo &&
587
22.4k
      "CapturedStmtInfo should be set when generating the captured function");
588
22.4k
  const CapturedDecl *CD = S.getCapturedDecl();
589
  // Build the argument list.
590
22.4k
  bool NeedWrapperFunction =
591
22.4k
      getDebugInfo() && 
CGM.getCodeGenOpts().hasReducedDebugInfo()224
;
592
22.4k
  FunctionArgList Args;
593
22.4k
  llvm::MapVector<const Decl *, std::pair<const VarDecl *, Address>> LocalAddrs;
594
22.4k
  llvm::DenseMap<const Decl *, std::pair<const Expr *, llvm::Value *>> VLASizes;
595
22.4k
  SmallString<256> Buffer;
596
22.4k
  llvm::raw_svector_ostream Out(Buffer);
597
22.4k
  Out << CapturedStmtInfo->getHelperName();
598
22.4k
  if (NeedWrapperFunction)
599
73
    Out << "_debug__";
600
22.4k
  FunctionOptions FO(&S, !NeedWrapperFunction, /*RegisterCastedArgsOnly=*/false,
601
22.4k
                     Out.str(), Loc);
602
22.4k
  llvm::Function *F = emitOutlinedFunctionPrologue(*this, Args, LocalAddrs,
603
22.4k
                                                   VLASizes, CXXThisValue, FO);
604
22.4k
  CodeGenFunction::OMPPrivateScope LocalScope(*this);
605
32.9k
  for (const auto &LocalAddrPair : LocalAddrs) {
606
32.9k
    if (LocalAddrPair.second.first) {
607
31.0k
      LocalScope.addPrivate(LocalAddrPair.second.first, [&LocalAddrPair]() {
608
31.0k
        return LocalAddrPair.second.second;
609
31.0k
      });
610
31.0k
    }
611
32.9k
  }
612
22.4k
  (void)LocalScope.Privatize();
613
22.4k
  for (const auto &VLASizePair : VLASizes)
614
3.00k
    VLASizeMap[VLASizePair.second.first] = VLASizePair.second.second;
615
22.4k
  PGO.assignRegionCounters(GlobalDecl(CD), F);
616
22.4k
  CapturedStmtInfo->EmitBody(*this, CD->getBody());
617
22.4k
  (void)LocalScope.ForceCleanup();
618
22.4k
  FinishFunction(CD->getBodyRBrace());
619
22.4k
  if (!NeedWrapperFunction)
620
22.3k
    return F;
621
73
622
73
  FunctionOptions WrapperFO(&S, /*UIntPtrCastRequired=*/true,
623
73
                            /*RegisterCastedArgsOnly=*/true,
624
73
                            CapturedStmtInfo->getHelperName(), Loc);
625
73
  CodeGenFunction WrapperCGF(CGM, /*suppressNewContext=*/true);
626
73
  WrapperCGF.CapturedStmtInfo = CapturedStmtInfo;
627
73
  Args.clear();
628
73
  LocalAddrs.clear();
629
73
  VLASizes.clear();
630
73
  llvm::Function *WrapperF =
631
73
      emitOutlinedFunctionPrologue(WrapperCGF, Args, LocalAddrs, VLASizes,
632
73
                                   WrapperCGF.CXXThisValue, WrapperFO);
633
73
  llvm::SmallVector<llvm::Value *, 4> CallArgs;
634
295
  for (const auto *Arg : Args) {
635
295
    llvm::Value *CallArg;
636
295
    auto I = LocalAddrs.find(Arg);
637
295
    if (I != LocalAddrs.end()) {
638
28
      LValue LV = WrapperCGF.MakeAddrLValue(
639
28
          I->second.second,
640
22
          I->second.first ? I->second.first->getType() : 
Arg->getType()6
,
641
28
          AlignmentSource::Decl);
642
28
      CallArg = WrapperCGF.EmitLoadOfScalar(LV, S.getBeginLoc());
643
267
    } else {
644
267
      auto EI = VLASizes.find(Arg);
645
267
      if (EI != VLASizes.end()) {
646
9
        CallArg = EI->second.second;
647
258
      } else {
648
258
        LValue LV = WrapperCGF.MakeAddrLValue(WrapperCGF.GetAddrOfLocalVar(Arg),
649
258
                                              Arg->getType(),
650
258
                                              AlignmentSource::Decl);
651
258
        CallArg = WrapperCGF.EmitLoadOfScalar(LV, S.getBeginLoc());
652
258
      }
653
267
    }
654
295
    CallArgs.emplace_back(WrapperCGF.EmitFromMemory(CallArg, Arg->getType()));
655
295
  }
656
73
  CGM.getOpenMPRuntime().emitOutlinedFunctionCall(WrapperCGF, Loc, F, CallArgs);
657
73
  WrapperCGF.FinishFunction();
658
73
  return WrapperF;
659
73
}
660
661
//===----------------------------------------------------------------------===//
662
//                              OpenMP Directive Emission
663
//===----------------------------------------------------------------------===//
664
// Emits an element-by-element copy loop over an array: computes the number of
// elements from \p DestAddr, then emits a guarded while-style loop that calls
// \p CopyGen once per element pair. Used by OpenMP clause codegen when an
// array copy cannot be lowered to a plain aggregate assignment.
//
// NOTE(review): only the destination's length is computed; the source array is
// presumably at least as long — confirm with callers.
void CodeGenFunction::EmitOMPAggregateAssign(
    Address DestAddr, Address SrcAddr, QualType OriginalType,
    const llvm::function_ref<void(Address, Address)> CopyGen) {
  // Perform element-by-element initialization.
  QualType ElementTy;

  // Drill down to the base element type on both arrays.
  const ArrayType *ArrayTy = OriginalType->getAsArrayTypeUnsafe();
  llvm::Value *NumElements = emitArrayLength(ArrayTy, ElementTy, DestAddr);
  // Make the source pointer type match the destination's element type so the
  // two sides can be advanced in lock-step below.
  SrcAddr = Builder.CreateElementBitCast(SrcAddr, DestAddr.getElementType());

  llvm::Value *SrcBegin = SrcAddr.getPointer();
  llvm::Value *DestBegin = DestAddr.getPointer();
  // Cast from pointer to array type to pointer to single element.
  llvm::Value *DestEnd = Builder.CreateGEP(DestBegin, NumElements);
  // The basic structure here is a while-do loop.
  llvm::BasicBlock *BodyBB = createBasicBlock("omp.arraycpy.body");
  llvm::BasicBlock *DoneBB = createBasicBlock("omp.arraycpy.done");
  // Guard against zero-length arrays: skip the body entirely when begin==end.
  llvm::Value *IsEmpty =
      Builder.CreateICmpEQ(DestBegin, DestEnd, "omp.arraycpy.isempty");
  Builder.CreateCondBr(IsEmpty, DoneBB, BodyBB);

  // Enter the loop body, making that address the current address.
  llvm::BasicBlock *EntryBB = Builder.GetInsertBlock();
  EmitBlock(BodyBB);

  CharUnits ElementSize = getContext().getTypeSizeInChars(ElementTy);

  // PHIs carry the current source/destination element pointers; the incoming
  // value from EntryBB is the array start, the back-edge value is added after
  // the body is emitted (CopyGen may create new blocks, hence the late
  // GetInsertBlock() when wiring the back edge below).
  llvm::PHINode *SrcElementPHI =
    Builder.CreatePHI(SrcBegin->getType(), 2, "omp.arraycpy.srcElementPast");
  SrcElementPHI->addIncoming(SrcBegin, EntryBB);
  Address SrcElementCurrent =
      Address(SrcElementPHI,
              SrcAddr.getAlignment().alignmentOfArrayElement(ElementSize));

  llvm::PHINode *DestElementPHI =
    Builder.CreatePHI(DestBegin->getType(), 2, "omp.arraycpy.destElementPast");
  DestElementPHI->addIncoming(DestBegin, EntryBB);
  Address DestElementCurrent =
    Address(DestElementPHI,
            DestAddr.getAlignment().alignmentOfArrayElement(ElementSize));

  // Emit copy.
  CopyGen(DestElementCurrent, SrcElementCurrent);

  // Shift the address forward by one element.
  llvm::Value *DestElementNext = Builder.CreateConstGEP1_32(
      DestElementPHI, /*Idx0=*/1, "omp.arraycpy.dest.element");
  llvm::Value *SrcElementNext = Builder.CreateConstGEP1_32(
      SrcElementPHI, /*Idx0=*/1, "omp.arraycpy.src.element");
  // Check whether we've reached the end.
  llvm::Value *Done =
      Builder.CreateICmpEQ(DestElementNext, DestEnd, "omp.arraycpy.done");
  Builder.CreateCondBr(Done, DoneBB, BodyBB);
  // Wire the back edges from whatever block the body ended in.
  DestElementPHI->addIncoming(DestElementNext, Builder.GetInsertBlock());
  SrcElementPHI->addIncoming(SrcElementNext, Builder.GetInsertBlock());

  // Done.
  EmitBlock(DoneBB, /*IsFinished=*/true);
}
724
725
// Emits a copy of \p OriginalType data from \p SrcAddr to \p DestAddr by
// evaluating the copy expression \p Copy with the pseudo source/destination
// variables (\p SrcVD / \p DestVD) temporarily remapped to the given
// addresses. Arrays with a plain '=' copy are lowered as a single aggregate
// assignment; other arrays are copied element by element.
void CodeGenFunction::EmitOMPCopy(QualType OriginalType, Address DestAddr,
                                  Address SrcAddr, const VarDecl *DestVD,
                                  const VarDecl *SrcVD, const Expr *Copy) {
  if (!OriginalType->isArrayType()) {
    // Scalar/non-array case: remap the pseudo source variable to the private
    // copy's address and emit the whole copy expression at once.
    CodeGenFunction::OMPPrivateScope Remap(*this);
    Remap.addPrivate(SrcVD, [SrcAddr]() { return SrcAddr; });
    Remap.addPrivate(DestVD, [DestAddr]() { return DestAddr; });
    (void)Remap.Privatize();
    EmitIgnoredExpr(Copy);
    return;
  }

  const auto *BinOp = dyn_cast<BinaryOperator>(Copy);
  if (BinOp && BinOp->getOpcode() == BO_Assign) {
    // A trivial array assignment can be emitted as one aggregate copy
    // (memcpy-like), no per-element work required.
    LValue DstLV = MakeAddrLValue(DestAddr, OriginalType);
    LValue SrcLV = MakeAddrLValue(SrcAddr, OriginalType);
    EmitAggregateAssign(DstLV, SrcLV, OriginalType);
    return;
  }

  // Arrays whose elements need a non-trivial copy expression: walk the array
  // and re-evaluate \p Copy for every element pair.
  EmitOMPAggregateAssign(
      DestAddr, SrcAddr, OriginalType,
      [this, Copy, SrcVD, DestVD](Address DestElement, Address SrcElement) {
        // Working on a single element, so remap the destination and source
        // variables to the corresponding array elements for this iteration.
        CodeGenFunction::OMPPrivateScope Remap(*this);
        Remap.addPrivate(DestVD, [DestElement]() { return DestElement; });
        Remap.addPrivate(SrcVD, [SrcElement]() { return SrcElement; });
        (void)Remap.Privatize();
        EmitIgnoredExpr(Copy);
      });
}
761
762
// Emits initialization code for the 'firstprivate' clauses of directive \p D,
// registering the private copies in \p PrivateScope. Returns true when at
// least one emitted firstprivate variable is simultaneously lastprivate (the
// caller can then combine the handling of both clauses).
bool CodeGenFunction::EmitOMPFirstprivateClause(const OMPExecutableDirective &D,
                                                OMPPrivateScope &PrivateScope) {
  if (!HaveInsertPoint())
    return false;
  // On a device, constants captured by a target region may be usable directly
  // without a local copy (see the registerTargetFirstprivateCopy path below).
  bool DeviceConstTarget =
      getLangOpts().OpenMPIsDevice &&
      isOpenMPTargetExecutionDirective(D.getDirectiveKind());
  bool FirstprivateIsLastprivate = false;
  // Map each lastprivate variable to its modifier so firstprivate emission
  // can detect variables that appear in both clauses.
  llvm::DenseMap<const VarDecl *, OpenMPLastprivateModifier> Lastprivates;
  for (const auto *C : D.getClausesOfKind<OMPLastprivateClause>()) {
    for (const auto *D : C->varlists())
      Lastprivates.try_emplace(
          cast<VarDecl>(cast<DeclRefExpr>(D)->getDecl())->getCanonicalDecl(),
          C->getKind());
  }
  llvm::DenseSet<const VarDecl *> EmittedAsFirstprivate;
  llvm::SmallVector<OpenMPDirectiveKind, 4> CaptureRegions;
  getOpenMPCaptureRegions(CaptureRegions, D.getDirectiveKind());
  // Force emission of the firstprivate copy if the directive does not emit
  // outlined function, like omp for, omp simd, omp distribute etc.
  bool MustEmitFirstprivateCopy =
      CaptureRegions.size() == 1 && CaptureRegions.back() == OMPD_unknown;
  for (const auto *C : D.getClausesOfKind<OMPFirstprivateClause>()) {
    // IRef/InitsRef advance in lock-step with the private_copies() loop;
    // every 'continue' below must increment both before leaving.
    const auto *IRef = C->varlist_begin();
    const auto *InitsRef = C->inits().begin();
    for (const Expr *IInit : C->private_copies()) {
      const auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
      bool ThisFirstprivateIsLastprivate =
          Lastprivates.count(OrigVD->getCanonicalDecl()) > 0;
      const FieldDecl *FD = CapturedStmtInfo->lookup(OrigVD);
      const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(IInit)->getDecl());
      // If the variable is captured by value (non-reference field) in an
      // outlined region, the capture itself already provides the copy, so no
      // explicit firstprivate copy needs to be emitted.
      if (!MustEmitFirstprivateCopy && !ThisFirstprivateIsLastprivate && FD &&
          !FD->getType()->isReferenceType() &&
          (!VD || !VD->hasAttr<OMPAllocateDeclAttr>())) {
        EmittedAsFirstprivate.insert(OrigVD->getCanonicalDecl());
        ++IRef;
        ++InitsRef;
        continue;
      }
      // Do not emit copy for firstprivate constant variables in target regions,
      // captured by reference.
      if (DeviceConstTarget && OrigVD->getType().isConstant(getContext()) &&
          FD && FD->getType()->isReferenceType() &&
          (!VD || !VD->hasAttr<OMPAllocateDeclAttr>())) {
        (void)CGM.getOpenMPRuntime().registerTargetFirstprivateCopy(*this,
                                                                    OrigVD);
        ++IRef;
        ++InitsRef;
        continue;
      }
      FirstprivateIsLastprivate =
          FirstprivateIsLastprivate || ThisFirstprivateIsLastprivate;
      // Emit each variable at most once even if it is listed in several
      // firstprivate clauses.
      if (EmittedAsFirstprivate.insert(OrigVD->getCanonicalDecl()).second) {
        const auto *VDInit =
            cast<VarDecl>(cast<DeclRefExpr>(*InitsRef)->getDecl());
        bool IsRegistered;
        DeclRefExpr DRE(getContext(), const_cast<VarDecl *>(OrigVD),
                        /*RefersToEnclosingVariableOrCapture=*/FD != nullptr,
                        (*IRef)->getType(), VK_LValue, (*IRef)->getExprLoc());
        LValue OriginalLVal;
        if (!FD) {
          // Check if the firstprivate variable is just a constant value.
          ConstantEmission CE = tryEmitAsConstant(&DRE);
          if (CE && !CE.isReference()) {
            // Constant value, no need to create a copy.
            ++IRef;
            ++InitsRef;
            continue;
          }
          if (CE && CE.isReference()) {
            OriginalLVal = CE.getReferenceLValue(*this, &DRE);
          } else {
            assert(!CE && "Expected non-constant firstprivate.");
            OriginalLVal = EmitLValue(&DRE);
          }
        } else {
          OriginalLVal = EmitLValue(&DRE);
        }
        QualType Type = VD->getType();
        if (Type->isArrayType()) {
          // Emit VarDecl with copy init for arrays.
          // Get the address of the original variable captured in current
          // captured region.
          // NOTE: the lambda runs later, when PrivateScope privatizes; it
          // captures OriginalLVal by value for that reason.
          IsRegistered = PrivateScope.addPrivate(
              OrigVD, [this, VD, Type, OriginalLVal, VDInit]() {
                AutoVarEmission Emission = EmitAutoVarAlloca(*VD);
                const Expr *Init = VD->getInit();
                if (!isa<CXXConstructExpr>(Init) ||
                    isTrivialInitializer(Init)) {
                  // Perform simple memcpy.
                  LValue Dest =
                      MakeAddrLValue(Emission.getAllocatedAddress(), Type);
                  EmitAggregateAssign(Dest, OriginalLVal, Type);
                } else {
                  // Non-trivial element construction: copy element by
                  // element, mapping VDInit to the current source element so
                  // the init expression reads from it.
                  EmitOMPAggregateAssign(
                      Emission.getAllocatedAddress(),
                      OriginalLVal.getAddress(*this), Type,
                      [this, VDInit, Init](Address DestElement,
                                           Address SrcElement) {
                        // Clean up any temporaries needed by the
                        // initialization.
                        RunCleanupsScope InitScope(*this);
                        // Emit initialization for single element.
                        setAddrOfLocalVar(VDInit, SrcElement);
                        EmitAnyExprToMem(Init, DestElement,
                                         Init->getType().getQualifiers(),
                                         /*IsInitializer*/ false);
                        LocalDeclMap.erase(VDInit);
                      });
                }
                EmitAutoVarCleanups(Emission);
                return Emission.getAllocatedAddress();
              });
        } else {
          Address OriginalAddr = OriginalLVal.getAddress(*this);
          IsRegistered =
              PrivateScope.addPrivate(OrigVD, [this, VDInit, OriginalAddr, VD,
                                               ThisFirstprivateIsLastprivate,
                                               OrigVD, &Lastprivates, IRef]() {
                // Emit private VarDecl with copy init.
                // Remap temp VDInit variable to the address of the original
                // variable (for proper handling of captured global variables).
                setAddrOfLocalVar(VDInit, OriginalAddr);
                EmitDecl(*VD);
                LocalDeclMap.erase(VDInit);
                if (ThisFirstprivateIsLastprivate &&
                    Lastprivates[OrigVD->getCanonicalDecl()] ==
                        OMPC_LASTPRIVATE_conditional) {
                  // Create/init special variable for lastprivate conditionals.
                  Address VDAddr =
                      CGM.getOpenMPRuntime().emitLastprivateConditionalInit(
                          *this, OrigVD);
                  // Seed the conditional variable with the just-initialized
                  // private value, then make VD refer to it from now on.
                  llvm::Value *V = EmitLoadOfScalar(
                      MakeAddrLValue(GetAddrOfLocalVar(VD), (*IRef)->getType(),
                                     AlignmentSource::Decl),
                      (*IRef)->getExprLoc());
                  EmitStoreOfScalar(V,
                                    MakeAddrLValue(VDAddr, (*IRef)->getType(),
                                                   AlignmentSource::Decl));
                  LocalDeclMap.erase(VD);
                  setAddrOfLocalVar(VD, VDAddr);
                  return VDAddr;
                }
                return GetAddrOfLocalVar(VD);
              });
        }
        assert(IsRegistered &&
               "firstprivate var already registered as private");
        // Silence the warning about unused variable.
        (void)IsRegistered;
      }
      ++IRef;
      ++InitsRef;
    }
  }
  return FirstprivateIsLastprivate && !EmittedAsFirstprivate.empty();
}
919
920
void CodeGenFunction::EmitOMPPrivateClause(
921
    const OMPExecutableDirective &D,
922
31.1k
    CodeGenFunction::OMPPrivateScope &PrivateScope) {
923
31.1k
  if (!HaveInsertPoint())
924
0
    return;
925
31.1k
  llvm::DenseSet<const VarDecl *> EmittedAsPrivate;
926
1.03k
  for (const auto *C : D.getClausesOfKind<OMPPrivateClause>()) {
927
1.03k
    auto IRef = C->varlist_begin();
928
3.19k
    for (const Expr *IInit : C->private_copies()) {
929
3.19k
      const auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
930
3.19k
      if (EmittedAsPrivate.insert(OrigVD->getCanonicalDecl()).second) {
931
2.92k
        const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(IInit)->getDecl());
932
2.92k
        bool IsRegistered = PrivateScope.addPrivate(OrigVD, [this, VD]() {
933
          // Emit private VarDecl with copy init.
934
2.92k
          EmitDecl(*VD);
935
2.92k
          return GetAddrOfLocalVar(VD);
936
2.92k
        });
937
2.92k
        assert(IsRegistered && "private var already registered as private");
938
        // Silence the warning about unused variable.
939
2.92k
        (void)IsRegistered;
940
2.92k
      }
941
3.19k
      ++IRef;
942
3.19k
    }
943
1.03k
  }
944
31.1k
}
945
946
933
// Emits the 'copyin' clauses of directive \p D: copies the master thread's
// threadprivate values into the current thread's threadprivate copies,
// guarded by a master-thread check (addresses equal => current thread IS the
// master, nothing to copy). Returns true if any copyin code was emitted.
bool CodeGenFunction::EmitOMPCopyinClause(const OMPExecutableDirective &D) {
  if (!HaveInsertPoint())
    return false;
  // threadprivate_var1 = master_threadprivate_var1;
  // operator=(threadprivate_var2, master_threadprivate_var2);
  // ...
  // __kmpc_barrier(&loc, global_tid);
  llvm::DenseSet<const VarDecl *> CopiedVars;
  llvm::BasicBlock *CopyBegin = nullptr, *CopyEnd = nullptr;
  for (const auto *C : D.getClausesOfKind<OMPCopyinClause>()) {
    // IRef/ISrcRef/IDestRef advance in lock-step with assignment_ops().
    auto IRef = C->varlist_begin();
    auto ISrcRef = C->source_exprs().begin();
    auto IDestRef = C->destination_exprs().begin();
    for (const Expr *AssignOp : C->assignment_ops()) {
      const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
      QualType Type = VD->getType();
      if (CopiedVars.insert(VD->getCanonicalDecl()).second) {
        // Get the address of the master variable. If we are emitting code with
        // TLS support, the address is passed from the master as field in the
        // captured declaration.
        Address MasterAddr = Address::invalid();
        if (getLangOpts().OpenMPUseTLS &&
            getContext().getTargetInfo().isTLSSupported()) {
          assert(CapturedStmtInfo->lookup(VD) &&
                 "Copyin threadprivates should have been captured!");
          DeclRefExpr DRE(getContext(), const_cast<VarDecl *>(VD), true,
                          (*IRef)->getType(), VK_LValue, (*IRef)->getExprLoc());
          MasterAddr = EmitLValue(&DRE).getAddress(*this);
          // Drop the captured mapping so the EmitLValue(*IRef) below resolves
          // to the thread-local copy rather than the captured master address.
          LocalDeclMap.erase(VD);
        } else {
          // Without TLS the master copy lives in a global (or static local).
          MasterAddr =
            Address(VD->isStaticLocal() ? CGM.getStaticLocalDeclAddress(VD)
                                        : CGM.GetAddrOfGlobal(VD),
                    getContext().getDeclAlign(VD));
        }
        // Get the address of the threadprivate variable.
        Address PrivateAddr = EmitLValue(*IRef).getAddress(*this);
        // Lazily open the guarded region on the first variable actually
        // copied (CopiedVars.size() == 1 exactly once).
        if (CopiedVars.size() == 1) {
          // At first check if current thread is a master thread. If it is, no
          // need to copy data.
          CopyBegin = createBasicBlock("copyin.not.master");
          CopyEnd = createBasicBlock("copyin.not.master.end");
          Builder.CreateCondBr(
              Builder.CreateICmpNE(
                  Builder.CreatePtrToInt(MasterAddr.getPointer(), CGM.IntPtrTy),
                  Builder.CreatePtrToInt(PrivateAddr.getPointer(),
                                         CGM.IntPtrTy)),
              CopyBegin, CopyEnd);
          EmitBlock(CopyBegin);
        }
        const auto *SrcVD =
            cast<VarDecl>(cast<DeclRefExpr>(*ISrcRef)->getDecl());
        const auto *DestVD =
            cast<VarDecl>(cast<DeclRefExpr>(*IDestRef)->getDecl());
        EmitOMPCopy(Type, PrivateAddr, MasterAddr, DestVD, SrcVD, AssignOp);
      }
      ++IRef;
      ++ISrcRef;
      ++IDestRef;
    }
  }
  if (CopyEnd) {
    // Exit out of copying procedure for non-master thread.
    EmitBlock(CopyEnd, /*IsFinished=*/true);
    return true;
  }
  return false;
}
1014
1015
// Emits initialization for the 'lastprivate' clauses of directive \p D:
// registers the destination pseudo-variables (bound to the ORIGINAL variable
// addresses, for the final copy-back) and declares private copies where
// needed. Returns true if at least one lastprivate clause is present so the
// caller knows to emit the final copy-back (EmitOMPLastprivateClauseFinal).
bool CodeGenFunction::EmitOMPLastprivateClauseInit(
    const OMPExecutableDirective &D, OMPPrivateScope &PrivateScope) {
  if (!HaveInsertPoint())
    return false;
  bool HasAtLeastOneLastprivate = false;
  // Collect simd loop control variables: their privatization is handled by
  // the loop codegen itself, so they are skipped below.
  llvm::DenseSet<const VarDecl *> SIMDLCVs;
  if (isOpenMPSimdDirective(D.getDirectiveKind())) {
    const auto *LoopDirective = cast<OMPLoopDirective>(&D);
    for (const Expr *C : LoopDirective->counters()) {
      SIMDLCVs.insert(
          cast<VarDecl>(cast<DeclRefExpr>(C)->getDecl())->getCanonicalDecl());
    }
  }
  llvm::DenseSet<const VarDecl *> AlreadyEmittedVars;
  for (const auto *C : D.getClausesOfKind<OMPLastprivateClause>()) {
    HasAtLeastOneLastprivate = true;
    // For real (non-simd-mode) taskloops only record the presence of the
    // clause; no local init is emitted here.
    if (isOpenMPTaskLoopDirective(D.getDirectiveKind()) &&
        !getLangOpts().OpenMPSimd)
      break;
    const auto *IRef = C->varlist_begin();
    const auto *IDestRef = C->destination_exprs().begin();
    for (const Expr *IInit : C->private_copies()) {
      // Keep the address of the original variable for future update at the end
      // of the loop.
      const auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
      // Taskloops do not require additional initialization, it is done in
      // runtime support library.
      if (AlreadyEmittedVars.insert(OrigVD->getCanonicalDecl()).second) {
        const auto *DestVD =
            cast<VarDecl>(cast<DeclRefExpr>(*IDestRef)->getDecl());
        // Map the destination pseudo-variable to the original variable's
        // address; EmitOMPLastprivateClauseFinal copies into it.
        PrivateScope.addPrivate(DestVD, [this, OrigVD, IRef]() {
          DeclRefExpr DRE(getContext(), const_cast<VarDecl *>(OrigVD),
                          /*RefersToEnclosingVariableOrCapture=*/
                              CapturedStmtInfo->lookup(OrigVD) != nullptr,
                          (*IRef)->getType(), VK_LValue, (*IRef)->getExprLoc());
          return EmitLValue(&DRE).getAddress(*this);
        });
        // Check if the variable is also a firstprivate: in this case IInit is
        // not generated. Initialization of this variable will happen in codegen
        // for 'firstprivate' clause.
        if (IInit && !SIMDLCVs.count(OrigVD->getCanonicalDecl())) {
          const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(IInit)->getDecl());
          bool IsRegistered = PrivateScope.addPrivate(OrigVD, [this, VD, C,
                                                               OrigVD]() {
            if (C->getKind() == OMPC_LASTPRIVATE_conditional) {
              // Conditional lastprivates use a runtime-managed variable
              // instead of a plain local copy.
              Address VDAddr =
                  CGM.getOpenMPRuntime().emitLastprivateConditionalInit(*this,
                                                                        OrigVD);
              setAddrOfLocalVar(VD, VDAddr);
              return VDAddr;
            }
            // Emit private VarDecl with copy init.
            EmitDecl(*VD);
            return GetAddrOfLocalVar(VD);
          });
          assert(IsRegistered &&
                 "lastprivate var already registered as private");
          (void)IsRegistered;
        }
      }
      ++IRef;
      ++IDestRef;
    }
  }
  return HasAtLeastOneLastprivate;
}
1081
1082
// Emits the final copy-back for 'lastprivate' clauses: after the loop, the
// private copies are stored back into the original variables, optionally
// guarded by \p IsLastIterCond (only the thread that executed the last
// iteration performs the stores). \p NoFinals suppresses the loop-counter
// final-value updates (the counters are then just marked as already handled).
void CodeGenFunction::EmitOMPLastprivateClauseFinal(
    const OMPExecutableDirective &D, bool NoFinals,
    llvm::Value *IsLastIterCond) {
  if (!HaveInsertPoint())
    return;
  // Emit following code:
  // if (<IsLastIterCond>) {
  //   orig_var1 = private_orig_var1;
  //   ...
  //   orig_varn = private_orig_varn;
  // }
  llvm::BasicBlock *ThenBB = nullptr;
  llvm::BasicBlock *DoneBB = nullptr;
  if (IsLastIterCond) {
    // Emit implicit barrier if at least one lastprivate conditional is found
    // and this is not a simd mode.
    if (!getLangOpts().OpenMPSimd &&
        llvm::any_of(D.getClausesOfKind<OMPLastprivateClause>(),
                     [](const OMPLastprivateClause *C) {
                       return C->getKind() == OMPC_LASTPRIVATE_conditional;
                     })) {
      CGM.getOpenMPRuntime().emitBarrierCall(*this, D.getBeginLoc(),
                                             OMPD_unknown,
                                             /*EmitChecks=*/false,
                                             /*ForceSimpleCall=*/true);
    }
    ThenBB = createBasicBlock(".omp.lastprivate.then");
    DoneBB = createBasicBlock(".omp.lastprivate.done");
    Builder.CreateCondBr(IsLastIterCond, ThenBB, DoneBB);
    EmitBlock(ThenBB);
  }
  llvm::DenseSet<const VarDecl *> AlreadyEmittedVars;
  llvm::DenseMap<const VarDecl *, const Expr *> LoopCountersAndUpdates;
  // For loop directives, either record the finals for the loop counters (so
  // the counter gets its post-loop value before being copied back) or mark
  // them as already emitted when NoFinals is set.
  if (const auto *LoopDirective = dyn_cast<OMPLoopDirective>(&D)) {
    auto IC = LoopDirective->counters().begin();
    for (const Expr *F : LoopDirective->finals()) {
      // NOTE(review): this local 'D' shadows the directive parameter 'D'.
      const auto *D =
          cast<VarDecl>(cast<DeclRefExpr>(*IC)->getDecl())->getCanonicalDecl();
      if (NoFinals)
        AlreadyEmittedVars.insert(D);
      else
        LoopCountersAndUpdates[D] = F;
      ++IC;
    }
  }
  for (const auto *C : D.getClausesOfKind<OMPLastprivateClause>()) {
    // IRef/ISrcRef/IDestRef advance in lock-step with assignment_ops().
    auto IRef = C->varlist_begin();
    auto ISrcRef = C->source_exprs().begin();
    auto IDestRef = C->destination_exprs().begin();
    for (const Expr *AssignOp : C->assignment_ops()) {
      const auto *PrivateVD =
          cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
      QualType Type = PrivateVD->getType();
      const auto *CanonicalVD = PrivateVD->getCanonicalDecl();
      if (AlreadyEmittedVars.insert(CanonicalVD).second) {
        // If lastprivate variable is a loop control variable for loop-based
        // directive, update its value before copyin back to original
        // variable.
        if (const Expr *FinalExpr = LoopCountersAndUpdates.lookup(CanonicalVD))
          EmitIgnoredExpr(FinalExpr);
        const auto *SrcVD =
            cast<VarDecl>(cast<DeclRefExpr>(*ISrcRef)->getDecl());
        const auto *DestVD =
            cast<VarDecl>(cast<DeclRefExpr>(*IDestRef)->getDecl());
        // Get the address of the private variable.
        Address PrivateAddr = GetAddrOfLocalVar(PrivateVD);
        // Reference-typed privates: load through the reference to reach the
        // referenced storage.
        if (const auto *RefTy = PrivateVD->getType()->getAs<ReferenceType>())
          PrivateAddr =
              Address(Builder.CreateLoad(PrivateAddr),
                      CGM.getNaturalTypeAlignment(RefTy->getPointeeType()));
        // Store the last value to the private copy in the last iteration.
        if (C->getKind() == OMPC_LASTPRIVATE_conditional)
          CGM.getOpenMPRuntime().emitLastprivateConditionalFinalUpdate(
              *this, MakeAddrLValue(PrivateAddr, (*IRef)->getType()), PrivateVD,
              (*IRef)->getExprLoc());
        // Get the address of the original variable.
        Address OriginalAddr = GetAddrOfLocalVar(DestVD);
        EmitOMPCopy(Type, OriginalAddr, PrivateAddr, DestVD, SrcVD, AssignOp);
      }
      ++IRef;
      ++ISrcRef;
      ++IDestRef;
    }
    if (const Expr *PostUpdate = C->getPostUpdateExpr())
      EmitIgnoredExpr(PostUpdate);
  }
  if (IsLastIterCond)
    EmitBlock(DoneBB, /*IsFinished=*/true);
}
1171
1172
void CodeGenFunction::EmitOMPReductionClauseInit(
1173
    const OMPExecutableDirective &D,
1174
26.2k
    CodeGenFunction::OMPPrivateScope &PrivateScope, bool ForInscan) {
1175
26.2k
  if (!HaveInsertPoint())
1176
0
    return;
1177
26.2k
  SmallVector<const Expr *, 4> Shareds;
1178
26.2k
  SmallVector<const Expr *, 4> Privates;
1179
26.2k
  SmallVector<const Expr *, 4> ReductionOps;
1180
26.2k
  SmallVector<const Expr *, 4> LHSs;
1181
26.2k
  SmallVector<const Expr *, 4> RHSs;
1182
26.2k
  OMPTaskDataTy Data;
1183
26.2k
  SmallVector<const Expr *, 4> TaskLHSs;
1184
26.2k
  SmallVector<const Expr *, 4> TaskRHSs;
1185
1.12k
  for (const auto *C : D.getClausesOfKind<OMPReductionClause>()) {
1186
1.12k
    if (ForInscan != (C->getModifier() == OMPC_REDUCTION_inscan))
1187
417
      continue;
1188
707
    Shareds.append(C->varlist_begin(), C->varlist_end());
1189
707
    Privates.append(C->privates().begin(), C->privates().end());
1190
707
    ReductionOps.append(C->reduction_ops().begin(), C->reduction_ops().end());
1191
707
    LHSs.append(C->lhs_exprs().begin(), C->lhs_exprs().end());
1192
707
    RHSs.append(C->rhs_exprs().begin(), C->rhs_exprs().end());
1193
707
    if (C->getModifier() == OMPC_REDUCTION_task) {
1194
26
      Data.ReductionVars.append(C->privates().begin(), C->privates().end());
1195
26
      Data.ReductionOrigs.append(C->varlist_begin(), C->varlist_end());
1196
26
      Data.ReductionCopies.append(C->privates().begin(), C->privates().end());
1197
26
      Data.ReductionOps.append(C->reduction_ops().begin(),
1198
26
                               C->reduction_ops().end());
1199
26
      TaskLHSs.append(C->lhs_exprs().begin(), C->lhs_exprs().end());
1200
26
      TaskRHSs.append(C->rhs_exprs().begin(), C->rhs_exprs().end());
1201
26
    }
1202
707
  }
1203
26.2k
  ReductionCodeGen RedCG(Shareds, Shareds, Privates, ReductionOps);
1204
26.2k
  unsigned Count = 0;
1205
26.2k
  auto *ILHS = LHSs.begin();
1206
26.2k
  auto *IRHS = RHSs.begin();
1207
26.2k
  auto *IPriv = Privates.begin();
1208
796
  for (const Expr *IRef : Shareds) {
1209
796
    const auto *PrivateVD = cast<VarDecl>(cast<DeclRefExpr>(*IPriv)->getDecl());
1210
    // Emit private VarDecl with reduction init.
1211
796
    RedCG.emitSharedOrigLValue(*this, Count);
1212
796
    RedCG.emitAggregateType(*this, Count);
1213
796
    AutoVarEmission Emission = EmitAutoVarAlloca(*PrivateVD);
1214
796
    RedCG.emitInitialization(*this, Count, Emission.getAllocatedAddress(),
1215
796
                             RedCG.getSharedLValue(Count),
1216
604
                             [&Emission](CodeGenFunction &CGF) {
1217
604
                               CGF.EmitAutoVarInit(Emission);
1218
604
                               return true;
1219
604
                             });
1220
796
    EmitAutoVarCleanups(Emission);
1221
796
    Address BaseAddr = RedCG.adjustPrivateAddress(
1222
796
        *this, Count, Emission.getAllocatedAddress());
1223
796
    bool IsRegistered = PrivateScope.addPrivate(
1224
796
        RedCG.getBaseDecl(Count), [BaseAddr]() { return BaseAddr; });
1225
796
    assert(IsRegistered && "private var already registered as private");
1226
    // Silence the warning about unused variable.
1227
796
    (void)IsRegistered;
1228
796
1229
796
    const auto *LHSVD = cast<VarDecl>(cast<DeclRefExpr>(*ILHS)->getDecl());
1230
796
    const auto *RHSVD = cast<VarDecl>(cast<DeclRefExpr>(*IRHS)->getDecl());
1231
796
    QualType Type = PrivateVD->getType();
1232
796
    bool isaOMPArraySectionExpr = isa<OMPArraySectionExpr>(IRef);
1233
796
    if (isaOMPArraySectionExpr && 
Type->isVariablyModifiedType()151
) {
1234
      // Store the address of the original variable associated with the LHS
1235
      // implicit variable.
1236
108
      PrivateScope.addPrivate(LHSVD, [&RedCG, Count, this]() {
1237
108
        return RedCG.getSharedLValue(Count).getAddress(*this);
1238
108
      });
1239
108
      PrivateScope.addPrivate(
1240
108
          RHSVD, [this, PrivateVD]() { return GetAddrOfLocalVar(PrivateVD); });
1241
688
    } else if ((isaOMPArraySectionExpr && 
Type->isScalarType()43
) ||
1242
688
               isa<ArraySubscriptExpr>(IRef)) {
1243
      // Store the address of the original variable associated with the LHS
1244
      // implicit variable.
1245
0
      PrivateScope.addPrivate(LHSVD, [&RedCG, Count, this]() {
1246
0
        return RedCG.getSharedLValue(Count).getAddress(*this);
1247
0
      });
1248
0
      PrivateScope.addPrivate(RHSVD, [this, PrivateVD, RHSVD]() {
1249
0
        return Builder.CreateElementBitCast(GetAddrOfLocalVar(PrivateVD),
1250
0
                                            ConvertTypeForMem(RHSVD->getType()),
1251
0
                                            "rhs.begin");
1252
0
      });
1253
688
    } else {
1254
688
      QualType Type = PrivateVD->getType();
1255
688
      bool IsArray = getContext().getAsArrayType(Type) != nullptr;
1256
688
      Address OriginalAddr = RedCG.getSharedLValue(Count).getAddress(*this);
1257
      // Store the address of the original variable associated with the LHS
1258
      // implicit variable.
1259
688
      if (IsArray) {
1260
105
        OriginalAddr = Builder.CreateElementBitCast(
1261
105
            OriginalAddr, ConvertTypeForMem(LHSVD->getType()), "lhs.begin");
1262
105
      }
1263
688
      PrivateScope.addPrivate(LHSVD, [OriginalAddr]() { return OriginalAddr; });
1264
688
      PrivateScope.addPrivate(
1265
688
          RHSVD, [this, PrivateVD, RHSVD, IsArray]() {
1266
688
            return IsArray
1267
105
                       ? Builder.CreateElementBitCast(
1268
105
                             GetAddrOfLocalVar(PrivateVD),
1269
105
                             ConvertTypeForMem(RHSVD->getType()), "rhs.begin")
1270
583
                       : GetAddrOfLocalVar(PrivateVD);
1271
688
          });
1272
688
    }
1273
796
    ++ILHS;
1274
796
    ++IRHS;
1275
796
    ++IPriv;
1276
796
    ++Count;
1277
796
  }
1278
26.2k
  if (!Data.ReductionVars.empty()) {
1279
26
    Data.IsReductionWithTaskMod = true;
1280
26
    Data.IsWorksharingReduction =
1281
26
        isOpenMPWorksharingDirective(D.getDirectiveKind());
1282
26
    llvm::Value *ReductionDesc = CGM.getOpenMPRuntime().emitTaskReductionInit(
1283
26
        *this, D.getBeginLoc(), TaskLHSs, TaskRHSs, Data);
1284
26
    const Expr *TaskRedRef = nullptr;
1285
26
    switch (D.getDirectiveKind()) {
1286
2
    case OMPD_parallel:
1287
2
      TaskRedRef = cast<OMPParallelDirective>(D).getTaskReductionRefExpr();
1288
2
      break;
1289
2
    case OMPD_for:
1290
2
      TaskRedRef = cast<OMPForDirective>(D).getTaskReductionRefExpr();
1291
2
      break;
1292
2
    case OMPD_sections:
1293
2
      TaskRedRef = cast<OMPSectionsDirective>(D).getTaskReductionRefExpr();
1294
2
      break;
1295
2
    case OMPD_parallel_for:
1296
2
      TaskRedRef = cast<OMPParallelForDirective>(D).getTaskReductionRefExpr();
1297
2
      break;
1298
2
    case OMPD_parallel_master:
1299
2
      TaskRedRef =
1300
2
          cast<OMPParallelMasterDirective>(D).getTaskReductionRefExpr();
1301
2
      break;
1302
2
    case OMPD_parallel_sections:
1303
2
      TaskRedRef =
1304
2
          cast<OMPParallelSectionsDirective>(D).getTaskReductionRefExpr();
1305
2
      break;
1306
2
    case OMPD_target_parallel:
1307
2
      TaskRedRef =
1308
2
          cast<OMPTargetParallelDirective>(D).getTaskReductionRefExpr();
1309
2
      break;
1310
2
    case OMPD_target_parallel_for:
1311
2
      TaskRedRef =
1312
2
          cast<OMPTargetParallelForDirective>(D).getTaskReductionRefExpr();
1313
2
      break;
1314
2
    case OMPD_distribute_parallel_for:
1315
2
      TaskRedRef =
1316
2
          cast<OMPDistributeParallelForDirective>(D).getTaskReductionRefExpr();
1317
2
      break;
1318
4
    case OMPD_teams_distribute_parallel_for:
1319
4
      TaskRedRef = cast<OMPTeamsDistributeParallelForDirective>(D)
1320
4
                       .getTaskReductionRefExpr();
1321
4
      break;
1322
4
    case OMPD_target_teams_distribute_parallel_for:
1323
4
      TaskRedRef = cast<OMPTargetTeamsDistributeParallelForDirective>(D)
1324
4
                       .getTaskReductionRefExpr();
1325
4
      break;
1326
0
    case OMPD_simd:
1327
0
    case OMPD_for_simd:
1328
0
    case OMPD_section:
1329
0
    case OMPD_single:
1330
0
    case OMPD_master:
1331
0
    case OMPD_critical:
1332
0
    case OMPD_parallel_for_simd:
1333
0
    case OMPD_task:
1334
0
    case OMPD_taskyield:
1335
0
    case OMPD_barrier:
1336
0
    case OMPD_taskwait:
1337
0
    case OMPD_taskgroup:
1338
0
    case OMPD_flush:
1339
0
    case OMPD_depobj:
1340
0
    case OMPD_scan:
1341
0
    case OMPD_ordered:
1342
0
    case OMPD_atomic:
1343
0
    case OMPD_teams:
1344
0
    case OMPD_target:
1345
0
    case OMPD_cancellation_point:
1346
0
    case OMPD_cancel:
1347
0
    case OMPD_target_data:
1348
0
    case OMPD_target_enter_data:
1349
0
    case OMPD_target_exit_data:
1350
0
    case OMPD_taskloop:
1351
0
    case OMPD_taskloop_simd:
1352
0
    case OMPD_master_taskloop:
1353
0
    case OMPD_master_taskloop_simd:
1354
0
    case OMPD_parallel_master_taskloop:
1355
0
    case OMPD_parallel_master_taskloop_simd:
1356
0
    case OMPD_distribute:
1357
0
    case OMPD_target_update:
1358
0
    case OMPD_distribute_parallel_for_simd:
1359
0
    case OMPD_distribute_simd:
1360
0
    case OMPD_target_parallel_for_simd:
1361
0
    case OMPD_target_simd:
1362
0
    case OMPD_teams_distribute:
1363
0
    case OMPD_teams_distribute_simd:
1364
0
    case OMPD_teams_distribute_parallel_for_simd:
1365
0
    case OMPD_target_teams:
1366
0
    case OMPD_target_teams_distribute:
1367
0
    case OMPD_target_teams_distribute_parallel_for_simd:
1368
0
    case OMPD_target_teams_distribute_simd:
1369
0
    case OMPD_declare_target:
1370
0
    case OMPD_end_declare_target:
1371
0
    case OMPD_threadprivate:
1372
0
    case OMPD_allocate:
1373
0
    case OMPD_declare_reduction:
1374
0
    case OMPD_declare_mapper:
1375
0
    case OMPD_declare_simd:
1376
0
    case OMPD_requires:
1377
0
    case OMPD_declare_variant:
1378
0
    case OMPD_begin_declare_variant:
1379
0
    case OMPD_end_declare_variant:
1380
0
    case OMPD_unknown:
1381
0
    default:
1382
0
      llvm_unreachable("Enexpected directive with task reductions.");
1383
26
    }
1384
26
1385
26
    const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(TaskRedRef)->getDecl());
1386
26
    EmitVarDecl(*VD);
1387
26
    EmitStoreOfScalar(ReductionDesc, GetAddrOfLocalVar(VD),
1388
26
                      /*Volatile=*/false, TaskRedRef->getType());
1389
26
  }
1390
26.2k
}
1391
1392
/// Emits the final reduction step for the directive \p D: combines all
/// threads' private reduction copies back into the original variables.
///
/// Inscan reductions are skipped here (they are finalized by the scan
/// codegen). For 'task'-modified reductions the task-reduction region is
/// finalized first via emitTaskReductionFini.
///
/// \param ReductionKind The kind of reduction to generate (parallel,
///        worksharing or simd); OMPD_simd selects the simple, runtime-free
///        reduction form.
void CodeGenFunction::EmitOMPReductionClauseFinal(
    const OMPExecutableDirective &D, const OpenMPDirectiveKind ReductionKind) {
  if (!HaveInsertPoint())
    return;
  llvm::SmallVector<const Expr *, 8> Privates;
  llvm::SmallVector<const Expr *, 8> LHSExprs;
  llvm::SmallVector<const Expr *, 8> RHSExprs;
  llvm::SmallVector<const Expr *, 8> ReductionOps;
  bool HasAtLeastOneReduction = false;
  bool IsReductionWithTaskMod = false;
  for (const auto *C : D.getClausesOfKind<OMPReductionClause>()) {
    // Do not emit for inscan reductions.
    if (C->getModifier() == OMPC_REDUCTION_inscan)
      continue;
    HasAtLeastOneReduction = true;
    Privates.append(C->privates().begin(), C->privates().end());
    LHSExprs.append(C->lhs_exprs().begin(), C->lhs_exprs().end());
    RHSExprs.append(C->rhs_exprs().begin(), C->rhs_exprs().end());
    ReductionOps.append(C->reduction_ops().begin(), C->reduction_ops().end());
    IsReductionWithTaskMod =
        IsReductionWithTaskMod || C->getModifier() == OMPC_REDUCTION_task;
  }
  if (HasAtLeastOneReduction) {
    if (IsReductionWithTaskMod) {
      // Close the task-reduction region opened in EmitOMPReductionClauseInit.
      CGM.getOpenMPRuntime().emitTaskReductionFini(
          *this, D.getBeginLoc(),
          isOpenMPWorksharingDirective(D.getDirectiveKind()));
    }
    bool WithNowait = D.getSingleClause<OMPNowaitClause>() ||
                      isOpenMPParallelDirective(D.getDirectiveKind()) ||
                      ReductionKind == OMPD_simd;
    bool SimpleReduction = ReductionKind == OMPD_simd;
    // Emit nowait reduction if nowait clause is present or directive is a
    // parallel directive (it always has implicit barrier).
    CGM.getOpenMPRuntime().emitReduction(
        *this, D.getEndLoc(), Privates, LHSExprs, RHSExprs, ReductionOps,
        {WithNowait, SimpleReduction, ReductionKind});
  }
}
1431
1432
/// Emits the post-update expressions of the reduction clauses of \p D, if any.
///
/// \param CondGen Callback producing an optional guard condition; if it
///        returns a non-null value when the first post-update expression is
///        found, the post-update code is emitted inside a conditional block
///        guarded by that value.
static void emitPostUpdateForReductionClause(
    CodeGenFunction &CGF, const OMPExecutableDirective &D,
    const llvm::function_ref<llvm::Value *(CodeGenFunction &)> CondGen) {
  if (!CGF.HaveInsertPoint())
    return;
  // Lazily created exit block; non-null only when CondGen requested a guard.
  llvm::BasicBlock *DoneBB = nullptr;
  for (const auto *C : D.getClausesOfKind<OMPReductionClause>()) {
    if (const Expr *PostUpdate = C->getPostUpdateExpr()) {
      if (!DoneBB) {
        if (llvm::Value *Cond = CondGen(CGF)) {
          // If the first post-update expression is found, emit conditional
          // block if it was requested.
          llvm::BasicBlock *ThenBB = CGF.createBasicBlock(".omp.reduction.pu");
          DoneBB = CGF.createBasicBlock(".omp.reduction.pu.done");
          CGF.Builder.CreateCondBr(Cond, ThenBB, DoneBB);
          CGF.EmitBlock(ThenBB);
        }
      }
      CGF.EmitIgnoredExpr(PostUpdate);
    }
  }
  if (DoneBB)
    CGF.EmitBlock(DoneBB, /*IsFinished=*/true);
}
1456
1457
namespace {
1458
/// Codegen lambda for appending distribute lower and upper bounds to outlined
1459
/// parallel function. This is necessary for combined constructs such as
1460
/// 'distribute parallel for'
1461
typedef llvm::function_ref<void(CodeGenFunction &,
1462
                                const OMPExecutableDirective &,
1463
                                llvm::SmallVectorImpl<llvm::Value *> &)>
1464
    CodeGenBoundParametersTy;
1465
} // anonymous namespace
1466
1467
static void
1468
checkForLastprivateConditionalUpdate(CodeGenFunction &CGF,
1469
15.3k
                                     const OMPExecutableDirective &S) {
1470
15.3k
  if (CGF.getLangOpts().OpenMP < 50)
1471
3.84k
    return;
1472
11.5k
  llvm::DenseSet<CanonicalDeclPtr<const VarDecl>> PrivateDecls;
1473
702
  for (const auto *C : S.getClausesOfKind<OMPReductionClause>()) {
1474
856
    for (const Expr *Ref : C->varlists()) {
1475
856
      if (!Ref->getType()->isScalarType())
1476
421
        continue;
1477
435
      const auto *DRE = dyn_cast<DeclRefExpr>(Ref->IgnoreParenImpCasts());
1478
435
      if (!DRE)
1479
0
        continue;
1480
435
      PrivateDecls.insert(cast<VarDecl>(DRE->getDecl()));
1481
435
      CGF.CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(CGF, Ref);
1482
435
    }
1483
702
  }
1484
295
  for (const auto *C : S.getClausesOfKind<OMPLastprivateClause>()) {
1485
1.21k
    for (const Expr *Ref : C->varlists()) {
1486
1.21k
      if (!Ref->getType()->isScalarType())
1487
650
        continue;
1488
567
      const auto *DRE = dyn_cast<DeclRefExpr>(Ref->IgnoreParenImpCasts());
1489
567
      if (!DRE)
1490
0
        continue;
1491
567
      PrivateDecls.insert(cast<VarDecl>(DRE->getDecl()));
1492
567
      CGF.CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(CGF, Ref);
1493
567
    }
1494
295
  }
1495
287
  for (const auto *C : S.getClausesOfKind<OMPLinearClause>()) {
1496
358
    for (const Expr *Ref : C->varlists()) {
1497
358
      if (!Ref->getType()->isScalarType())
1498
0
        continue;
1499
358
      const auto *DRE = dyn_cast<DeclRefExpr>(Ref->IgnoreParenImpCasts());
1500
358
      if (!DRE)
1501
0
        continue;
1502
358
      PrivateDecls.insert(cast<VarDecl>(DRE->getDecl()));
1503
358
      CGF.CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(CGF, Ref);
1504
358
    }
1505
287
  }
1506
  // Privates should ne analyzed since they are not captured at all.
1507
  // Task reductions may be skipped - tasks are ignored.
1508
  // Firstprivates do not return value but may be passed by reference - no need
1509
  // to check for updated lastprivate conditional.
1510
3.96k
  for (const auto *C : S.getClausesOfKind<OMPFirstprivateClause>()) {
1511
6.40k
    for (const Expr *Ref : C->varlists()) {
1512
6.40k
      if (!Ref->getType()->isScalarType())
1513
845
        continue;
1514
5.55k
      const auto *DRE = dyn_cast<DeclRefExpr>(Ref->IgnoreParenImpCasts());
1515
5.55k
      if (!DRE)
1516
0
        continue;
1517
5.55k
      PrivateDecls.insert(cast<VarDecl>(DRE->getDecl()));
1518
5.55k
    }
1519
3.96k
  }
1520
11.5k
  CGF.CGM.getOpenMPRuntime().checkAndEmitSharedLastprivateConditional(
1521
11.5k
      CGF, S, PrivateDecls);
1522
11.5k
}
1523
1524
/// Common codegen for 'parallel'-based directives: outlines the parallel
/// region, emits num_threads/proc_bind clauses, finds the applicable 'if'
/// clause condition, and emits the runtime parallel call with the captured
/// variables.
///
/// \param CodeGenBoundParameters Appends extra outlined-function arguments
///        (e.g. 'distribute' chunk bounds for combined constructs) to the
///        captured-variable list before the parallel call is emitted.
static void emitCommonOMPParallelDirective(
    CodeGenFunction &CGF, const OMPExecutableDirective &S,
    OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen,
    const CodeGenBoundParametersTy &CodeGenBoundParameters) {
  const CapturedStmt *CS = S.getCapturedStmt(OMPD_parallel);
  llvm::Function *OutlinedFn =
      CGF.CGM.getOpenMPRuntime().emitParallelOutlinedFunction(
          S, *CS->getCapturedDecl()->param_begin(), InnermostKind, CodeGen);
  if (const auto *NumThreadsClause = S.getSingleClause<OMPNumThreadsClause>()) {
    CodeGenFunction::RunCleanupsScope NumThreadsScope(CGF);
    llvm::Value *NumThreads =
        CGF.EmitScalarExpr(NumThreadsClause->getNumThreads(),
                           /*IgnoreResultAssign=*/true);
    CGF.CGM.getOpenMPRuntime().emitNumThreadsClause(
        CGF, NumThreads, NumThreadsClause->getBeginLoc());
  }
  if (const auto *ProcBindClause = S.getSingleClause<OMPProcBindClause>()) {
    CodeGenFunction::RunCleanupsScope ProcBindScope(CGF);
    CGF.CGM.getOpenMPRuntime().emitProcBindClause(
        CGF, ProcBindClause->getProcBindKind(), ProcBindClause->getBeginLoc());
  }
  // Pick the first 'if' clause that applies to 'parallel' (either unmodified
  // or explicitly modified with the 'parallel' directive name).
  const Expr *IfCond = nullptr;
  for (const auto *C : S.getClausesOfKind<OMPIfClause>()) {
    if (C->getNameModifier() == OMPD_unknown ||
        C->getNameModifier() == OMPD_parallel) {
      IfCond = C->getCondition();
      break;
    }
  }

  OMPParallelScope Scope(CGF, S);
  llvm::SmallVector<llvm::Value *, 16> CapturedVars;
  // Combining 'distribute' with 'for' requires sharing each 'distribute' chunk
  // lower and upper bounds with the pragma 'for' chunking mechanism.
  // The following lambda takes care of appending the lower and upper bound
  // parameters when necessary.
  CodeGenBoundParameters(CGF, S, CapturedVars);
  CGF.GenerateOpenMPCapturedVars(*CS, CapturedVars);
  CGF.CGM.getOpenMPRuntime().emitParallelCall(CGF, S.getBeginLoc(), OutlinedFn,
                                              CapturedVars, IfCond);
}
1565
1566
238
static bool isAllocatableDecl(const VarDecl *VD) {
1567
238
  const VarDecl *CVD = VD->getCanonicalDecl();
1568
238
  if (!CVD->hasAttr<OMPAllocateDeclAttr>())
1569
234
    return false;
1570
4
  const auto *AA = CVD->getAttr<OMPAllocateDeclAttr>();
1571
  // Use the default allocation.
1572
4
  return !((AA->getAllocatorType() == OMPAllocateDeclAttr::OMPDefaultMemAlloc ||
1573
4
            AA->getAllocatorType() == OMPAllocateDeclAttr::OMPNullMemAlloc) &&
1574
0
           !AA->getAllocator());
1575
4
}
1576
1577
/// No-op CodeGenBoundParametersTy callback: used by directives that have no
/// extra bound parameters (i.e. anything not combined with 'distribute').
static void emitEmptyBoundParameters(CodeGenFunction &,
                                     const OMPExecutableDirective &,
                                     llvm::SmallVectorImpl<llvm::Value *> &) {}
1580
1581
/// OpenMPIRBuilder callback: returns a custom-allocated address for \p VD when
/// it carries an 'omp allocate' attribute with a non-default allocator;
/// otherwise returns Address::invalid() so the regular alloca path is used.
/// The allocation is performed via the runtime (__kmpc_alloc-style calls
/// created by the OMPIRBuilder) and freed through an EH-cleanup.
Address CodeGenFunction::OMPBuilderCBHelpers::getAddressOfLocalVariable(
    CodeGenFunction &CGF, const VarDecl *VD) {
  CodeGenModule &CGM = CGF.CGM;
  auto &OMPBuilder = CGM.getOpenMPRuntime().getOMPBuilder();

  if (!VD)
    return Address::invalid();
  const VarDecl *CVD = VD->getCanonicalDecl();
  if (!isAllocatableDecl(CVD))
    return Address::invalid();
  llvm::Value *Size;
  CharUnits Align = CGM.getContext().getDeclAlign(CVD);
  if (CVD->getType()->isVariablyModifiedType()) {
    // VLA-like types: size is a runtime value.
    Size = CGF.getTypeSize(CVD->getType());
    // Align the size: ((size + align - 1) / align) * align
    Size = CGF.Builder.CreateNUWAdd(
        Size, CGM.getSize(Align - CharUnits::fromQuantity(1)));
    Size = CGF.Builder.CreateUDiv(Size, CGM.getSize(Align));
    Size = CGF.Builder.CreateNUWMul(Size, CGM.getSize(Align));
  } else {
    CharUnits Sz = CGM.getContext().getTypeSizeInChars(CVD->getType());
    Size = CGM.getSize(Sz.alignTo(Align));
  }

  const auto *AA = CVD->getAttr<OMPAllocateDeclAttr>();
  assert(AA->getAllocator() &&
         "Expected allocator expression for non-default allocator.");
  llvm::Value *Allocator = CGF.EmitScalarExpr(AA->getAllocator());
  // According to the standard, the original allocator type is a enum (integer).
  // Convert to pointer type, if required.
  if (Allocator->getType()->isIntegerTy())
    Allocator = CGF.Builder.CreateIntToPtr(Allocator, CGM.VoidPtrTy);
  else if (Allocator->getType()->isPointerTy())
    Allocator = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(Allocator,
                                                                CGM.VoidPtrTy);

  llvm::Value *Addr = OMPBuilder.CreateOMPAlloc(
      CGF.Builder, Size, Allocator,
      getNameWithSeparators({CVD->getName(), ".void.addr"}, ".", "."));
  llvm::CallInst *FreeCI =
      OMPBuilder.CreateOMPFree(CGF.Builder, Addr, Allocator);

  // Free the allocation when the scope is exited (normal or exceptional path).
  CGF.EHStack.pushCleanup<OMPAllocateCleanupTy>(NormalAndEHCleanup, FreeCI);
  Addr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
      Addr,
      CGF.ConvertTypeForMem(CGM.getContext().getPointerType(CVD->getType())),
      getNameWithSeparators({CVD->getName(), ".addr"}, ".", "."));
  return Address(Addr, Align);
}
1630
1631
/// OpenMPIRBuilder callback: returns the address of the thread-local instance
/// of the threadprivate variable \p VD. When native TLS is available the
/// original address is returned unchanged; otherwise a runtime
/// cached-threadprivate call is emitted via the OMPIRBuilder.
Address CodeGenFunction::OMPBuilderCBHelpers::getAddrOfThreadPrivate(
    CodeGenFunction &CGF, const VarDecl *VD, Address VDAddr,
    SourceLocation Loc) {
  CodeGenModule &CGM = CGF.CGM;
  // With native TLS support the variable is already thread-local.
  if (CGM.getLangOpts().OpenMPUseTLS &&
      CGM.getContext().getTargetInfo().isTLSSupported())
    return VDAddr;

  llvm::OpenMPIRBuilder &OMPBuilder = CGM.getOpenMPRuntime().getOMPBuilder();

  llvm::Type *VarTy = VDAddr.getElementType();
  llvm::Value *Data =
      CGF.Builder.CreatePointerCast(VDAddr.getPointer(), CGM.Int8PtrTy);
  llvm::ConstantInt *Size = CGM.getSize(CGM.GetTargetTypeStoreSize(VarTy));
  std::string Suffix = getNameWithSeparators({"cache", ""});
  llvm::Twine CacheName = Twine(CGM.getMangledName(VD)).concat(Suffix);

  llvm::CallInst *ThreadPrivateCacheCall =
      OMPBuilder.CreateCachedThreadPrivate(CGF.Builder, Data, Size, CacheName);

  return Address(ThreadPrivateCacheCall, VDAddr.getAlignment());
}
1653
1654
/// Joins \p Parts into a single string, prefixing the first part with
/// \p FirstSeparator and every subsequent part with \p Separator.
std::string CodeGenFunction::OMPBuilderCBHelpers::getNameWithSeparators(
    ArrayRef<StringRef> Parts, StringRef FirstSeparator, StringRef Separator) {
  std::string Result;
  StringRef Sep = FirstSeparator;
  for (StringRef Part : Parts) {
    Result.append(Sep.begin(), Sep.end());
    Result.append(Part.begin(), Part.end());
    // All parts after the first use the regular separator.
    Sep = Separator;
  }
  return Result;
}
1665
937
void CodeGenFunction::EmitOMPParallelDirective(const OMPParallelDirective &S) {
1666
937
  if (CGM.getLangOpts().OpenMPIRBuilder) {
1667
34
    llvm::OpenMPIRBuilder &OMPBuilder = CGM.getOpenMPRuntime().getOMPBuilder();
1668
    // Check if we have any if clause associated with the directive.
1669
34
    llvm::Value *IfCond = nullptr;
1670
34
    if (const auto *C = S.getSingleClause<OMPIfClause>())
1671
0
      IfCond = EmitScalarExpr(C->getCondition(),
1672
0
                              /*IgnoreResultAssign=*/true);
1673
34
1674
34
    llvm::Value *NumThreads = nullptr;
1675
34
    if (const auto *NumThreadsClause = S.getSingleClause<OMPNumThreadsClause>())
1676
0
      NumThreads = EmitScalarExpr(NumThreadsClause->getNumThreads(),
1677
0
                                  /*IgnoreResultAssign=*/true);
1678
34
1679
34
    ProcBindKind ProcBind = OMP_PROC_BIND_default;
1680
34
    if (const auto *ProcBindClause = S.getSingleClause<OMPProcBindClause>())
1681
0
      ProcBind = ProcBindClause->getProcBindKind();
1682
34
1683
34
    using InsertPointTy = llvm::OpenMPIRBuilder::InsertPointTy;
1684
34
1685
    // The cleanup callback that finalizes all variabels at the given location,
1686
    // thus calls destructors etc.
1687
42
    auto FiniCB = [this](InsertPointTy IP) {
1688
42
      OMPBuilderCBHelpers::FinalizeOMPRegion(*this, IP);
1689
42
    };
1690
34
1691
    // Privatization callback that performs appropriate action for
1692
    // shared/private/firstprivate/lastprivate/copyin/... variables.
1693
    //
1694
    // TODO: This defaults to shared right now.
1695
34
    auto PrivCB = [](InsertPointTy AllocaIP, InsertPointTy CodeGenIP,
1696
68
                     llvm::Value &Val, llvm::Value *&ReplVal) {
1697
      // The next line is appropriate only for variables (Val) with the
1698
      // data-sharing attribute "shared".
1699
68
      ReplVal = &Val;
1700
68
1701
68
      return CodeGenIP;
1702
68
    };
1703
34
1704
34
    const CapturedStmt *CS = S.getCapturedStmt(OMPD_parallel);
1705
34
    const Stmt *ParallelRegionBodyStmt = CS->getCapturedStmt();
1706
34
1707
34
    auto BodyGenCB = [ParallelRegionBodyStmt,
1708
34
                      this](InsertPointTy AllocaIP, InsertPointTy CodeGenIP,
1709
34
                            llvm::BasicBlock &ContinuationBB) {
1710
34
      OMPBuilderCBHelpers::OutlinedRegionBodyRAII ORB(*this, AllocaIP,
1711
34
                                                      ContinuationBB);
1712
34
      OMPBuilderCBHelpers::EmitOMPRegionBody(*this, ParallelRegionBodyStmt,
1713
34
                                             CodeGenIP, ContinuationBB);
1714
34
    };
1715
34
1716
34
    CGCapturedStmtInfo CGSI(*CS, CR_OpenMP);
1717
34
    CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(*this, &CGSI);
1718
34
    llvm::OpenMPIRBuilder::InsertPointTy AllocaIP(
1719
34
        AllocaInsertPt->getParent(), AllocaInsertPt->getIterator());
1720
34
    Builder.restoreIP(
1721
34
        OMPBuilder.CreateParallel(Builder, AllocaIP, BodyGenCB, PrivCB, FiniCB,
1722
34
                                  IfCond, NumThreads, ProcBind, S.hasCancel()));
1723
34
    return;
1724
34
  }
1725
903
1726
  // Emit parallel region as a standalone region.
1727
903
  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
1728
903
    Action.Enter(CGF);
1729
903
    OMPPrivateScope PrivateScope(CGF);
1730
903
    bool Copyins = CGF.EmitOMPCopyinClause(S);
1731
903
    (void)CGF.EmitOMPFirstprivateClause(S, PrivateScope);
1732
903
    if (Copyins) {
1733
      // Emit implicit barrier to synchronize threads and avoid data races on
1734
      // propagation master's thread values of threadprivate variables to local
1735
      // instances of that variables of all other implicit threads.
1736
23
      CGF.CGM.getOpenMPRuntime().emitBarrierCall(
1737
23
          CGF, S.getBeginLoc(), OMPD_unknown, /*EmitChecks=*/false,
1738
23
          /*ForceSimpleCall=*/true);
1739
23
    }
1740
903
    CGF.EmitOMPPrivateClause(S, PrivateScope);
1741
903
    CGF.EmitOMPReductionClauseInit(S, PrivateScope);
1742
903
    (void)PrivateScope.Privatize();
1743
903
    CGF.EmitStmt(S.getCapturedStmt(OMPD_parallel)->getCapturedStmt());
1744
903
    CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_parallel);
1745
903
  };
1746
903
  {
1747
903
    auto LPCRegion =
1748
903
        CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S);
1749
903
    emitCommonOMPParallelDirective(*this, S, OMPD_parallel, CodeGen,
1750
903
                                   emitEmptyBoundParameters);
1751
903
    emitPostUpdateForReductionClause(*this, S,
1752
4
                                     [](CodeGenFunction &) { return nullptr; });
1753
903
  }
1754
  // Check for outer lastprivate conditional update.
1755
903
  checkForLastprivateConditionalUpdate(*this, S);
1756
903
}
1757
1758
/// Recursively emits the body of a (possibly imperfectly nested) OpenMP loop
/// nest. Loops that belong to the collapsed nest (\p NextLoop, down to
/// \p MaxLevel levels) are peeled: only their bodies are emitted, since the
/// loop control is generated by the enclosing OpenMP loop codegen. All other
/// statements are emitted as-is.
static void emitBody(CodeGenFunction &CGF, const Stmt *S, const Stmt *NextLoop,
                     int MaxLevel, int Level = 0) {
  assert(Level < MaxLevel && "Too deep lookup during loop body codegen.");
  const Stmt *SimplifiedS = S->IgnoreContainers();
  if (const auto *CS = dyn_cast<CompoundStmt>(SimplifiedS)) {
    PrettyStackTraceLoc CrashInfo(
        CGF.getContext().getSourceManager(), CS->getLBracLoc(),
        "LLVM IR generation of compound statement ('{}')");

    // Keep track of the current cleanup stack depth, including debug scopes.
    CodeGenFunction::LexicalScope Scope(CGF, S->getSourceRange());
    for (const Stmt *CurStmt : CS->body())
      emitBody(CGF, CurStmt, NextLoop, MaxLevel, Level);
    return;
  }
  if (SimplifiedS == NextLoop) {
    // This is the next loop of the collapsed nest: skip its control statement
    // and descend into its body.
    if (const auto *For = dyn_cast<ForStmt>(SimplifiedS)) {
      S = For->getBody();
    } else {
      assert(isa<CXXForRangeStmt>(SimplifiedS) &&
             "Expected canonical for loop or range-based for loop.");
      const auto *CXXFor = cast<CXXForRangeStmt>(SimplifiedS);
      CGF.EmitStmt(CXXFor->getLoopVarStmt());
      S = CXXFor->getBody();
    }
    if (Level + 1 < MaxLevel) {
      NextLoop = OMPLoopDirective::tryToFindNextInnerLoop(
          S, /*TryImperfectlyNestedLoops=*/true);
      emitBody(CGF, S, NextLoop, MaxLevel, Level + 1);
      return;
    }
  }
  CGF.EmitStmt(S);
}
1792
1793
/// Emit the body of an OpenMP loop directive for the current iteration:
/// update the user-visible loop counters (and linear variables) from the
/// directive's iteration variable, set up break/continue destinations, guard
/// non-rectangular iteration spaces, handle inscan reduction block dispatch,
/// and finally emit the (possibly collapsed) loop nest body.
void CodeGenFunction::EmitOMPLoopBody(const OMPLoopDirective &D,
                                      JumpDest LoopExit) {
  RunCleanupsScope BodyScope(*this);
  // Update counters values on current iteration.
  for (const Expr *UE : D.updates())
    EmitIgnoredExpr(UE);
  // Update the linear variables.
  // In distribute directives only loop counters may be marked as linear, no
  // need to generate the code for them.
  if (!isOpenMPDistributeDirective(D.getDirectiveKind())) {
    for (const auto *C : D.getClausesOfKind<OMPLinearClause>()) {
      for (const Expr *UE : C->updates())
        EmitIgnoredExpr(UE);
    }
  }

  // On a continue in the body, jump to the end.
  JumpDest Continue = getJumpDestInCurrentScope("omp.body.continue");
  BreakContinueStack.push_back(BreakContinue(LoopExit, Continue));
  for (const Expr *E : D.finals_conditions()) {
    if (!E)
      continue;
    // Check that loop counter in non-rectangular nest fits into the iteration
    // space.
    llvm::BasicBlock *NextBB = createBasicBlock("omp.body.next");
    EmitBranchOnBoolExpr(E, NextBB, Continue.getBlock(),
                         getProfileCount(D.getBody()));
    EmitBlock(NextBB);
  }

  OMPPrivateScope InscanScope(*this);
  EmitOMPReductionClauseInit(D, InscanScope, /*ForInscan=*/true);
  // Privatize() returns true only if inscan reduction privates were
  // registered, i.e. the body contains a scan directive.
  bool IsInscanRegion = InscanScope.Privatize();
  if (IsInscanRegion) {
    // Need to remember the block before and after scan directive
    // to dispatch them correctly depending on the clause used in
    // this directive, inclusive or exclusive. For inclusive scan the natural
    // order of the blocks is used, for exclusive clause the blocks must be
    // executed in reverse order.
    OMPBeforeScanBlock = createBasicBlock("omp.before.scan.bb");
    OMPAfterScanBlock = createBasicBlock("omp.after.scan.bb");
    // No need to allocate inscan exit block, in simd mode it is selected in the
    // codegen for the scan directive.
    if (D.getDirectiveKind() != OMPD_simd && !getLangOpts().OpenMPSimd)
      OMPScanExitBlock = createBasicBlock("omp.exit.inscan.bb");
    OMPScanDispatch = createBasicBlock("omp.inscan.dispatch");
    EmitBranch(OMPScanDispatch);
    EmitBlock(OMPBeforeScanBlock);
  }

  // Emit loop variables for C++ range loops.
  const Stmt *Body =
      D.getInnermostCapturedStmt()->getCapturedStmt()->IgnoreContainers();
  // Emit loop body.
  emitBody(*this, Body,
           OMPLoopDirective::tryToFindNextInnerLoop(
               Body, /*TryImperfectlyNestedLoops=*/true),
           D.getCollapsedNumber());

  // Jump to the dispatcher at the end of the loop body.
  if (IsInscanRegion)
    EmitBranch(OMPScanExitBlock);

  // The end (updates/cleanups).
  EmitBlock(Continue.getBlock());
  BreakContinueStack.pop_back();
}
1860
1861
void CodeGenFunction::EmitOMPInnerLoop(
1862
    const OMPExecutableDirective &S, bool RequiresCleanup, const Expr *LoopCond,
1863
    const Expr *IncExpr,
1864
    const llvm::function_ref<void(CodeGenFunction &)> BodyGen,
1865
13.0k
    const llvm::function_ref<void(CodeGenFunction &)> PostIncGen) {
1866
13.0k
  auto LoopExit = getJumpDestInCurrentScope("omp.inner.for.end");
1867
13.0k
1868
  // Start the loop with a block that tests the condition.
1869
13.0k
  auto CondBlock = createBasicBlock("omp.inner.for.cond");
1870
13.0k
  EmitBlock(CondBlock);
1871
13.0k
  const SourceRange R = S.getSourceRange();
1872
13.0k
1873
  // If attributes are attached, push to the basic block with them.
1874
13.0k
  const auto &OMPED = cast<OMPExecutableDirective>(S);
1875
13.0k
  const CapturedStmt *ICS = OMPED.getInnermostCapturedStmt();
1876
13.0k
  const Stmt *SS = ICS->getCapturedStmt();
1877
13.0k
  const AttributedStmt *AS = dyn_cast_or_null<AttributedStmt>(SS);
1878
13.0k
  if (AS)
1879
1
    LoopStack.push(CondBlock, CGM.getContext(), CGM.getCodeGenOpts(),
1880
1
                   AS->getAttrs(), SourceLocToDebugLoc(R.getBegin()),
1881
1
                   SourceLocToDebugLoc(R.getEnd()));
1882
13.0k
  else
1883
13.0k
    LoopStack.push(CondBlock, SourceLocToDebugLoc(R.getBegin()),
1884
13.0k
                   SourceLocToDebugLoc(R.getEnd()));
1885
13.0k
1886
  // If there are any cleanups between here and the loop-exit scope,
1887
  // create a block to stage a loop exit along.
1888
13.0k
  llvm::BasicBlock *ExitBlock = LoopExit.getBlock();
1889
13.0k
  if (RequiresCleanup)
1890
850
    ExitBlock = createBasicBlock("omp.inner.for.cond.cleanup");
1891
13.0k
1892
13.0k
  llvm::BasicBlock *LoopBody = createBasicBlock("omp.inner.for.body");
1893
13.0k
1894
  // Emit condition.
1895
13.0k
  EmitBranchOnBoolExpr(LoopCond, LoopBody, ExitBlock, getProfileCount(&S));
1896
13.0k
  if (ExitBlock != LoopExit.getBlock()) {
1897
850
    EmitBlock(ExitBlock);
1898
850
    EmitBranchThroughCleanup(LoopExit);
1899
850
  }
1900
13.0k
1901
13.0k
  EmitBlock(LoopBody);
1902
13.0k
  incrementProfileCounter(&S);
1903
13.0k
1904
  // Create a block for the increment.
1905
13.0k
  JumpDest Continue = getJumpDestInCurrentScope("omp.inner.for.inc");
1906
13.0k
  BreakContinueStack.push_back(BreakContinue(LoopExit, Continue));
1907
13.0k
1908
13.0k
  BodyGen(*this);
1909
13.0k
1910
  // Emit "IV = IV + 1" and a back-edge to the condition block.
1911
13.0k
  EmitBlock(Continue.getBlock());
1912
13.0k
  EmitIgnoredExpr(IncExpr);
1913
13.0k
  PostIncGen(*this);
1914
13.0k
  BreakContinueStack.pop_back();
1915
13.0k
  EmitBranch(CondBlock);
1916
13.0k
  LoopStack.pop();
1917
  // Emit the fall-through block.
1918
13.0k
  EmitBlock(LoopExit.getBlock());
1919
13.0k
}
1920
1921
8.58k
bool CodeGenFunction::EmitOMPLinearClauseInit(const OMPLoopDirective &D) {
1922
8.58k
  if (!HaveInsertPoint())
1923
0
    return false;
1924
  // Emit inits for the linear variables.
1925
8.58k
  bool HasLinears = false;
1926
500
  for (const auto *C : D.getClausesOfKind<OMPLinearClause>()) {
1927
622
    for (const Expr *Init : C->inits()) {
1928
622
      HasLinears = true;
1929
622
      const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(Init)->getDecl());
1930
622
      if (const auto *Ref =
1931
622
              dyn_cast<DeclRefExpr>(VD->getInit()->IgnoreImpCasts())) {
1932
622
        AutoVarEmission Emission = EmitAutoVarAlloca(*VD);
1933
622
        const auto *OrigVD = cast<VarDecl>(Ref->getDecl());
1934
622
        DeclRefExpr DRE(getContext(), const_cast<VarDecl *>(OrigVD),
1935
622
                        CapturedStmtInfo->lookup(OrigVD) != nullptr,
1936
622
                        VD->getInit()->getType(), VK_LValue,
1937
622
                        VD->getInit()->getExprLoc());
1938
622
        EmitExprAsInit(&DRE, VD, MakeAddrLValue(Emission.getAllocatedAddress(),
1939
622
                                                VD->getType()),
1940
622
                       /*capturedByInit=*/false);
1941
622
        EmitAutoVarCleanups(Emission);
1942
0
      } else {
1943
0
        EmitVarDecl(*VD);
1944
0
      }
1945
622
    }
1946
    // Emit the linear steps for the linear clauses.
1947
    // If a step is not constant, it is pre-calculated before the loop.
1948
500
    if (const auto *CS = cast_or_null<BinaryOperator>(C->getCalcStep()))
1949
154
      if (const auto *SaveRef = cast<DeclRefExpr>(CS->getLHS())) {
1950
154
        EmitVarDecl(*cast<VarDecl>(SaveRef->getDecl()));
1951
        // Emit calculation of the linear step.
1952
154
        EmitIgnoredExpr(CS);
1953
154
      }
1954
500
  }
1955
8.58k
  return HasLinears;
1956
8.58k
}
1957
1958
/// Emit final values of the 'linear' clause variables after the loop: copy
/// each final value back to the original variable and run any post-update
/// expressions. \p CondGen may return a condition; if it does (checked lazily
/// on the first final), the whole copy-back region is emitted under that
/// condition.
void CodeGenFunction::EmitOMPLinearClauseFinal(
    const OMPLoopDirective &D,
    const llvm::function_ref<llvm::Value *(CodeGenFunction &)> CondGen) {
  if (!HaveInsertPoint())
    return;
  // DoneBB doubles as the "conditional region already opened" flag.
  llvm::BasicBlock *DoneBB = nullptr;
  // Emit the final values of the linear variables.
  for (const auto *C : D.getClausesOfKind<OMPLinearClause>()) {
    auto IC = C->varlist_begin();
    for (const Expr *F : C->finals()) {
      if (!DoneBB) {
        if (llvm::Value *Cond = CondGen(*this)) {
          // If the first post-update expression is found, emit conditional
          // block if it was requested.
          llvm::BasicBlock *ThenBB = createBasicBlock(".omp.linear.pu");
          DoneBB = createBasicBlock(".omp.linear.pu.done");
          Builder.CreateCondBr(Cond, ThenBB, DoneBB);
          EmitBlock(ThenBB);
        }
      }
      // Temporarily remap the original variable to its address so that the
      // final expression F (which references the original) stores into it.
      const auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IC)->getDecl());
      DeclRefExpr DRE(getContext(), const_cast<VarDecl *>(OrigVD),
                      CapturedStmtInfo->lookup(OrigVD) != nullptr,
                      (*IC)->getType(), VK_LValue, (*IC)->getExprLoc());
      Address OrigAddr = EmitLValue(&DRE).getAddress(*this);
      CodeGenFunction::OMPPrivateScope VarScope(*this);
      VarScope.addPrivate(OrigVD, [OrigAddr]() { return OrigAddr; });
      (void)VarScope.Privatize();
      EmitIgnoredExpr(F);
      ++IC;
    }
    if (const Expr *PostUpdate = C->getPostUpdateExpr())
      EmitIgnoredExpr(PostUpdate);
  }
  if (DoneBB)
    EmitBlock(DoneBB, /*IsFinished=*/true);
}
1995
1996
/// Emit llvm.assume-based alignment assumptions for every pointer listed in
/// the directive's 'aligned' clauses. A clause without an explicit alignment
/// expression falls back to the target's default SIMD alignment for the
/// pointee type.
static void emitAlignedClause(CodeGenFunction &CGF,
                              const OMPExecutableDirective &D) {
  if (!CGF.HaveInsertPoint())
    return;
  for (const auto *Clause : D.getClausesOfKind<OMPAlignedClause>()) {
    // Clause-level alignment; 0 means "not specified" and is resolved
    // per-variable below.
    llvm::APInt ClauseAlignment(64, 0);
    if (const Expr *AlignmentExpr = Clause->getAlignment()) {
      // Sema guarantees the alignment folds to an integer constant.
      auto *AlignmentCI =
          cast<llvm::ConstantInt>(CGF.EmitScalarExpr(AlignmentExpr));
      ClauseAlignment = AlignmentCI->getValue();
    }
    for (const Expr *E : Clause->varlists()) {
      llvm::APInt Alignment(ClauseAlignment);
      if (Alignment == 0) {
        // OpenMP [2.8.1, Description]
        // If no optional parameter is specified, implementation-defined default
        // alignments for SIMD instructions on the target platforms are assumed.
        Alignment =
            CGF.getContext()
                .toCharUnitsFromBits(CGF.getContext().getOpenMPDefaultSimdAlign(
                    E->getType()->getPointeeType()))
                .getQuantity();
      }
      assert((Alignment == 0 || Alignment.isPowerOf2()) &&
             "alignment is not power of 2");
      if (Alignment != 0) {
        llvm::Value *PtrValue = CGF.EmitScalarExpr(E);
        CGF.emitAlignmentAssumption(
            PtrValue, E, /*No second loc needed*/ SourceLocation(),
            llvm::ConstantInt::get(CGF.getLLVMContext(), Alignment));
      }
    }
  }
}
2030
2031
/// Register private copies of the loop counters of \p S in \p LoopScope:
/// allocate storage for each private counter, map the original counter to it,
/// and additionally map the private counter declaration either back to the
/// original variable's address (when the original is addressable here) or to
/// the fresh allocation. Also privatizes extra counters referenced by
/// ordered(n) clauses.
void CodeGenFunction::EmitOMPPrivateLoopCounters(
    const OMPLoopDirective &S, CodeGenFunction::OMPPrivateScope &LoopScope) {
  if (!HaveInsertPoint())
    return;
  auto I = S.private_counters().begin();
  for (const Expr *E : S.counters()) {
    const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
    const auto *PrivateVD = cast<VarDecl>(cast<DeclRefExpr>(*I)->getDecl());
    // Emit var without initialization.
    AutoVarEmission VarEmission = EmitAutoVarAlloca(*PrivateVD);
    EmitAutoVarCleanups(VarEmission);
    // Drop the direct mapping: PrivateVD's address is re-registered below via
    // the private scope so that lookup goes through the scope's remapping.
    LocalDeclMap.erase(PrivateVD);
    (void)LoopScope.addPrivate(VD, [&VarEmission]() {
      return VarEmission.getAllocatedAddress();
    });
    if (LocalDeclMap.count(VD) || CapturedStmtInfo->lookup(VD) ||
        VD->hasGlobalStorage()) {
      // Original counter is addressable in this function: map the private
      // counter decl to the original variable's address.
      (void)LoopScope.addPrivate(PrivateVD, [this, VD, E]() {
        DeclRefExpr DRE(getContext(), const_cast<VarDecl *>(VD),
                        LocalDeclMap.count(VD) || CapturedStmtInfo->lookup(VD),
                        E->getType(), VK_LValue, E->getExprLoc());
        return EmitLValue(&DRE).getAddress(*this);
      });
    } else {
      (void)LoopScope.addPrivate(PrivateVD, [&VarEmission]() {
        return VarEmission.getAllocatedAddress();
      });
    }
    ++I;
  }
  // Privatize extra loop counters used in loops for ordered(n) clauses.
  for (const auto *C : S.getClausesOfKind<OMPOrderedClause>()) {
    if (!C->getNumForLoops())
      continue;
    // Counters beyond the collapsed depth (up to the ordered(n) depth) also
    // need private storage.
    for (unsigned I = S.getCollapsedNumber(),
                  E = C->getLoopNumIterations().size();
         I < E; ++I) {
      const auto *DRE = cast<DeclRefExpr>(C->getLoopCounter(I));
      const auto *VD = cast<VarDecl>(DRE->getDecl());
      // Override only those variables that can be captured to avoid re-emission
      // of the variables declared within the loops.
      if (DRE->refersToEnclosingVariableOrCapture()) {
        (void)LoopScope.addPrivate(VD, [this, DRE, VD]() {
          return CreateMemTemp(DRE->getType(), VD->getName());
        });
      }
    }
  }
}
2080
2081
/// Emit the loop pre-condition check ("is the loop executed at least once?")
/// for loop directive \p S and branch to \p TrueBlock or \p FalseBlock
/// accordingly. Loop counters are privatized and initialized in a temporary
/// scope first so that evaluating \p Cond does not clobber user state;
/// non-rectangular (dependent) counters get dedicated temporaries.
static void emitPreCond(CodeGenFunction &CGF, const OMPLoopDirective &S,
                        const Expr *Cond, llvm::BasicBlock *TrueBlock,
                        llvm::BasicBlock *FalseBlock, uint64_t TrueCount) {
  if (!CGF.HaveInsertPoint())
    return;
  {
    // Scoped so the counter privatization is undone before the branch.
    CodeGenFunction::OMPPrivateScope PreCondScope(CGF);
    CGF.EmitOMPPrivateLoopCounters(S, PreCondScope);
    (void)PreCondScope.Privatize();
    // Get initial values of real counters.
    for (const Expr *I : S.inits()) {
      CGF.EmitIgnoredExpr(I);
    }
  }
  // Create temp loop control variables with their init values to support
  // non-rectangular loops.
  CodeGenFunction::OMPMapVars PreCondVars;
  for (const Expr * E: S.dependent_counters()) {
    if (!E)
      continue;
    assert(!E->getType().getNonReferenceType()->isRecordType() &&
           "dependent counter must not be an iterator.");
    const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
    Address CounterAddr =
        CGF.CreateMemTemp(VD->getType().getNonReferenceType());
    (void)PreCondVars.setVarAddr(CGF, VD, CounterAddr);
  }
  (void)PreCondVars.apply(CGF);
  for (const Expr *E : S.dependent_inits()) {
    if (!E)
      continue;
    CGF.EmitIgnoredExpr(E);
  }
  // Check that loop is executed at least one time.
  CGF.EmitBranchOnBoolExpr(Cond, TrueBlock, FalseBlock, TrueCount);
  PreCondVars.restore(CGF);
}
2118
2119
void CodeGenFunction::EmitOMPLinearClause(
2120
8.58k
    const OMPLoopDirective &D, CodeGenFunction::OMPPrivateScope &PrivateScope) {
2121
8.58k
  if (!HaveInsertPoint())
2122
0
    return;
2123
8.58k
  llvm::DenseSet<const VarDecl *> SIMDLCVs;
2124
8.58k
  if (isOpenMPSimdDirective(D.getDirectiveKind())) {
2125
6.17k
    const auto *LoopDirective = cast<OMPLoopDirective>(&D);
2126
6.47k
    for (const Expr *C : LoopDirective->counters()) {
2127
6.47k
      SIMDLCVs.insert(
2128
6.47k
          cast<VarDecl>(cast<DeclRefExpr>(C)->getDecl())->getCanonicalDecl());
2129
6.47k
    }
2130
6.17k
  }
2131
500
  for (const auto *C : D.getClausesOfKind<OMPLinearClause>()) {
2132
500
    auto CurPrivate = C->privates().begin();
2133
622
    for (const Expr *E : C->varlists()) {
2134
622
      const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
2135
622
      const auto *PrivateVD =
2136
622
          cast<VarDecl>(cast<DeclRefExpr>(*CurPrivate)->getDecl());
2137
622
      if (!SIMDLCVs.count(VD->getCanonicalDecl())) {
2138
516
        bool IsRegistered = PrivateScope.addPrivate(VD, [this, PrivateVD]() {
2139
          // Emit private VarDecl with copy init.
2140
516
          EmitVarDecl(*PrivateVD);
2141
516
          return GetAddrOfLocalVar(PrivateVD);
2142
516
        });
2143
516
        assert(IsRegistered && "linear var already registered as private");
2144
        // Silence the warning about unused variable.
2145
516
        (void)IsRegistered;
2146
106
      } else {
2147
106
        EmitVarDecl(*PrivateVD);
2148
106
      }
2149
622
      ++CurPrivate;
2150
622
    }
2151
500
  }
2152
8.58k
}
2153
2154
/// Apply 'simdlen'/'safelen' clause information to the loop metadata stack:
/// both set the preferred vectorization width; a finite 'safelen' (or a
/// monotonic schedule) suppresses marking memory accesses as parallel.
static void emitSimdlenSafelenClause(CodeGenFunction &CGF,
                                     const OMPExecutableDirective &D,
                                     bool IsMonotonic) {
  if (!CGF.HaveInsertPoint())
    return;
  if (const auto *C = D.getSingleClause<OMPSimdlenClause>()) {
    // Sema guarantees the clause argument folds to an integer constant.
    RValue Len = CGF.EmitAnyExpr(C->getSimdlen(), AggValueSlot::ignored(),
                                 /*ignoreResult=*/true);
    auto *Val = cast<llvm::ConstantInt>(Len.getScalarVal());
    CGF.LoopStack.setVectorizeWidth(Val->getZExtValue());
    // In presence of finite 'safelen', it may be unsafe to mark all
    // the memory instructions parallel, because loop-carried
    // dependences of 'safelen' iterations are possible.
    if (!IsMonotonic)
      CGF.LoopStack.setParallel(!D.getSingleClause<OMPSafelenClause>());
  } else if (const auto *C = D.getSingleClause<OMPSafelenClause>()) {
    RValue Len = CGF.EmitAnyExpr(C->getSafelen(), AggValueSlot::ignored(),
                                 /*ignoreResult=*/true);
    auto *Val = cast<llvm::ConstantInt>(Len.getScalarVal());
    CGF.LoopStack.setVectorizeWidth(Val->getZExtValue());
    // In presence of finite 'safelen', it may be unsafe to mark all
    // the memory instructions parallel, because loop-carried
    // dependences of 'safelen' iterations are possible.
    CGF.LoopStack.setParallel(/*Enable=*/false);
  }
}
2180
2181
/// Initialize the loop metadata stack for a simd loop: enable vectorization,
/// mark accesses parallel unless the schedule is monotonic, apply
/// simdlen/safelen and order(concurrent), and disable the parallel marking
/// for simd loops containing inscan reductions (prefix sums carry
/// loop-carried dependences).
void CodeGenFunction::EmitOMPSimdInit(const OMPLoopDirective &D,
                                      bool IsMonotonic) {
  // Walk clauses and process safelen/lastprivate.
  LoopStack.setParallel(!IsMonotonic);
  LoopStack.setVectorizeEnable();
  emitSimdlenSafelenClause(*this, D, IsMonotonic);
  if (const auto *C = D.getSingleClause<OMPOrderClause>())
    if (C->getKind() == OMPC_ORDER_concurrent)
      LoopStack.setParallel(/*Enable=*/true);
  // The inscan check applies to plain 'simd' and, under -fopenmp-simd, to any
  // simd-flavored directive.
  if ((D.getDirectiveKind() == OMPD_simd ||
       (getLangOpts().OpenMPSimd &&
        isOpenMPSimdDirective(D.getDirectiveKind()))) &&
      llvm::any_of(D.getClausesOfKind<OMPReductionClause>(),
                   [](const OMPReductionClause *C) {
                     return C->getModifier() == OMPC_REDUCTION_inscan;
                   }))
    // Disable parallel access in case of prefix sum.
    LoopStack.setParallel(/*Enable=*/false);
}
2200
2201
/// Emit final values of the privatized simd loop counters: copy each private
/// counter's last value back to the original (or captured) variable. As in
/// EmitOMPLinearClauseFinal, \p CondGen may supply a condition under which
/// the whole copy-back region is emitted; DoneBB doubles as the "region
/// opened" flag.
void CodeGenFunction::EmitOMPSimdFinal(
    const OMPLoopDirective &D,
    const llvm::function_ref<llvm::Value *(CodeGenFunction &)> CondGen) {
  if (!HaveInsertPoint())
    return;
  llvm::BasicBlock *DoneBB = nullptr;
  auto IC = D.counters().begin();
  auto IPC = D.private_counters().begin();
  for (const Expr *F : D.finals()) {
    const auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>((*IC))->getDecl());
    const auto *PrivateVD = cast<VarDecl>(cast<DeclRefExpr>((*IPC))->getDecl());
    const auto *CED = dyn_cast<OMPCapturedExprDecl>(OrigVD);
    // Only copy back counters whose original is addressable here (local,
    // captured, global, or a captured expression decl).
    if (LocalDeclMap.count(OrigVD) || CapturedStmtInfo->lookup(OrigVD) ||
        OrigVD->hasGlobalStorage() || CED) {
      if (!DoneBB) {
        if (llvm::Value *Cond = CondGen(*this)) {
          // If the first post-update expression is found, emit conditional
          // block if it was requested.
          llvm::BasicBlock *ThenBB = createBasicBlock(".omp.final.then");
          DoneBB = createBasicBlock(".omp.final.done");
          Builder.CreateCondBr(Cond, ThenBB, DoneBB);
          EmitBlock(ThenBB);
        }
      }
      Address OrigAddr = Address::invalid();
      if (CED) {
        // Captured-expression counter: its storage is the captured init.
        OrigAddr =
            EmitLValue(CED->getInit()->IgnoreImpCasts()).getAddress(*this);
      } else {
        DeclRefExpr DRE(getContext(), const_cast<VarDecl *>(PrivateVD),
                        /*RefersToEnclosingVariableOrCapture=*/false,
                        (*IPC)->getType(), VK_LValue, (*IPC)->getExprLoc());
        OrigAddr = EmitLValue(&DRE).getAddress(*this);
      }
      // Remap the original counter to OrigAddr so the final expression F
      // stores into it.
      OMPPrivateScope VarScope(*this);
      VarScope.addPrivate(OrigVD, [OrigAddr]() { return OrigAddr; });
      (void)VarScope.Privatize();
      EmitIgnoredExpr(F);
    }
    ++IC;
    ++IPC;
  }
  if (DoneBB)
    EmitBlock(DoneBB, /*IsFinished=*/true);
}
2246
2247
/// Emit the loop body of \p S followed by a debug stop point at the
/// directive's location.
static void emitOMPLoopBodyWithStopPoint(CodeGenFunction &CGF,
                                         const OMPLoopDirective &S,
                                         CodeGenFunction::JumpDest LoopExit) {
  CGF.EmitOMPLoopBody(S, LoopExit);
  CGF.EmitStopPoint(&S);
}
2253
2254
/// Emit a helper variable and return corresponding lvalue.
2255
static LValue EmitOMPHelperVar(CodeGenFunction &CGF,
2256
39.8k
                               const DeclRefExpr *Helper) {
2257
39.8k
  auto VDecl = cast<VarDecl>(Helper->getDecl());
2258
39.8k
  CGF.EmitVarDecl(*VDecl);
2259
39.8k
  return CGF.EmitLValue(Helper);
2260
39.8k
}
2261
2262
/// Emit a simd loop body, honoring an applicable if(simd:) clause: when the
/// condition is present (OpenMP >= 5.0), emit two versions — one with simd
/// metadata (\p SimdInitGen then \p BodyCodeGen) and one with vectorization
/// disabled — selected at runtime; otherwise emit only the simd version.
static void emitCommonSimdLoop(CodeGenFunction &CGF, const OMPLoopDirective &S,
                               const RegionCodeGenTy &SimdInitGen,
                               const RegionCodeGenTy &BodyCodeGen) {
  // "then": simd metadata enabled.
  auto &&ThenGen = [&S, &SimdInitGen, &BodyCodeGen](CodeGenFunction &CGF,
                                                    PrePostActionTy &) {
    CGOpenMPRuntime::NontemporalDeclsRAII NontemporalsRegion(CGF.CGM, S);
    CodeGenFunction::OMPLocalDeclMapRAII Scope(CGF);
    SimdInitGen(CGF);

    BodyCodeGen(CGF);
  };
  // "else": same body with vectorization explicitly disabled.
  auto &&ElseGen = [&BodyCodeGen](CodeGenFunction &CGF, PrePostActionTy &) {
    CodeGenFunction::OMPLocalDeclMapRAII Scope(CGF);
    CGF.LoopStack.setVectorizeEnable(/*Enable=*/false);

    BodyCodeGen(CGF);
  };
  // Find the first if() clause that applies to the simd part (unnamed or
  // 'simd'-named modifier); only meaningful from OpenMP 5.0 on.
  const Expr *IfCond = nullptr;
  if (isOpenMPSimdDirective(S.getDirectiveKind())) {
    for (const auto *C : S.getClausesOfKind<OMPIfClause>()) {
      if (CGF.getLangOpts().OpenMP >= 50 &&
          (C->getNameModifier() == OMPD_unknown ||
           C->getNameModifier() == OMPD_simd)) {
        IfCond = C->getCondition();
        break;
      }
    }
  }
  if (IfCond) {
    CGF.CGM.getOpenMPRuntime().emitIfClause(CGF, IfCond, ThenGen, ElseGen);
  } else {
    RegionCodeGenTy ThenRCG(ThenGen);
    ThenRCG(CGF);
  }
}
2297
2298
/// Emit the full region of a simd-flavored loop directive: precondition
/// guard, iteration variable, clause privatization (private/linear/
/// lastprivate/reduction), the vectorized inner loop, and the final
/// counter/linear/lastprivate/reduction updates.
static void emitOMPSimdRegion(CodeGenFunction &CGF, const OMPLoopDirective &S,
                              PrePostActionTy &Action) {
  Action.Enter(CGF);
  assert(isOpenMPSimdDirective(S.getDirectiveKind()) &&
         "Expected simd directive");
  OMPLoopScope PreInitScope(CGF, S);
  // if (PreCond) {
  //   for (IV in 0..LastIteration) BODY;
  //   <Final counter/linear vars updates>;
  // }
  //
  // Combined worksharing/distribute/taskloop forms carry lower/upper bound
  // helper variables that must exist before the precondition is evaluated.
  if (isOpenMPDistributeDirective(S.getDirectiveKind()) ||
      isOpenMPWorksharingDirective(S.getDirectiveKind()) ||
      isOpenMPTaskLoopDirective(S.getDirectiveKind())) {
    (void)EmitOMPHelperVar(CGF, cast<DeclRefExpr>(S.getLowerBoundVariable()));
    (void)EmitOMPHelperVar(CGF, cast<DeclRefExpr>(S.getUpperBoundVariable()));
  }

  // Emit: if (PreCond) - begin.
  // If the condition constant folds and can be elided, avoid emitting the
  // whole loop.
  bool CondConstant;
  llvm::BasicBlock *ContBlock = nullptr;
  if (CGF.ConstantFoldsToSimpleInteger(S.getPreCond(), CondConstant)) {
    if (!CondConstant)
      return;
  } else {
    llvm::BasicBlock *ThenBlock = CGF.createBasicBlock("simd.if.then");
    ContBlock = CGF.createBasicBlock("simd.if.end");
    emitPreCond(CGF, S, S.getPreCond(), ThenBlock, ContBlock,
                CGF.getProfileCount(&S));
    CGF.EmitBlock(ThenBlock);
    CGF.incrementProfileCounter(&S);
  }

  // Emit the loop iteration variable.
  const Expr *IVExpr = S.getIterationVariable();
  const auto *IVDecl = cast<VarDecl>(cast<DeclRefExpr>(IVExpr)->getDecl());
  CGF.EmitVarDecl(*IVDecl);
  CGF.EmitIgnoredExpr(S.getInit());

  // Emit the iterations count variable.
  // If it is not a variable, Sema decided to calculate iterations count on
  // each iteration (e.g., it is foldable into a constant).
  if (const auto *LIExpr = dyn_cast<DeclRefExpr>(S.getLastIteration())) {
    CGF.EmitVarDecl(*cast<VarDecl>(LIExpr->getDecl()));
    // Emit calculation of the iterations count.
    CGF.EmitIgnoredExpr(S.getCalcLastIteration());
  }

  emitAlignedClause(CGF, S);
  (void)CGF.EmitOMPLinearClauseInit(S);
  {
    // Privatization scope for the loop: counters, linear, private,
    // reduction, and lastprivate variables are remapped inside it.
    CodeGenFunction::OMPPrivateScope LoopScope(CGF);
    CGF.EmitOMPPrivateLoopCounters(S, LoopScope);
    CGF.EmitOMPLinearClause(S, LoopScope);
    CGF.EmitOMPPrivateClause(S, LoopScope);
    CGF.EmitOMPReductionClauseInit(S, LoopScope);
    CGOpenMPRuntime::LastprivateConditionalRAII LPCRegion(
        CGF, S, CGF.EmitLValue(S.getIterationVariable()));
    bool HasLastprivateClause = CGF.EmitOMPLastprivateClauseInit(S, LoopScope);
    (void)LoopScope.Privatize();
    if (isOpenMPTargetExecutionDirective(S.getDirectiveKind()))
      CGF.CGM.getOpenMPRuntime().adjustTargetSpecificDataForLambdas(CGF, S);

    emitCommonSimdLoop(
        CGF, S,
        [&S](CodeGenFunction &CGF, PrePostActionTy &) {
          CGF.EmitOMPSimdInit(S);
        },
        [&S, &LoopScope](CodeGenFunction &CGF, PrePostActionTy &) {
          CGF.EmitOMPInnerLoop(
              S, LoopScope.requiresCleanups(), S.getCond(), S.getInc(),
              [&S](CodeGenFunction &CGF) {
                emitOMPLoopBodyWithStopPoint(CGF, S,
                                             CodeGenFunction::JumpDest());
              },
              [](CodeGenFunction &) {});
        });
    CGF.EmitOMPSimdFinal(S, [](CodeGenFunction &) { return nullptr; });
    // Emit final copy of the lastprivate variables at the end of loops.
    if (HasLastprivateClause)
      CGF.EmitOMPLastprivateClauseFinal(S, /*NoFinals=*/true);
    CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_simd);
    emitPostUpdateForReductionClause(CGF, S,
                                     [](CodeGenFunction &) { return nullptr; });
  }
  CGF.EmitOMPLinearClauseFinal(S, [](CodeGenFunction &) { return nullptr; });
  // Emit: if (PreCond) - end.
  if (ContBlock) {
    CGF.EmitBranch(ContBlock);
    CGF.EmitBlock(ContBlock, true);
  }
}
2392
2393
172
void CodeGenFunction::EmitOMPSimdDirective(const OMPSimdDirective &S) {
2394
172
  ParentLoopDirectiveForScanRegion ScanRegion(*this, S);
2395
172
  OMPFirstScanLoop = true;
2396
172
  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
2397
172
    emitOMPSimdRegion(CGF, S, Action);
2398
172
  };
2399
172
  {
2400
172
    auto LPCRegion =
2401
172
        CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S);
2402
172
    OMPLexicalScope Scope(*this, S, OMPD_unknown);
2403
172
    CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_simd, CodeGen);
2404
172
  }
2405
  // Check for outer lastprivate conditional update.
2406
172
  checkForLastprivateConditionalUpdate(*this, S);
2407
172
}
2408
2409
/// Emit the outer dispatch loop shared by worksharing and distribute
/// constructs: repeatedly fetch (dynamic/ordered) or advance (static chunked)
/// a chunk of iterations and run the inner loop over it via \p CodeGenLoop.
/// \p CodeGenOrdered is invoked at the bottom of each inner iteration for
/// 'ordered' handling (may be a no-op).
void CodeGenFunction::EmitOMPOuterLoop(
    bool DynamicOrOrdered, bool IsMonotonic, const OMPLoopDirective &S,
    CodeGenFunction::OMPPrivateScope &LoopScope,
    const CodeGenFunction::OMPLoopArguments &LoopArgs,
    const CodeGenFunction::CodeGenLoopTy &CodeGenLoop,
    const CodeGenFunction::CodeGenOrderedTy &CodeGenOrdered) {
  CGOpenMPRuntime &RT = CGM.getOpenMPRuntime();

  const Expr *IVExpr = S.getIterationVariable();
  const unsigned IVSize = getContext().getTypeSize(IVExpr->getType());
  const bool IVSigned = IVExpr->getType()->hasSignedIntegerRepresentation();

  JumpDest LoopExit = getJumpDestInCurrentScope("omp.dispatch.end");

  // Start the loop with a block that tests the condition.
  llvm::BasicBlock *CondBlock = createBasicBlock("omp.dispatch.cond");
  EmitBlock(CondBlock);
  const SourceRange R = S.getSourceRange();
  LoopStack.push(CondBlock, SourceLocToDebugLoc(R.getBegin()),
                 SourceLocToDebugLoc(R.getEnd()));

  llvm::Value *BoolCondVal = nullptr;
  if (!DynamicOrOrdered) {
    // Static chunked schedule: continue while there are iterations left.
    // UB = min(UB, GlobalUB) or
    // UB = min(UB, PrevUB) for combined loop sharing constructs (e.g.
    // 'distribute parallel for')
    EmitIgnoredExpr(LoopArgs.EUB);
    // IV = LB
    EmitIgnoredExpr(LoopArgs.Init);
    // IV < UB
    BoolCondVal = EvaluateExprAsBool(LoopArgs.Cond);
  } else {
    // Dynamic/ordered schedule: ask the runtime for the next chunk; the
    // returned flag is false when no chunks remain.
    BoolCondVal =
        RT.emitForNext(*this, S.getBeginLoc(), IVSize, IVSigned, LoopArgs.IL,
                       LoopArgs.LB, LoopArgs.UB, LoopArgs.ST);
  }

  // If there are any cleanups between here and the loop-exit scope,
  // create a block to stage a loop exit along.
  llvm::BasicBlock *ExitBlock = LoopExit.getBlock();
  if (LoopScope.requiresCleanups())
    ExitBlock = createBasicBlock("omp.dispatch.cleanup");

  llvm::BasicBlock *LoopBody = createBasicBlock("omp.dispatch.body");
  Builder.CreateCondBr(BoolCondVal, LoopBody, ExitBlock);
  if (ExitBlock != LoopExit.getBlock()) {
    EmitBlock(ExitBlock);
    EmitBranchThroughCleanup(LoopExit);
  }
  EmitBlock(LoopBody);

  // Emit "IV = LB" (in case of static schedule, we have already calculated new
  // LB for loop condition and emitted it above).
  if (DynamicOrOrdered)
    EmitIgnoredExpr(LoopArgs.Init);

  // Create a block for the increment.
  JumpDest Continue = getJumpDestInCurrentScope("omp.dispatch.inc");
  BreakContinueStack.push_back(BreakContinue(LoopExit, Continue));

  emitCommonSimdLoop(
      *this, S,
      [&S, IsMonotonic](CodeGenFunction &CGF, PrePostActionTy &) {
        // Generate !llvm.loop.parallel metadata for loads and stores for loops
        // with dynamic/guided scheduling and without ordered clause.
        if (!isOpenMPSimdDirective(S.getDirectiveKind())) {
          CGF.LoopStack.setParallel(!IsMonotonic);
          if (const auto *C = S.getSingleClause<OMPOrderClause>())
            if (C->getKind() == OMPC_ORDER_concurrent)
              CGF.LoopStack.setParallel(/*Enable=*/true);
        } else {
          CGF.EmitOMPSimdInit(S, IsMonotonic);
        }
      },
      [&S, &LoopArgs, LoopExit, &CodeGenLoop, IVSize, IVSigned, &CodeGenOrdered,
       &LoopScope](CodeGenFunction &CGF, PrePostActionTy &) {
        SourceLocation Loc = S.getBeginLoc();
        // when 'distribute' is not combined with a 'for':
        // while (idx <= UB) { BODY; ++idx; }
        // when 'distribute' is combined with a 'for'
        // (e.g. 'distribute parallel for')
        // while (idx <= UB) { <CodeGen rest of pragma>; idx += ST; }
        CGF.EmitOMPInnerLoop(
            S, LoopScope.requiresCleanups(), LoopArgs.Cond, LoopArgs.IncExpr,
            [&S, LoopExit, &CodeGenLoop](CodeGenFunction &CGF) {
              CodeGenLoop(CGF, S, LoopExit);
            },
            [IVSize, IVSigned, Loc, &CodeGenOrdered](CodeGenFunction &CGF) {
              CodeGenOrdered(CGF, Loc, IVSize, IVSigned);
            });
      });

  EmitBlock(Continue.getBlock());
  BreakContinueStack.pop_back();
  if (!DynamicOrOrdered) {
    // Emit "LB = LB + Stride", "UB = UB + Stride".
    EmitIgnoredExpr(LoopArgs.NextLB);
    EmitIgnoredExpr(LoopArgs.NextUB);
  }

  // Loop back to fetch/advance to the next chunk.
  EmitBranch(CondBlock);
  LoopStack.pop();
  // Emit the fall-through block.
  EmitBlock(LoopExit.getBlock());

  // Tell the runtime we are done.
  auto &&CodeGen = [DynamicOrOrdered, &S](CodeGenFunction &CGF) {
    // Only static schedules need an explicit "static fini" runtime call;
    // dynamic dispatch signals completion through emitForNext itself.
    if (!DynamicOrOrdered)
      CGF.CGM.getOpenMPRuntime().emitForStaticFinish(CGF, S.getEndLoc(),
                                                     S.getDirectiveKind());
  };
  OMPCancelStack.emitExit(*this, S.getDirectiveKind(), CodeGen);
}
2522
2523
/// Emit the outer loop for a worksharing 'for' whose schedule requires one
/// (dynamic/guided/auto/runtime, static chunked, or 'ordered'): initialize
/// the runtime's dispatch or static-init state, then delegate the chunk loop
/// to EmitOMPOuterLoop.
void CodeGenFunction::EmitOMPForOuterLoop(
    const OpenMPScheduleTy &ScheduleKind, bool IsMonotonic,
    const OMPLoopDirective &S, OMPPrivateScope &LoopScope, bool Ordered,
    const OMPLoopArguments &LoopArgs,
    const CodeGenDispatchBoundsTy &CGDispatchBounds) {
  CGOpenMPRuntime &RT = CGM.getOpenMPRuntime();

  // Dynamic scheduling of the outer loop (dynamic, guided, auto, runtime).
  const bool DynamicOrOrdered =
      Ordered || RT.isDynamic(ScheduleKind.Schedule);

  assert((Ordered ||
          !RT.isStaticNonchunked(ScheduleKind.Schedule,
                                 LoopArgs.Chunk != nullptr)) &&
         "static non-chunked schedule does not need outer loop");

  // Emit outer loop.
  //
  // OpenMP [2.7.1, Loop Construct, Description, table 2-1]
  // When schedule(dynamic,chunk_size) is specified, the iterations are
  // distributed to threads in the team in chunks as the threads request them.
  // Each thread executes a chunk of iterations, then requests another chunk,
  // until no chunks remain to be distributed. Each chunk contains chunk_size
  // iterations, except for the last chunk to be distributed, which may have
  // fewer iterations. When no chunk_size is specified, it defaults to 1.
  //
  // When schedule(guided,chunk_size) is specified, the iterations are assigned
  // to threads in the team in chunks as the executing threads request them.
  // Each thread executes a chunk of iterations, then requests another chunk,
  // until no chunks remain to be assigned. For a chunk_size of 1, the size of
  // each chunk is proportional to the number of unassigned iterations divided
  // by the number of threads in the team, decreasing to 1. For a chunk_size
  // with value k (greater than 1), the size of each chunk is determined in the
  // same way, with the restriction that the chunks do not contain fewer than k
  // iterations (except for the last chunk to be assigned, which may have fewer
  // than k iterations).
  //
  // When schedule(auto) is specified, the decision regarding scheduling is
  // delegated to the compiler and/or runtime system. The programmer gives the
  // implementation the freedom to choose any possible mapping of iterations to
  // threads in the team.
  //
  // When schedule(runtime) is specified, the decision regarding scheduling is
  // deferred until run time, and the schedule and chunk size are taken from the
  // run-sched-var ICV. If the ICV is set to auto, the schedule is
  // implementation defined
  //
  // while(__kmpc_dispatch_next(&LB, &UB)) {
  //   idx = LB;
  //   while (idx <= UB) { BODY; ++idx;
  //   __kmpc_dispatch_fini_(4|8)[u](); // For ordered loops only.
  //   } // inner loop
  // }
  //
  // OpenMP [2.7.1, Loop Construct, Description, table 2-1]
  // When schedule(static, chunk_size) is specified, iterations are divided into
  // chunks of size chunk_size, and the chunks are assigned to the threads in
  // the team in a round-robin fashion in the order of the thread number.
  //
  // while(UB = min(UB, GlobalUB), idx = LB, idx < UB) {
  //   while (idx <= UB) { BODY; ++idx; } // inner loop
  //   LB = LB + ST;
  //   UB = UB + ST;
  // }
  //

  const Expr *IVExpr = S.getIterationVariable();
  const unsigned IVSize = getContext().getTypeSize(IVExpr->getType());
  const bool IVSigned = IVExpr->getType()->hasSignedIntegerRepresentation();

  if (DynamicOrOrdered) {
    // Compute the (possibly non-normalized) global bounds and hand them to
    // the runtime's dispatch-init entry point.
    const std::pair<llvm::Value *, llvm::Value *> DispatchBounds =
        CGDispatchBounds(*this, S, LoopArgs.LB, LoopArgs.UB);
    llvm::Value *LBVal = DispatchBounds.first;
    llvm::Value *UBVal = DispatchBounds.second;
    CGOpenMPRuntime::DispatchRTInput DipatchRTInputValues = {LBVal, UBVal,
                                                             LoopArgs.Chunk};
    RT.emitForDispatchInit(*this, S.getBeginLoc(), ScheduleKind, IVSize,
                           IVSigned, Ordered, DipatchRTInputValues);
  } else {
    // Static chunked schedule: one static-init call sets up LB/UB/ST/IL.
    CGOpenMPRuntime::StaticRTInput StaticInit(
        IVSize, IVSigned, Ordered, LoopArgs.IL, LoopArgs.LB, LoopArgs.UB,
        LoopArgs.ST, LoopArgs.Chunk);
    RT.emitForStaticInit(*this, S.getBeginLoc(), S.getDirectiveKind(),
                         ScheduleKind, StaticInit);
  }

  // Per-iteration 'ordered' callback: only loops with an 'ordered' clause
  // need the dispatch_fini runtime call at the end of each iteration.
  auto &&CodeGenOrdered = [Ordered](CodeGenFunction &CGF, SourceLocation Loc,
                                    const unsigned IVSize,
                                    const bool IVSigned) {
    if (Ordered) {
      CGF.CGM.getOpenMPRuntime().emitForOrderedIterationEnd(CGF, Loc, IVSize,
                                                            IVSigned);
    }
  };

  OMPLoopArguments OuterLoopArgs(LoopArgs.LB, LoopArgs.UB, LoopArgs.ST,
                                 LoopArgs.IL, LoopArgs.Chunk, LoopArgs.EUB);
  OuterLoopArgs.IncExpr = S.getInc();
  OuterLoopArgs.Init = S.getInit();
  OuterLoopArgs.Cond = S.getCond();
  OuterLoopArgs.NextLB = S.getNextLowerBound();
  OuterLoopArgs.NextUB = S.getNextUpperBound();
  EmitOMPOuterLoop(DynamicOrOrdered, IsMonotonic, S, LoopScope, OuterLoopArgs,
                   emitOMPLoopBodyWithStopPoint, CodeGenOrdered);
}
2629
2630
/// No-op 'ordered' callback, passed to EmitOMPOuterLoop for constructs
/// (e.g. plain 'distribute') that have no per-iteration ordered handling.
static void emitEmptyOrdered(CodeGenFunction &, SourceLocation Loc,
                             const unsigned IVSize, const bool IVSigned) {}
2632
2633
void CodeGenFunction::EmitOMPDistributeOuterLoop(
2634
    OpenMPDistScheduleClauseKind ScheduleKind, const OMPLoopDirective &S,
2635
    OMPPrivateScope &LoopScope, const OMPLoopArguments &LoopArgs,
2636
164
    const CodeGenLoopTy &CodeGenLoopContent) {
2637
164
2638
164
  CGOpenMPRuntime &RT = CGM.getOpenMPRuntime();
2639
164
2640
  // Emit outer loop.
2641
  // Same behavior as a OMPForOuterLoop, except that schedule cannot be
2642
  // dynamic
2643
  //
2644
164
2645
164
  const Expr *IVExpr = S.getIterationVariable();
2646
164
  const unsigned IVSize = getContext().getTypeSize(IVExpr->getType());
2647
164
  const bool IVSigned = IVExpr->getType()->hasSignedIntegerRepresentation();
2648
164
2649
164
  CGOpenMPRuntime::StaticRTInput StaticInit(
2650
164
      IVSize, IVSigned, /* Ordered = */ false, LoopArgs.IL, LoopArgs.LB,
2651
164
      LoopArgs.UB, LoopArgs.ST, LoopArgs.Chunk);
2652
164
  RT.emitDistributeStaticInit(*this, S.getBeginLoc(), ScheduleKind, StaticInit);
2653
164
2654
  // for combined 'distribute' and 'for' the increment expression of distribute
2655
  // is stored in DistInc. For 'distribute' alone, it is in Inc.
2656
164
  Expr *IncExpr;
2657
164
  if (isOpenMPLoopBoundSharingDirective(S.getDirectiveKind()))
2658
0
    IncExpr = S.getDistInc();
2659
164
  else
2660
164
    IncExpr = S.getInc();
2661
164
2662
  // this routine is shared by 'omp distribute parallel for' and
2663
  // 'omp distribute': select the right EUB expression depending on the
2664
  // directive
2665
164
  OMPLoopArguments OuterLoopArgs;
2666
164
  OuterLoopArgs.LB = LoopArgs.LB;
2667
164
  OuterLoopArgs.UB = LoopArgs.UB;
2668
164
  OuterLoopArgs.ST = LoopArgs.ST;
2669
164
  OuterLoopArgs.IL = LoopArgs.IL;
2670
164
  OuterLoopArgs.Chunk = LoopArgs.Chunk;
2671
164
  OuterLoopArgs.EUB = isOpenMPLoopBoundSharingDirective(S.getDirectiveKind())
2672
0
                          ? S.getCombinedEnsureUpperBound()
2673
164
                          : S.getEnsureUpperBound();
2674
164
  OuterLoopArgs.IncExpr = IncExpr;
2675
164
  OuterLoopArgs.Init = isOpenMPLoopBoundSharingDirective(S.getDirectiveKind())
2676
0
                           ? S.getCombinedInit()
2677
164
                           : S.getInit();
2678
164
  OuterLoopArgs.Cond = isOpenMPLoopBoundSharingDirective(S.getDirectiveKind())
2679
0
                           ? S.getCombinedCond()
2680
164
                           : S.getCond();
2681
164
  OuterLoopArgs.NextLB = isOpenMPLoopBoundSharingDirective(S.getDirectiveKind())
2682
0
                             ? S.getCombinedNextLowerBound()
2683
164
                             : S.getNextLowerBound();
2684
164
  OuterLoopArgs.NextUB = isOpenMPLoopBoundSharingDirective(S.getDirectiveKind())
2685
0
                             ? S.getCombinedNextUpperBound()
2686
164
                             : S.getNextUpperBound();
2687
164
2688
164
  EmitOMPOuterLoop(/* DynamicOrOrdered = */ false, /* IsMonotonic = */ false, S,
2689
164
                   LoopScope, OuterLoopArgs, CodeGenLoopContent,
2690
164
                   emitEmptyOrdered);
2691
164
}
2692
2693
static std::pair<LValue, LValue>
2694
emitDistributeParallelForInnerBounds(CodeGenFunction &CGF,
2695
2.52k
                                     const OMPExecutableDirective &S) {
2696
2.52k
  const OMPLoopDirective &LS = cast<OMPLoopDirective>(S);
2697
2.52k
  LValue LB =
2698
2.52k
      EmitOMPHelperVar(CGF, cast<DeclRefExpr>(LS.getLowerBoundVariable()));
2699
2.52k
  LValue UB =
2700
2.52k
      EmitOMPHelperVar(CGF, cast<DeclRefExpr>(LS.getUpperBoundVariable()));
2701
2.52k
2702
  // When composing 'distribute' with 'for' (e.g. as in 'distribute
2703
  // parallel for') we need to use the 'distribute'
2704
  // chunk lower and upper bounds rather than the whole loop iteration
2705
  // space. These are parameters to the outlined function for 'parallel'
2706
  // and we copy the bounds of the previous schedule into the
2707
  // the current ones.
2708
2.52k
  LValue PrevLB = CGF.EmitLValue(LS.getPrevLowerBoundVariable());
2709
2.52k
  LValue PrevUB = CGF.EmitLValue(LS.getPrevUpperBoundVariable());
2710
2.52k
  llvm::Value *PrevLBVal = CGF.EmitLoadOfScalar(
2711
2.52k
      PrevLB, LS.getPrevLowerBoundVariable()->getExprLoc());
2712
2.52k
  PrevLBVal = CGF.EmitScalarConversion(
2713
2.52k
      PrevLBVal, LS.getPrevLowerBoundVariable()->getType(),
2714
2.52k
      LS.getIterationVariable()->getType(),
2715
2.52k
      LS.getPrevLowerBoundVariable()->getExprLoc());
2716
2.52k
  llvm::Value *PrevUBVal = CGF.EmitLoadOfScalar(
2717
2.52k
      PrevUB, LS.getPrevUpperBoundVariable()->getExprLoc());
2718
2.52k
  PrevUBVal = CGF.EmitScalarConversion(
2719
2.52k
      PrevUBVal, LS.getPrevUpperBoundVariable()->getType(),
2720
2.52k
      LS.getIterationVariable()->getType(),
2721
2.52k
      LS.getPrevUpperBoundVariable()->getExprLoc());
2722
2.52k
2723
2.52k
  CGF.EmitStoreOfScalar(PrevLBVal, LB);
2724
2.52k
  CGF.EmitStoreOfScalar(PrevUBVal, UB);
2725
2.52k
2726
2.52k
  return {LB, UB};
2727
2.52k
}
2728
2729
/// if the 'for' loop has a dispatch schedule (e.g. dynamic, guided) then
2730
/// we need to use the LB and UB expressions generated by the worksharing
2731
/// code generation support, whereas in non combined situations we would
2732
/// just emit 0 and the LastIteration expression
2733
/// This function is necessary due to the difference of the LB and UB
2734
/// types for the RT emission routines for 'for_static_init' and
2735
/// 'for_dispatch_init'
2736
static std::pair<llvm::Value *, llvm::Value *>
2737
emitDistributeParallelForDispatchBounds(CodeGenFunction &CGF,
2738
                                        const OMPExecutableDirective &S,
2739
440
                                        Address LB, Address UB) {
2740
440
  const OMPLoopDirective &LS = cast<OMPLoopDirective>(S);
2741
440
  const Expr *IVExpr = LS.getIterationVariable();
2742
  // when implementing a dynamic schedule for a 'for' combined with a
2743
  // 'distribute' (e.g. 'distribute parallel for'), the 'for' loop
2744
  // is not normalized as each team only executes its own assigned
2745
  // distribute chunk
2746
440
  QualType IteratorTy = IVExpr->getType();
2747
440
  llvm::Value *LBVal =
2748
440
      CGF.EmitLoadOfScalar(LB, /*Volatile=*/false, IteratorTy, S.getBeginLoc());
2749
440
  llvm::Value *UBVal =
2750
440
      CGF.EmitLoadOfScalar(UB, /*Volatile=*/false, IteratorTy, S.getBeginLoc());
2751
440
  return {LBVal, UBVal};
2752
440
}
2753
2754
static void emitDistributeParallelForDistributeInnerBoundParams(
2755
    CodeGenFunction &CGF, const OMPExecutableDirective &S,
2756
2.52k
    llvm::SmallVectorImpl<llvm::Value *> &CapturedVars) {
2757
2.52k
  const auto &Dir = cast<OMPLoopDirective>(S);
2758
2.52k
  LValue LB =
2759
2.52k
      CGF.EmitLValue(cast<DeclRefExpr>(Dir.getCombinedLowerBoundVariable()));
2760
2.52k
  llvm::Value *LBCast =
2761
2.52k
      CGF.Builder.CreateIntCast(CGF.Builder.CreateLoad(LB.getAddress(CGF)),
2762
2.52k
                                CGF.SizeTy, /*isSigned=*/false);
2763
2.52k
  CapturedVars.push_back(LBCast);
2764
2.52k
  LValue UB =
2765
2.52k
      CGF.EmitLValue(cast<DeclRefExpr>(Dir.getCombinedUpperBoundVariable()));
2766
2.52k
2767
2.52k
  llvm::Value *UBCast =
2768
2.52k
      CGF.Builder.CreateIntCast(CGF.Builder.CreateLoad(UB.getAddress(CGF)),
2769
2.52k
                                CGF.SizeTy, /*isSigned=*/false);
2770
2.52k
  CapturedVars.push_back(UBCast);
2771
2.52k
}
2772
2773
static void
2774
emitInnerParallelForWhenCombined(CodeGenFunction &CGF,
2775
                                 const OMPLoopDirective &S,
2776
2.52k
                                 CodeGenFunction::JumpDest LoopExit) {
2777
2.52k
  auto &&CGInlinedWorksharingLoop = [&S](CodeGenFunction &CGF,
2778
2.52k
                                         PrePostActionTy &Action) {
2779
2.52k
    Action.Enter(CGF);
2780
2.52k
    bool HasCancel = false;
2781
2.52k
    if (!isOpenMPSimdDirective(S.getDirectiveKind())) {
2782
1.20k
      if (const auto *D = dyn_cast<OMPTeamsDistributeParallelForDirective>(&S))
2783
308
        HasCancel = D->hasCancel();
2784
900
      else if (const auto *D = dyn_cast<OMPDistributeParallelForDirective>(&S))
2785
356
        HasCancel = D->hasCancel();
2786
544
      else if (const auto *D =
2787
544
                   dyn_cast<OMPTargetTeamsDistributeParallelForDirective>(&S))
2788
544
        HasCancel = D->hasCancel();
2789
1.20k
    }
2790
2.52k
    CodeGenFunction::OMPCancelStackRAII CancelRegion(CGF, S.getDirectiveKind(),
2791
2.52k
                                                     HasCancel);
2792
2.52k
    CGF.EmitOMPWorksharingLoop(S, S.getPrevEnsureUpperBound(),
2793
2.52k
                               emitDistributeParallelForInnerBounds,
2794
2.52k
                               emitDistributeParallelForDispatchBounds);
2795
2.52k
  };
2796
2.52k
2797
2.52k
  emitCommonOMPParallelDirective(
2798
2.52k
      CGF, S,
2799
1.31k
      isOpenMPSimdDirective(S.getDirectiveKind()) ? OMPD_for_simd : 
OMPD_for1.20k
,
2800
2.52k
      CGInlinedWorksharingLoop,
2801
2.52k
      emitDistributeParallelForDistributeInnerBoundParams);
2802
2.52k
}
2803
2804
void CodeGenFunction::EmitOMPDistributeParallelForDirective(
2805
356
    const OMPDistributeParallelForDirective &S) {
2806
356
  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &) {
2807
356
    CGF.EmitOMPDistributeLoop(S, emitInnerParallelForWhenCombined,
2808
356
                              S.getDistInc());
2809
356
  };
2810
356
  OMPLexicalScope Scope(*this, S, OMPD_parallel);
2811
356
  CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_distribute, CodeGen);
2812
356
}
2813
2814
void CodeGenFunction::EmitOMPDistributeParallelForSimdDirective(
2815
252
    const OMPDistributeParallelForSimdDirective &S) {
2816
252
  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &) {
2817
252
    CGF.EmitOMPDistributeLoop(S, emitInnerParallelForWhenCombined,
2818
252
                              S.getDistInc());
2819
252
  };
2820
252
  OMPLexicalScope Scope(*this, S, OMPD_parallel);
2821
252
  CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_distribute, CodeGen);
2822
252
}
2823
2824
void CodeGenFunction::EmitOMPDistributeSimdDirective(
2825
150
    const OMPDistributeSimdDirective &S) {
2826
150
  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &) {
2827
150
    CGF.EmitOMPDistributeLoop(S, emitOMPLoopBodyWithStopPoint, S.getInc());
2828
150
  };
2829
150
  OMPLexicalScope Scope(*this, S, OMPD_unknown);
2830
150
  CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_simd, CodeGen);
2831
150
}
2832
2833
void CodeGenFunction::EmitOMPTargetSimdDeviceFunction(
2834
193
    CodeGenModule &CGM, StringRef ParentName, const OMPTargetSimdDirective &S) {
2835
  // Emit SPMD target parallel for region as a standalone region.
2836
193
  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
2837
193
    emitOMPSimdRegion(CGF, S, Action);
2838
193
  };
2839
193
  llvm::Function *Fn;
2840
193
  llvm::Constant *Addr;
2841
  // Emit target region as a standalone region.
2842
193
  CGM.getOpenMPRuntime().emitTargetOutlinedFunction(
2843
193
      S, ParentName, Fn, Addr, /*IsOffloadEntry=*/true, CodeGen);
2844
193
  assert(Fn && Addr && "Target device function emission failed.");
2845
193
}
2846
2847
void CodeGenFunction::EmitOMPTargetSimdDirective(
2848
327
    const OMPTargetSimdDirective &S) {
2849
327
  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
2850
327
    emitOMPSimdRegion(CGF, S, Action);
2851
327
  };
2852
327
  emitCommonOMPTargetDirective(*this, S, CodeGen);
2853
327
}
2854
2855
namespace {
  /// Bundles an OpenMP schedule clause kind with its (up to two) schedule
  /// modifiers (e.g. monotonic/nonmonotonic).
  struct ScheduleKindModifiersTy {
    OpenMPScheduleClauseKind Kind;
    OpenMPScheduleClauseModifier M1;
    OpenMPScheduleClauseModifier M2;
    ScheduleKindModifiersTy(OpenMPScheduleClauseKind Kind,
                            OpenMPScheduleClauseModifier M1,
                            OpenMPScheduleClauseModifier M2)
        : Kind(Kind), M1(M1), M2(M2) {}
  };
} // namespace
2866
2867
bool CodeGenFunction::EmitOMPWorksharingLoop(
2868
    const OMPLoopDirective &S, Expr *EUB,
2869
    const CodeGenLoopBoundsTy &CodeGenLoopBounds,
2870
4.47k
    const CodeGenDispatchBoundsTy &CGDispatchBounds) {
2871
  // Emit the loop iteration variable.
2872
4.47k
  const auto *IVExpr = cast<DeclRefExpr>(S.getIterationVariable());
2873
4.47k
  const auto *IVDecl = cast<VarDecl>(IVExpr->getDecl());
2874
4.47k
  EmitVarDecl(*IVDecl);
2875
4.47k
2876
  // Emit the iterations count variable.
2877
  // If it is not a variable, Sema decided to calculate iterations count on each
2878
  // iteration (e.g., it is foldable into a constant).
2879
4.47k
  if (const auto *LIExpr = dyn_cast<DeclRefExpr>(S.getLastIteration())) {
2880
0
    EmitVarDecl(*cast<VarDecl>(LIExpr->getDecl()));
2881
    // Emit calculation of the iterations count.
2882
0
    EmitIgnoredExpr(S.getCalcLastIteration());
2883
0
  }
2884
4.47k
2885
4.47k
  CGOpenMPRuntime &RT = CGM.getOpenMPRuntime();
2886
4.47k
2887
4.47k
  bool HasLastprivateClause;
2888
  // Check pre-condition.
2889
4.47k
  {
2890
4.47k
    OMPLoopScope PreInitScope(*this, S);
2891
    // Skip the entire loop if we don't meet the precondition.
2892
    // If the condition constant folds and can be elided, avoid emitting the
2893
    // whole loop.
2894
4.47k
    bool CondConstant;
2895
4.47k
    llvm::BasicBlock *ContBlock = nullptr;
2896
4.47k
    if (ConstantFoldsToSimpleInteger(S.getPreCond(), CondConstant)) {
2897
3.62k
      if (!CondConstant)
2898
52
        return false;
2899
848
    } else {
2900
848
      llvm::BasicBlock *ThenBlock = createBasicBlock("omp.precond.then");
2901
848
      ContBlock = createBasicBlock("omp.precond.end");
2902
848
      emitPreCond(*this, S, S.getPreCond(), ThenBlock, ContBlock,
2903
848
                  getProfileCount(&S));
2904
848
      EmitBlock(ThenBlock);
2905
848
      incrementProfileCounter(&S);
2906
848
    }
2907
4.47k
2908
4.41k
    RunCleanupsScope DoacrossCleanupScope(*this);
2909
4.41k
    bool Ordered = false;
2910
4.41k
    if (const auto *OrderedClause = S.getSingleClause<OMPOrderedClause>()) {
2911
53
      if (OrderedClause->getNumForLoops())
2912
16
        RT.emitDoacrossInit(*this, S, OrderedClause->getLoopNumIterations());
2913
37
      else
2914
37
        Ordered = true;
2915
53
    }
2916
4.41k
2917
4.41k
    llvm::DenseSet<const Expr *> EmittedFinals;
2918
4.41k
    emitAlignedClause(*this, S);
2919
4.41k
    bool HasLinears = EmitOMPLinearClauseInit(S);
2920
    // Emit helper vars inits.
2921
4.41k
2922
4.41k
    std::pair<LValue, LValue> Bounds = CodeGenLoopBounds(*this, S);
2923
4.41k
    LValue LB = Bounds.first;
2924
4.41k
    LValue UB = Bounds.second;
2925
4.41k
    LValue ST =
2926
4.41k
        EmitOMPHelperVar(*this, cast<DeclRefExpr>(S.getStrideVariable()));
2927
4.41k
    LValue IL =
2928
4.41k
        EmitOMPHelperVar(*this, cast<DeclRefExpr>(S.getIsLastIterVariable()));
2929
4.41k
2930
    // Emit 'then' code.
2931
4.41k
    {
2932
4.41k
      OMPPrivateScope LoopScope(*this);
2933
4.41k
      if (EmitOMPFirstprivateClause(S, LoopScope) || 
HasLinears4.41k
) {
2934
        // Emit implicit barrier to synchronize threads and avoid data races on
2935
        // initialization of firstprivate variables and post-update of
2936
        // lastprivate variables.
2937
142
        CGM.getOpenMPRuntime().emitBarrierCall(
2938
142
            *this, S.getBeginLoc(), OMPD_unknown, /*EmitChecks=*/false,
2939
142
            /*ForceSimpleCall=*/true);
2940
142
      }
2941
4.41k
      EmitOMPPrivateClause(S, LoopScope);
2942
4.41k
      CGOpenMPRuntime::LastprivateConditionalRAII LPCRegion(
2943
4.41k
          *this, S, EmitLValue(S.getIterationVariable()));
2944
4.41k
      HasLastprivateClause = EmitOMPLastprivateClauseInit(S, LoopScope);
2945
4.41k
      EmitOMPReductionClauseInit(S, LoopScope);
2946
4.41k
      EmitOMPPrivateLoopCounters(S, LoopScope);
2947
4.41k
      EmitOMPLinearClause(S, LoopScope);
2948
4.41k
      (void)LoopScope.Privatize();
2949
4.41k
      if (isOpenMPTargetExecutionDirective(S.getDirectiveKind()))
2950
2.30k
        CGM.getOpenMPRuntime().adjustTargetSpecificDataForLambdas(*this, S);
2951
4.41k
2952
      // Detect the loop schedule kind and chunk.
2953
4.41k
      const Expr *ChunkExpr = nullptr;
2954
4.41k
      OpenMPScheduleTy ScheduleKind;
2955
4.41k
      if (const auto *C = S.getSingleClause<OMPScheduleClause>()) {
2956
1.23k
        ScheduleKind.Schedule = C->getScheduleKind();
2957
1.23k
        ScheduleKind.M1 = C->getFirstScheduleModifier();
2958
1.23k
        ScheduleKind.M2 = C->getSecondScheduleModifier();
2959
1.23k
        ChunkExpr = C->getChunkSize();
2960
3.18k
      } else {
2961
        // Default behaviour for schedule clause.
2962
3.18k
        CGM.getOpenMPRuntime().getDefaultScheduleAndChunk(
2963
3.18k
            *this, S, ScheduleKind.Schedule, ChunkExpr);
2964
3.18k
      }
2965
4.41k
      bool HasChunkSizeOne = false;
2966
4.41k
      llvm::Value *Chunk = nullptr;
2967
4.41k
      if (ChunkExpr) {
2968
659
        Chunk = EmitScalarExpr(ChunkExpr);
2969
659
        Chunk = EmitScalarConversion(Chunk, ChunkExpr->getType(),
2970
659
                                     S.getIterationVariable()->getType(),
2971
659
                                     S.getBeginLoc());
2972
659
        Expr::EvalResult Result;
2973
659
        if (ChunkExpr->EvaluateAsInt(Result, getContext())) {
2974
454
          llvm::APSInt EvaluatedChunk = Result.Val.getInt();
2975
454
          HasChunkSizeOne = (EvaluatedChunk.getLimitedValue() == 1);
2976
454
        }
2977
659
      }
2978
4.41k
      const unsigned IVSize = getContext().getTypeSize(IVExpr->getType());
2979
4.41k
      const bool IVSigned = IVExpr->getType()->hasSignedIntegerRepresentation();
2980
      // OpenMP 4.5, 2.7.1 Loop Construct, Description.
2981
      // If the static schedule kind is specified or if the ordered clause is
2982
      // specified, and if no monotonic modifier is specified, the effect will
2983
      // be as if the monotonic modifier was specified.
2984
4.41k
      bool StaticChunkedOne = RT.isStaticChunked(ScheduleKind.Schedule,
2985
527
          /* Chunked */ Chunk != nullptr) && HasChunkSizeOne &&
2986
308
          isOpenMPLoopBoundSharingDirective(S.getDirectiveKind());
2987
4.41k
      bool IsMonotonic =
2988
4.41k
          Ordered ||
2989
4.38k
          ((ScheduleKind.Schedule == OMPC_SCHEDULE_static ||
2990
3.66k
            ScheduleKind.Schedule == OMPC_SCHEDULE_unknown) &&
2991
3.67k
           !(ScheduleKind.M1 == OMPC_SCHEDULE_MODIFIER_nonmonotonic ||
2992
3.67k
             ScheduleKind.M2 == OMPC_SCHEDULE_MODIFIER_nonmonotonic)) ||
2993
708
          ScheduleKind.M1 == OMPC_SCHEDULE_MODIFIER_monotonic ||
2994
700
          ScheduleKind.M2 == OMPC_SCHEDULE_MODIFIER_monotonic;
2995
4.41k
      if ((RT.isStaticNonchunked(ScheduleKind.Schedule,
2996
4.41k
                                 /* Chunked */ Chunk != nullptr) ||
2997
1.25k
           StaticChunkedOne) &&
2998
3.38k
          !Ordered) {
2999
3.36k
        JumpDest LoopExit =
3000
3.36k
            getJumpDestInCurrentScope(createBasicBlock("omp.loop.exit"));
3001
3.36k
        emitCommonSimdLoop(
3002
3.36k
            *this, S,
3003
3.35k
            [&S, IsMonotonic](CodeGenFunction &CGF, PrePostActionTy &) {
3004
3.35k
              if (isOpenMPSimdDirective(S.getDirectiveKind())) {
3005
1.64k
                CGF.EmitOMPSimdInit(S, IsMonotonic);
3006
1.71k
              } else if (const auto *C = S.getSingleClause<OMPOrderClause>()) {
3007
5
                if (C->getKind() == OMPC_ORDER_concurrent)
3008
5
                  CGF.LoopStack.setParallel(/*Enable=*/true);
3009
5
              }
3010
3.35k
            },
3011
3.36k
            [IVSize, IVSigned, Ordered, IL, LB, UB, ST, StaticChunkedOne, Chunk,
3012
3.36k
             &S, ScheduleKind, LoopExit,
3013
3.40k
             &LoopScope](CodeGenFunction &CGF, PrePostActionTy &) {
3014
              // OpenMP [2.7.1, Loop Construct, Description, table 2-1]
3015
              // When no chunk_size is specified, the iteration space is divided
3016
              // into chunks that are approximately equal in size, and at most
3017
              // one chunk is distributed to each thread. Note that the size of
3018
              // the chunks is unspecified in this case.
3019
3.40k
              CGOpenMPRuntime::StaticRTInput StaticInit(
3020
3.40k
                  IVSize, IVSigned, Ordered, IL.getAddress(CGF),
3021
3.40k
                  LB.getAddress(CGF), UB.getAddress(CGF), ST.getAddress(CGF),
3022
3.18k
                  StaticChunkedOne ? 
Chunk221
: nullptr);
3023
3.40k
              CGF.CGM.getOpenMPRuntime().emitForStaticInit(
3024
3.40k
                  CGF, S.getBeginLoc(), S.getDirectiveKind(), ScheduleKind,
3025
3.40k
                  StaticInit);
3026
              // UB = min(UB, GlobalUB);
3027
3.40k
              if (!StaticChunkedOne)
3028
3.18k
                CGF.EmitIgnoredExpr(S.getEnsureUpperBound());
3029
              // IV = LB;
3030
3.40k
              CGF.EmitIgnoredExpr(S.getInit());
3031
              // For unchunked static schedule generate:
3032
              //
3033
              // while (idx <= UB) {
3034
              //   BODY;
3035
              //   ++idx;
3036
              // }
3037
              //
3038
              // For static schedule with chunk one:
3039
              //
3040
              // while (IV <= PrevUB) {
3041
              //   BODY;
3042
              //   IV += ST;
3043
              // }
3044
3.40k
              CGF.EmitOMPInnerLoop(
3045
3.40k
                  S, LoopScope.requiresCleanups(),
3046
221
                  StaticChunkedOne ? S.getCombinedParForInDistCond()
3047
3.18k
                                   : S.getCond(),
3048
3.18k
                  StaticChunkedOne ? 
S.getDistInc()221
: S.getInc(),
3049
3.40k
                  [&S, LoopExit](CodeGenFunction &CGF) {
3050
3.40k
                    emitOMPLoopBodyWithStopPoint(CGF, S, LoopExit);
3051
3.40k
                  },
3052
3.40k
                  [](CodeGenFunction &) {});
3053
3.40k
            });
3054
3.36k
        EmitBlock(LoopExit.getBlock());
3055
        // Tell the runtime we are done.
3056
3.43k
        auto &&CodeGen = [&S](CodeGenFunction &CGF) {
3057
3.43k
          CGF.CGM.getOpenMPRuntime().emitForStaticFinish(CGF, S.getEndLoc(),
3058
3.43k
                                                         S.getDirectiveKind());
3059
3.43k
        };
3060
3.36k
        OMPCancelStack.emitExit(*this, S.getDirectiveKind(), CodeGen);
3061
1.04k
      } else {
3062
        // Emit the outer loop, which requests its work chunk [LB..UB] from
3063
        // runtime and runs the inner loop to process it.
3064
1.04k
        const OMPLoopArguments LoopArguments(
3065
1.04k
            LB.getAddress(*this), UB.getAddress(*this), ST.getAddress(*this),
3066
1.04k
            IL.getAddress(*this), Chunk, EUB);
3067
1.04k
        EmitOMPForOuterLoop(ScheduleKind, IsMonotonic, S, LoopScope, Ordered,
3068
1.04k
                            LoopArguments, CGDispatchBounds);
3069
1.04k
      }
3070
4.41k
      if (isOpenMPSimdDirective(S.getDirectiveKind())) {
3071
2.11k
        EmitOMPSimdFinal(S, [IL, &S](CodeGenFunction &CGF) {
3072
2.11k
          return CGF.Builder.CreateIsNotNull(
3073
2.11k
              CGF.EmitLoadOfScalar(IL, S.getBeginLoc()));
3074
2.11k
        });
3075
2.11k
      }
3076
4.41k
      EmitOMPReductionClauseFinal(
3077
4.41k
          S, /*ReductionKind=*/isOpenMPSimdDirective(S.getDirectiveKind())
3078
2.11k
                 ? /*Parallel and Simd*/ OMPD_parallel_for_simd
3079
2.30k
                 : /*Parallel only*/ OMPD_parallel);
3080
      // Emit post-update of the reduction variables if IsLastIter != 0.
3081
4.41k
      emitPostUpdateForReductionClause(
3082
0
          *this, S, [IL, &S](CodeGenFunction &CGF) {
3083
0
            return CGF.Builder.CreateIsNotNull(
3084
0
                CGF.EmitLoadOfScalar(IL, S.getBeginLoc()));
3085
0
          });
3086
      // Emit final copy of the lastprivate variables if IsLastIter != 0.
3087
4.41k
      if (HasLastprivateClause)
3088
196
        EmitOMPLastprivateClauseFinal(
3089
196
            S, isOpenMPSimdDirective(S.getDirectiveKind()),
3090
196
            Builder.CreateIsNotNull(EmitLoadOfScalar(IL, S.getBeginLoc())));
3091
4.41k
    }
3092
134
    EmitOMPLinearClauseFinal(S, [IL, &S](CodeGenFunction &CGF) {
3093
134
      return CGF.Builder.CreateIsNotNull(
3094
134
          CGF.EmitLoadOfScalar(IL, S.getBeginLoc()));
3095
134
    });
3096
4.41k
    DoacrossCleanupScope.ForceCleanup();
3097
    // We're now done with the loop, so jump to the continuation block.
3098
4.41k
    if (ContBlock) {
3099
848
      EmitBranch(ContBlock);
3100
848
      EmitBlock(ContBlock, /*IsFinished=*/true);
3101
848
    }
3102
4.41k
  }
3103
4.41k
  return HasLastprivateClause;
3104
4.47k
}
3105
3106
/// The following two functions generate expressions for the loop lower
3107
/// and upper bounds in case of static and dynamic (dispatch) schedule
3108
/// of the associated 'for' or 'distribute' loop.
3109
static std::pair<LValue, LValue>
3110
1.89k
emitForLoopBounds(CodeGenFunction &CGF, const OMPExecutableDirective &S) {
3111
1.89k
  const auto &LS = cast<OMPLoopDirective>(S);
3112
1.89k
  LValue LB =
3113
1.89k
      EmitOMPHelperVar(CGF, cast<DeclRefExpr>(LS.getLowerBoundVariable()));
3114
1.89k
  LValue UB =
3115
1.89k
      EmitOMPHelperVar(CGF, cast<DeclRefExpr>(LS.getUpperBoundVariable()));
3116
1.89k
  return {LB, UB};
3117
1.89k
}
3118
3119
/// When dealing with dispatch schedules (e.g. dynamic, guided) we do not
3120
/// consider the lower and upper bound expressions generated by the
3121
/// worksharing loop support, but we use 0 and the iteration space size as
3122
/// constants
3123
static std::pair<llvm::Value *, llvm::Value *>
3124
emitDispatchForLoopBounds(CodeGenFunction &CGF, const OMPExecutableDirective &S,
3125
304
                          Address LB, Address UB) {
3126
304
  const auto &LS = cast<OMPLoopDirective>(S);
3127
304
  const Expr *IVExpr = LS.getIterationVariable();
3128
304
  const unsigned IVSize = CGF.getContext().getTypeSize(IVExpr->getType());
3129
304
  llvm::Value *LBVal = CGF.Builder.getIntN(IVSize, 0);
3130
304
  llvm::Value *UBVal = CGF.EmitScalarExpr(LS.getLastIteration());
3131
304
  return {LBVal, UBVal};
3132
304
}
3133
3134
/// Emits the code for the directive with inscan reductions.
/// The code is the following:
/// \code
/// size num_iters = <num_iters>;
/// <type> buffer[num_iters];
/// #pragma omp ...
/// for (i: 0..<num_iters>) {
///   <input phase>;
///   buffer[i] = red;
/// }
/// for (int k = 0; k != ceil(log2(num_iters)); ++k)
/// for (size cnt = last_iter; cnt >= pow(2, k); --cnt)
///   buffer[cnt] op= buffer[cnt-pow(2,k)];
/// #pragma omp ...
/// for (0..<num_iters>) {
///   red = InclusiveScan ? buffer[i] : buffer[i-1];
///   <scan phase>;
/// }
/// \endcode
///
/// \param NumIteratorsGen emits and returns the loop trip count (used to size
///        the per-reduction scan buffers).
/// \param FirstGen emits the loop for the input phase.
/// \param SecondGen emits the loop for the scan phase.
static void emitScanBasedDirective(
    CodeGenFunction &CGF, const OMPLoopDirective &S,
    llvm::function_ref<llvm::Value *(CodeGenFunction &)> NumIteratorsGen,
    llvm::function_ref<void(CodeGenFunction &)> FirstGen,
    llvm::function_ref<void(CodeGenFunction &)> SecondGen) {
  // Buffer length = trip count, widened/truncated to size_t.
  llvm::Value *OMPScanNumIterations = CGF.Builder.CreateIntCast(
      NumIteratorsGen(CGF), CGF.SizeTy, /*isSigned=*/false);
  // Collect the helper expressions from all inscan reduction clauses.
  SmallVector<const Expr *, 4> Shareds;
  SmallVector<const Expr *, 4> Privates;
  SmallVector<const Expr *, 4> ReductionOps;
  SmallVector<const Expr *, 4> LHSs;
  SmallVector<const Expr *, 4> RHSs;
  SmallVector<const Expr *, 4> CopyOps;
  SmallVector<const Expr *, 4> CopyArrayTemps;
  SmallVector<const Expr *, 4> CopyArrayElems;
  for (const auto *C : S.getClausesOfKind<OMPReductionClause>()) {
    assert(C->getModifier() == OMPC_REDUCTION_inscan &&
           "Only inscan reductions are expected.");
    Shareds.append(C->varlist_begin(), C->varlist_end());
    Privates.append(C->privates().begin(), C->privates().end());
    ReductionOps.append(C->reduction_ops().begin(), C->reduction_ops().end());
    LHSs.append(C->lhs_exprs().begin(), C->lhs_exprs().end());
    RHSs.append(C->rhs_exprs().begin(), C->rhs_exprs().end());
    CopyOps.append(C->copy_ops().begin(), C->copy_ops().end());
    CopyArrayTemps.append(C->copy_array_temps().begin(),
                          C->copy_array_temps().end());
    CopyArrayElems.append(C->copy_array_elems().begin(),
                          C->copy_array_elems().end());
  }
  {
    // Emit buffers for each reduction variables.
    // ReductionCodeGen is required to emit correctly the code for array
    // reductions.
    ReductionCodeGen RedCG(Shareds, Shareds, Privates, ReductionOps);
    unsigned Count = 0;
    auto *ITA = CopyArrayTemps.begin();
    for (const Expr *IRef : Privates) {
      const auto *PrivateVD = cast<VarDecl>(cast<DeclRefExpr>(IRef)->getDecl());
      // Emit variably modified arrays, used for arrays/array sections
      // reductions.
      if (PrivateVD->getType()->isVariablyModifiedType()) {
        RedCG.emitSharedOrigLValue(CGF, Count);
        RedCG.emitAggregateType(CGF, Count);
      }
      // Bind the VLA size expression of the temp buffer to the trip count so
      // the buffer gets one element per iteration.
      CodeGenFunction::OpaqueValueMapping DimMapping(
          CGF,
          cast<OpaqueValueExpr>(
              cast<VariableArrayType>((*ITA)->getType()->getAsArrayTypeUnsafe())
                  ->getSizeExpr()),
          RValue::get(OMPScanNumIterations));
      // Emit temp buffer.
      CGF.EmitVarDecl(*cast<VarDecl>(cast<DeclRefExpr>(*ITA)->getDecl()));
      ++ITA;
      ++Count;
    }
  }
  CodeGenFunction::ParentLoopDirectiveForScanRegion ScanRegion(CGF, S);
  {
    // Emit loop with input phase:
    // #pragma omp ...
    // for (i: 0..<num_iters>) {
    //   <input phase>;
    //   buffer[i] = red;
    // }
    CGF.OMPFirstScanLoop = true;
    CodeGenFunction::OMPLocalDeclMapRAII Scope(CGF);
    FirstGen(CGF);
  }
  // Emit prefix reduction:
  // for (int k = 0; k <= ceil(log2(n)); ++k)
  llvm::BasicBlock *InputBB = CGF.Builder.GetInsertBlock();
  llvm::BasicBlock *LoopBB = CGF.createBasicBlock("omp.outer.log.scan.body");
  llvm::BasicBlock *ExitBB = CGF.createBasicBlock("omp.outer.log.scan.exit");
  // Outer-loop trip count: ceil(log2(num_iters)), computed via the llvm.log2
  // and llvm.ceil intrinsics on a double, then converted back to int.
  llvm::Function *F = CGF.CGM.getIntrinsic(llvm::Intrinsic::log2, CGF.DoubleTy);
  llvm::Value *Arg =
      CGF.Builder.CreateUIToFP(OMPScanNumIterations, CGF.DoubleTy);
  llvm::Value *LogVal = CGF.EmitNounwindRuntimeCall(F, Arg);
  F = CGF.CGM.getIntrinsic(llvm::Intrinsic::ceil, CGF.DoubleTy);
  LogVal = CGF.EmitNounwindRuntimeCall(F, LogVal);
  LogVal = CGF.Builder.CreateFPToUI(LogVal, CGF.IntTy);
  // NMin1 = num_iters - 1, the starting index of the inner (descending) loop.
  llvm::Value *NMin1 = CGF.Builder.CreateNUWSub(
      OMPScanNumIterations, llvm::ConstantInt::get(CGF.SizeTy, 1));
  // RAII: pin an artificial debug location for the emitted scan loops.
  auto DL = ApplyDebugLocation::CreateDefaultArtificial(CGF, S.getBeginLoc());
  CGF.EmitBlock(LoopBB);
  auto *Counter = CGF.Builder.CreatePHI(CGF.IntTy, 2);
  // size pow2k = 1;
  auto *Pow2K = CGF.Builder.CreatePHI(CGF.SizeTy, 2);
  Counter->addIncoming(llvm::ConstantInt::get(CGF.IntTy, 0), InputBB);
  Pow2K->addIncoming(llvm::ConstantInt::get(CGF.SizeTy, 1), InputBB);
  // for (size i = n - 1; i >= 2 ^ k; --i)
  //   tmp[i] op= tmp[i-pow2k];
  llvm::BasicBlock *InnerLoopBB =
      CGF.createBasicBlock("omp.inner.log.scan.body");
  llvm::BasicBlock *InnerExitBB =
      CGF.createBasicBlock("omp.inner.log.scan.exit");
  // Skip the inner loop entirely when n-1 < pow2k.
  llvm::Value *CmpI = CGF.Builder.CreateICmpUGE(NMin1, Pow2K);
  CGF.Builder.CreateCondBr(CmpI, InnerLoopBB, InnerExitBB);
  CGF.EmitBlock(InnerLoopBB);
  auto *IVal = CGF.Builder.CreatePHI(CGF.SizeTy, 2);
  IVal->addIncoming(NMin1, LoopBB);
  {
    // Privatize the LHS/RHS reduction helpers to buffer[i] and
    // buffer[i - pow2k] respectively, then emit "buffer[i] op= ..." via the
    // runtime's simple-reduction path.
    CodeGenFunction::OMPPrivateScope PrivScope(CGF);
    auto *ILHS = LHSs.begin();
    auto *IRHS = RHSs.begin();
    for (const Expr *CopyArrayElem : CopyArrayElems) {
      const auto *LHSVD = cast<VarDecl>(cast<DeclRefExpr>(*ILHS)->getDecl());
      const auto *RHSVD = cast<VarDecl>(cast<DeclRefExpr>(*IRHS)->getDecl());
      Address LHSAddr = Address::invalid();
      {
        // LHS element: buffer[i].
        CodeGenFunction::OpaqueValueMapping IdxMapping(
            CGF,
            cast<OpaqueValueExpr>(
                cast<ArraySubscriptExpr>(CopyArrayElem)->getIdx()),
            RValue::get(IVal));
        LHSAddr = CGF.EmitLValue(CopyArrayElem).getAddress(CGF);
      }
      PrivScope.addPrivate(LHSVD, [LHSAddr]() { return LHSAddr; });
      Address RHSAddr = Address::invalid();
      {
        // RHS element: buffer[i - pow2k].
        llvm::Value *OffsetIVal = CGF.Builder.CreateNUWSub(IVal, Pow2K);
        CodeGenFunction::OpaqueValueMapping IdxMapping(
            CGF,
            cast<OpaqueValueExpr>(
                cast<ArraySubscriptExpr>(CopyArrayElem)->getIdx()),
            RValue::get(OffsetIVal));
        RHSAddr = CGF.EmitLValue(CopyArrayElem).getAddress(CGF);
      }
      PrivScope.addPrivate(RHSVD, [RHSAddr]() { return RHSAddr; });
      ++ILHS;
      ++IRHS;
    }
    PrivScope.Privatize();
    CGF.CGM.getOpenMPRuntime().emitReduction(
        CGF, S.getEndLoc(), Privates, LHSs, RHSs, ReductionOps,
        {/*WithNowait=*/true, /*SimpleReduction=*/true, OMPD_unknown});
  }
  // --i; loop back while i >= pow2k.
  llvm::Value *NextIVal =
      CGF.Builder.CreateNUWSub(IVal, llvm::ConstantInt::get(CGF.SizeTy, 1));
  IVal->addIncoming(NextIVal, CGF.Builder.GetInsertBlock());
  CmpI = CGF.Builder.CreateICmpUGE(NextIVal, Pow2K);
  CGF.Builder.CreateCondBr(CmpI, InnerLoopBB, InnerExitBB);
  CGF.EmitBlock(InnerExitBB);
  // ++k;
  llvm::Value *Next =
      CGF.Builder.CreateNUWAdd(Counter, llvm::ConstantInt::get(CGF.IntTy, 1));
  Counter->addIncoming(Next, CGF.Builder.GetInsertBlock());
  // pow2k <<= 1;
  llvm::Value *NextPow2K = CGF.Builder.CreateShl(Pow2K, 1, "", /*HasNUW=*/true);
  Pow2K->addIncoming(NextPow2K, CGF.Builder.GetInsertBlock());
  // Loop back while k != ceil(log2(n)).
  llvm::Value *Cmp = CGF.Builder.CreateICmpNE(Next, LogVal);
  CGF.Builder.CreateCondBr(Cmp, LoopBB, ExitBB);
  auto DL1 = ApplyDebugLocation::CreateDefaultArtificial(CGF, S.getEndLoc());
  CGF.EmitBlock(ExitBB);

  // Emit the second loop (scan phase), which reads the prefix sums back.
  CGF.OMPFirstScanLoop = false;
  SecondGen(CGF);
}
3309
3310
static bool emitWorksharingDirective(CodeGenFunction &CGF,
3311
                                     const OMPLoopDirective &S,
3312
897
                                     bool HasCancel) {
3313
897
  bool HasLastprivates;
3314
897
  if (llvm::any_of(S.getClausesOfKind<OMPReductionClause>(),
3315
166
                   [](const OMPReductionClause *C) {
3316
166
                     return C->getModifier() == OMPC_REDUCTION_inscan;
3317
16
                   })) {
3318
16
    const auto &&NumIteratorsGen = [&S](CodeGenFunction &CGF) {
3319
16
      CodeGenFunction::OMPLocalDeclMapRAII Scope(CGF);
3320
16
      OMPLoopScope LoopScope(CGF, S);
3321
16
      return CGF.EmitScalarExpr(S.getNumIterations());
3322
16
    };
3323
16
    const auto &&FirstGen = [&S, HasCancel](CodeGenFunction &CGF) {
3324
16
      CodeGenFunction::OMPCancelStackRAII CancelRegion(
3325
16
          CGF, S.getDirectiveKind(), HasCancel);
3326
16
      (void)CGF.EmitOMPWorksharingLoop(S, S.getEnsureUpperBound(),
3327
16
                                       emitForLoopBounds,
3328
16
                                       emitDispatchForLoopBounds);
3329
      // Emit an implicit barrier at the end.
3330
16
      CGF.CGM.getOpenMPRuntime().emitBarrierCall(CGF, S.getBeginLoc(),
3331
16
                                                 OMPD_for);
3332
16
    };
3333
16
    const auto &&SecondGen = [&S, HasCancel,
3334
16
                              &HasLastprivates](CodeGenFunction &CGF) {
3335
16
      CodeGenFunction::OMPCancelStackRAII CancelRegion(
3336
16
          CGF, S.getDirectiveKind(), HasCancel);
3337
16
      HasLastprivates = CGF.EmitOMPWorksharingLoop(S, S.getEnsureUpperBound(),
3338
16
                                                   emitForLoopBounds,
3339
16
                                                   emitDispatchForLoopBounds);
3340
16
    };
3341
16
    emitScanBasedDirective(CGF, S, NumIteratorsGen, FirstGen, SecondGen);
3342
881
  } else {
3343
881
    CodeGenFunction::OMPCancelStackRAII CancelRegion(CGF, S.getDirectiveKind(),
3344
881
                                                     HasCancel);
3345
881
    HasLastprivates = CGF.EmitOMPWorksharingLoop(S, S.getEnsureUpperBound(),
3346
881
                                                 emitForLoopBounds,
3347
881
                                                 emitDispatchForLoopBounds);
3348
881
  }
3349
897
  return HasLastprivates;
3350
897
}
3351
3352
345
void CodeGenFunction::EmitOMPForDirective(const OMPForDirective &S) {
3353
345
  bool HasLastprivates = false;
3354
345
  auto &&CodeGen = [&S, &HasLastprivates](CodeGenFunction &CGF,
3355
345
                                          PrePostActionTy &) {
3356
345
    HasLastprivates = emitWorksharingDirective(CGF, S, S.hasCancel());
3357
345
  };
3358
345
  {
3359
345
    auto LPCRegion =
3360
345
        CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S);
3361
345
    OMPLexicalScope Scope(*this, S, OMPD_unknown);
3362
345
    CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_for, CodeGen,
3363
345
                                                S.hasCancel());
3364
345
  }
3365
345
3366
  // Emit an implicit barrier at the end.
3367
345
  if (!S.getSingleClause<OMPNowaitClause>() || 
HasLastprivates11
)
3368
334
    CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getBeginLoc(), OMPD_for);
3369
  // Check for outer lastprivate conditional update.
3370
345
  checkForLastprivateConditionalUpdate(*this, S);
3371
345
}
3372
3373
249
void CodeGenFunction::EmitOMPForSimdDirective(const OMPForSimdDirective &S) {
3374
249
  bool HasLastprivates = false;
3375
249
  auto &&CodeGen = [&S, &HasLastprivates](CodeGenFunction &CGF,
3376
249
                                          PrePostActionTy &) {
3377
249
    HasLastprivates = emitWorksharingDirective(CGF, S, /*HasCancel=*/false);
3378
249
  };
3379
249
  {
3380
249
    auto LPCRegion =
3381
249
        CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S);
3382
249
    OMPLexicalScope Scope(*this, S, OMPD_unknown);
3383
249
    CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_simd, CodeGen);
3384
249
  }
3385
249
3386
  // Emit an implicit barrier at the end.
3387
249
  if (!S.getSingleClause<OMPNowaitClause>() || 
HasLastprivates0
)
3388
249
    CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getBeginLoc(), OMPD_for);
3389
  // Check for outer lastprivate conditional update.
3390
249
  checkForLastprivateConditionalUpdate(*this, S);
3391
249
}
3392
3393
/// Create a temporary lvalue of type \p Ty named \p Name, optionally storing
/// \p Init into it as its initial value.
static LValue createSectionLVal(CodeGenFunction &CGF, QualType Ty,
                                const Twine &Name,
                                llvm::Value *Init = nullptr) {
  Address Tmp = CGF.CreateMemTemp(Ty, Name);
  LValue Result = CGF.MakeAddrLValue(Tmp, Ty);
  if (Init != nullptr)
    CGF.EmitStoreThroughLValue(RValue::get(Init), Result, /*isInit*/ true);
  return Result;
}
3401
3402
88
// Emits a 'sections'-style construct by lowering it to a statically scheduled
// worksharing loop whose body is a switch over the individual section
// statements: thread-local lb/ub/stride/islast helpers are created, the static
// runtime init is emitted, and each loop iteration dispatches to one section.
void CodeGenFunction::EmitSections(const OMPExecutableDirective &S) {
  const Stmt *CapturedStmt = S.getInnermostCapturedStmt()->getCapturedStmt();
  // CS is non-null when the region is a compound statement of sections; a
  // null CS means a single statement forms the only section.
  const auto *CS = dyn_cast<CompoundStmt>(CapturedStmt);
  bool HasLastprivates = false;
  auto &&CodeGen = [&S, CapturedStmt, CS,
                    &HasLastprivates](CodeGenFunction &CGF, PrePostActionTy &) {
    const ASTContext &C = CGF.getContext();
    QualType KmpInt32Ty =
        C.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/1);
    // Emit helper vars inits.
    LValue LB = createSectionLVal(CGF, KmpInt32Ty, ".omp.sections.lb.",
                                  CGF.Builder.getInt32(0));
    // Global upper bound = number of sections - 1 (0 for the single-statement
    // case).
    llvm::ConstantInt *GlobalUBVal = CS != nullptr
                                         ? CGF.Builder.getInt32(CS->size() - 1)
                                         : CGF.Builder.getInt32(0);
    LValue UB =
        createSectionLVal(CGF, KmpInt32Ty, ".omp.sections.ub.", GlobalUBVal);
    LValue ST = createSectionLVal(CGF, KmpInt32Ty, ".omp.sections.st.",
                                  CGF.Builder.getInt32(1));
    LValue IL = createSectionLVal(CGF, KmpInt32Ty, ".omp.sections.il.",
                                  CGF.Builder.getInt32(0));
    // Loop counter.
    LValue IV = createSectionLVal(CGF, KmpInt32Ty, ".omp.sections.iv.");
    // Opaque expressions stand in for IV/UB so synthesized AST nodes below
    // can reference the emitted lvalues.
    OpaqueValueExpr IVRefExpr(S.getBeginLoc(), KmpInt32Ty, VK_LValue);
    CodeGenFunction::OpaqueValueMapping OpaqueIV(CGF, &IVRefExpr, IV);
    OpaqueValueExpr UBRefExpr(S.getBeginLoc(), KmpInt32Ty, VK_LValue);
    CodeGenFunction::OpaqueValueMapping OpaqueUB(CGF, &UBRefExpr, UB);
    // Generate condition for loop.
    BinaryOperator *Cond = BinaryOperator::Create(
        C, &IVRefExpr, &UBRefExpr, BO_LE, C.BoolTy, VK_RValue, OK_Ordinary,
        S.getBeginLoc(), FPOptionsOverride());
    // Increment for loop counter.
    UnaryOperator *Inc = UnaryOperator::Create(
        C, &IVRefExpr, UO_PreInc, KmpInt32Ty, VK_RValue, OK_Ordinary,
        S.getBeginLoc(), true, FPOptionsOverride());
    auto &&BodyGen = [CapturedStmt, CS, &S, &IV](CodeGenFunction &CGF) {
      // Iterate through all sections and emit a switch construct:
      // switch (IV) {
      //   case 0:
      //     <SectionStmt[0]>;
      //     break;
      // ...
      //   case <NumSection> - 1:
      //     <SectionStmt[<NumSection> - 1]>;
      //     break;
      // }
      // .omp.sections.exit:
      llvm::BasicBlock *ExitBB = CGF.createBasicBlock(".omp.sections.exit");
      llvm::SwitchInst *SwitchStmt =
          CGF.Builder.CreateSwitch(CGF.EmitLoadOfScalar(IV, S.getBeginLoc()),
                                   ExitBB, CS == nullptr ? 1 : CS->size());
      if (CS) {
        unsigned CaseNumber = 0;
        for (const Stmt *SubStmt : CS->children()) {
          auto CaseBB = CGF.createBasicBlock(".omp.sections.case");
          CGF.EmitBlock(CaseBB);
          SwitchStmt->addCase(CGF.Builder.getInt32(CaseNumber), CaseBB);
          CGF.EmitStmt(SubStmt);
          CGF.EmitBranch(ExitBB);
          ++CaseNumber;
        }
      } else {
        // Single-statement region: one case covering the whole captured
        // statement.
        llvm::BasicBlock *CaseBB = CGF.createBasicBlock(".omp.sections.case");
        CGF.EmitBlock(CaseBB);
        SwitchStmt->addCase(CGF.Builder.getInt32(0), CaseBB);
        CGF.EmitStmt(CapturedStmt);
        CGF.EmitBranch(ExitBB);
      }
      CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
    };

    CodeGenFunction::OMPPrivateScope LoopScope(CGF);
    if (CGF.EmitOMPFirstprivateClause(S, LoopScope)) {
      // Emit implicit barrier to synchronize threads and avoid data races on
      // initialization of firstprivate variables and post-update of lastprivate
      // variables.
      CGF.CGM.getOpenMPRuntime().emitBarrierCall(
          CGF, S.getBeginLoc(), OMPD_unknown, /*EmitChecks=*/false,
          /*ForceSimpleCall=*/true);
    }
    CGF.EmitOMPPrivateClause(S, LoopScope);
    CGOpenMPRuntime::LastprivateConditionalRAII LPCRegion(CGF, S, IV);
    HasLastprivates = CGF.EmitOMPLastprivateClauseInit(S, LoopScope);
    CGF.EmitOMPReductionClauseInit(S, LoopScope);
    (void)LoopScope.Privatize();
    if (isOpenMPTargetExecutionDirective(S.getDirectiveKind()))
      CGF.CGM.getOpenMPRuntime().adjustTargetSpecificDataForLambdas(CGF, S);

    // Emit static non-chunked loop.
    OpenMPScheduleTy ScheduleKind;
    ScheduleKind.Schedule = OMPC_SCHEDULE_static;
    CGOpenMPRuntime::StaticRTInput StaticInit(
        /*IVSize=*/32, /*IVSigned=*/true, /*Ordered=*/false, IL.getAddress(CGF),
        LB.getAddress(CGF), UB.getAddress(CGF), ST.getAddress(CGF));
    CGF.CGM.getOpenMPRuntime().emitForStaticInit(
        CGF, S.getBeginLoc(), S.getDirectiveKind(), ScheduleKind, StaticInit);
    // UB = min(UB, GlobalUB);
    llvm::Value *UBVal = CGF.EmitLoadOfScalar(UB, S.getBeginLoc());
    llvm::Value *MinUBGlobalUB = CGF.Builder.CreateSelect(
        CGF.Builder.CreateICmpSLT(UBVal, GlobalUBVal), UBVal, GlobalUBVal);
    CGF.EmitStoreOfScalar(MinUBGlobalUB, UB);
    // IV = LB;
    CGF.EmitStoreOfScalar(CGF.EmitLoadOfScalar(LB, S.getBeginLoc()), IV);
    // while (idx <= UB) { BODY; ++idx; }
    CGF.EmitOMPInnerLoop(S, /*RequiresCleanup=*/false, Cond, Inc, BodyGen,
                         [](CodeGenFunction &) {});
    // Tell the runtime we are done.
    auto &&CodeGen = [&S](CodeGenFunction &CGF) {
      CGF.CGM.getOpenMPRuntime().emitForStaticFinish(CGF, S.getEndLoc(),
                                                     S.getDirectiveKind());
    };
    CGF.OMPCancelStack.emitExit(CGF, S.getDirectiveKind(), CodeGen);
    CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_parallel);
    // Emit post-update of the reduction variables if IsLastIter != 0.
    emitPostUpdateForReductionClause(CGF, S, [IL, &S](CodeGenFunction &CGF) {
      return CGF.Builder.CreateIsNotNull(
          CGF.EmitLoadOfScalar(IL, S.getBeginLoc()));
    });

    // Emit final copy of the lastprivate variables if IsLastIter != 0.
    if (HasLastprivates)
      CGF.EmitOMPLastprivateClauseFinal(
          S, /*NoFinals=*/false,
          CGF.Builder.CreateIsNotNull(
              CGF.EmitLoadOfScalar(IL, S.getBeginLoc())));
  };

  // Cancellation is only possible for 'sections' / 'parallel sections'.
  bool HasCancel = false;
  if (auto *OSD = dyn_cast<OMPSectionsDirective>(&S))
    HasCancel = OSD->hasCancel();
  else if (auto *OPSD = dyn_cast<OMPParallelSectionsDirective>(&S))
    HasCancel = OPSD->hasCancel();
  OMPCancelStackRAII CancelRegion(*this, S.getDirectiveKind(), HasCancel);
  CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_sections, CodeGen,
                                              HasCancel);
  // Emit barrier for lastprivates only if 'sections' directive has 'nowait'
  // clause. Otherwise the barrier will be generated by the codegen for the
  // directive.
  if (HasLastprivates && S.getSingleClause<OMPNowaitClause>()) {
    // Emit implicit barrier to synchronize threads and avoid data races on
    // initialization of firstprivate variables.
    CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getBeginLoc(),
                                           OMPD_unknown);
  }
}
3547
3548
62
void CodeGenFunction::EmitOMPSectionsDirective(const OMPSectionsDirective &S) {
3549
62
  {
3550
62
    auto LPCRegion =
3551
62
        CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S);
3552
62
    OMPLexicalScope Scope(*this, S, OMPD_unknown);
3553
62
    EmitSections(S);
3554
62
  }
3555
  // Emit an implicit barrier at the end.
3556
62
  if (!S.getSingleClause<OMPNowaitClause>()) {
3557
56
    CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getBeginLoc(),
3558
56
                                           OMPD_sections);
3559
56
  }
3560
  // Check for outer lastprivate conditional update.
3561
62
  checkForLastprivateConditionalUpdate(*this, S);
3562
62
}
3563
3564
54
void CodeGenFunction::EmitOMPSectionDirective(const OMPSectionDirective &S) {
3565
54
  LexicalScope Scope(*this, S.getSourceRange());
3566
54
  EmitStopPoint(&S);
3567
54
  EmitStmt(S.getAssociatedStmt());
3568
54
}
3569
3570
55
void CodeGenFunction::EmitOMPSingleDirective(const OMPSingleDirective &S) {
3571
55
  llvm::SmallVector<const Expr *, 8> CopyprivateVars;
3572
55
  llvm::SmallVector<const Expr *, 8> DestExprs;
3573
55
  llvm::SmallVector<const Expr *, 8> SrcExprs;
3574
55
  llvm::SmallVector<const Expr *, 8> AssignmentOps;
3575
  // Check if there are any 'copyprivate' clauses associated with this
3576
  // 'single' construct.
3577
  // Build a list of copyprivate variables along with helper expressions
3578
  // (<source>, <destination>, <destination>=<source> expressions)
3579
28
  for (const auto *C : S.getClausesOfKind<OMPCopyprivateClause>()) {
3580
28
    CopyprivateVars.append(C->varlists().begin(), C->varlists().end());
3581
28
    DestExprs.append(C->destination_exprs().begin(),
3582
28
                     C->destination_exprs().end());
3583
28
    SrcExprs.append(C->source_exprs().begin(), C->source_exprs().end());
3584
28
    AssignmentOps.append(C->assignment_ops().begin(),
3585
28
                         C->assignment_ops().end());
3586
28
  }
3587
  // Emit code for 'single' region along with 'copyprivate' clauses
3588
55
  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
3589
55
    Action.Enter(CGF);
3590
55
    OMPPrivateScope SingleScope(CGF);
3591
55
    (void)CGF.EmitOMPFirstprivateClause(S, SingleScope);
3592
55
    CGF.EmitOMPPrivateClause(S, SingleScope);
3593
55
    (void)SingleScope.Privatize();
3594
55
    CGF.EmitStmt(S.getInnermostCapturedStmt()->getCapturedStmt());
3595
55
  };
3596
55
  {
3597
55
    auto LPCRegion =
3598
55
        CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S);
3599
55
    OMPLexicalScope Scope(*this, S, OMPD_unknown);
3600
55
    CGM.getOpenMPRuntime().emitSingleRegion(*this, CodeGen, S.getBeginLoc(),
3601
55
                                            CopyprivateVars, DestExprs,
3602
55
                                            SrcExprs, AssignmentOps);
3603
55
  }
3604
  // Emit an implicit barrier at the end (to avoid data race on firstprivate
3605
  // init or if no 'nowait' clause was specified and no 'copyprivate' clause).
3606
55
  if (!S.getSingleClause<OMPNowaitClause>() && 
CopyprivateVars.empty()48
) {
3607
20
    CGM.getOpenMPRuntime().emitBarrierCall(
3608
20
        *this, S.getBeginLoc(),
3609
20
        S.getSingleClause<OMPNowaitClause>() ? 
OMPD_unknown0
: OMPD_single);
3610
20
  }
3611
  // Check for outer lastprivate conditional update.
3612
55
  checkForLastprivateConditionalUpdate(*this, S);
3613
55
}
3614
3615
39
/// Emit the body of a 'master' construct through the runtime's master-region
/// helper.
static void emitMaster(CodeGenFunction &CGF, const OMPExecutableDirective &S) {
  auto &&BodyGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
    Action.Enter(CGF);
    CGF.EmitStmt(S.getRawStmt());
  };
  CGF.CGM.getOpenMPRuntime().emitMasterRegion(CGF, BodyGen, S.getBeginLoc());
}
3622
3623
15
void CodeGenFunction::EmitOMPMasterDirective(const OMPMasterDirective &S) {
  if (CGM.getLangOpts().OpenMPIRBuilder) {
    // New lowering path: let the OpenMPIRBuilder construct the master region.
    llvm::OpenMPIRBuilder &OMPBuilder = CGM.getOpenMPRuntime().getOMPBuilder();
    using InsertPointTy = llvm::OpenMPIRBuilder::InsertPointTy;

    const Stmt *MasterRegionBodyStmt = S.getAssociatedStmt();

    // Finalization callback: run the region's cleanups at the given point.
    auto FiniCB = [this](InsertPointTy IP) {
      OMPBuilderCBHelpers::FinalizeOMPRegion(*this, IP);
    };

    // Body callback: emit the associated statement inline inside the region
    // skeleton that the builder creates.
    auto BodyGenCB = [MasterRegionBodyStmt, this](InsertPointTy AllocaIP,
                                                  InsertPointTy CodeGenIP,
                                                  llvm::BasicBlock &FiniBB) {
      OMPBuilderCBHelpers::InlinedRegionBodyRAII IRB(*this, AllocaIP, FiniBB);
      OMPBuilderCBHelpers::EmitOMPRegionBody(*this, MasterRegionBodyStmt,
                                             CodeGenIP, FiniBB);
    };

    LexicalScope Scope(*this, S.getSourceRange());
    EmitStopPoint(&S);
    // Resume normal emission at the insertion point the builder returns.
    Builder.restoreIP(OMPBuilder.CreateMaster(Builder, BodyGenCB, FiniCB));

    return;
  }
  // Classic lowering via the CGOpenMPRuntime helper.
  LexicalScope Scope(*this, S.getSourceRange());
  EmitStopPoint(&S);
  emitMaster(*this, S);
}
3652
3653
56
void CodeGenFunction::EmitOMPCriticalDirective(const OMPCriticalDirective &S) {
  if (CGM.getLangOpts().OpenMPIRBuilder) {
    // New lowering path: let the OpenMPIRBuilder construct the critical
    // section (named lock) around the region body.
    llvm::OpenMPIRBuilder &OMPBuilder = CGM.getOpenMPRuntime().getOMPBuilder();
    using InsertPointTy = llvm::OpenMPIRBuilder::InsertPointTy;

    const Stmt *CriticalRegionBodyStmt = S.getAssociatedStmt();
    const Expr *Hint = nullptr;
    if (const auto *HintClause = S.getSingleClause<OMPHintClause>())
      Hint = HintClause->getHint();

    // TODO: This is slightly different from what's currently being done in
    // clang. Fix the Int32Ty to IntPtrTy (pointer width size) when everything
    // about typing is final.
    // Evaluate the optional 'hint' expression before entering the region so
    // its side effects happen outside the lock.
    llvm::Value *HintInst = nullptr;
    if (Hint)
      HintInst =
          Builder.CreateIntCast(EmitScalarExpr(Hint), CGM.Int32Ty, false);

    // Finalization callback: run the region's cleanups at the given point.
    auto FiniCB = [this](InsertPointTy IP) {
      OMPBuilderCBHelpers::FinalizeOMPRegion(*this, IP);
    };

    // Body callback: emit the associated statement inline inside the region
    // skeleton that the builder creates.
    auto BodyGenCB = [CriticalRegionBodyStmt, this](InsertPointTy AllocaIP,
                                                    InsertPointTy CodeGenIP,
                                                    llvm::BasicBlock &FiniBB) {
      OMPBuilderCBHelpers::InlinedRegionBodyRAII IRB(*this, AllocaIP, FiniBB);
      OMPBuilderCBHelpers::EmitOMPRegionBody(*this, CriticalRegionBodyStmt,
                                             CodeGenIP, FiniBB);
    };

    LexicalScope Scope(*this, S.getSourceRange());
    EmitStopPoint(&S);
    // The critical section is keyed by the directive's name, so distinct
    // names get distinct locks.
    Builder.restoreIP(OMPBuilder.CreateCritical(
        Builder, BodyGenCB, FiniCB, S.getDirectiveName().getAsString(),
        HintInst));

    return;
  }

  // Classic lowering via the CGOpenMPRuntime helper.
  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
    Action.Enter(CGF);
    CGF.EmitStmt(S.getAssociatedStmt());
  };
  const Expr *Hint = nullptr;
  if (const auto *HintClause = S.getSingleClause<OMPHintClause>())
    Hint = HintClause->getHint();
  LexicalScope Scope(*this, S.getSourceRange());
  EmitStopPoint(&S);
  CGM.getOpenMPRuntime().emitCriticalRegion(*this,
                                            S.getDirectiveName().getAsString(),
                                            CodeGen, S.getBeginLoc(), Hint);
}
3705
3706
void CodeGenFunction::EmitOMPParallelForDirective(
3707
207
    const OMPParallelForDirective &S) {
3708
  // Emit directive as a combined directive that consists of two implicit
3709
  // directives: 'parallel' with 'for' directive.
3710
207
  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
3711
207
    Action.Enter(CGF);
3712
207
    (void)emitWorksharingDirective(CGF, S, S.hasCancel());
3713
207
  };
3714
207
  {
3715
207
    auto LPCRegion =
3716
207
        CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S);
3717
207
    emitCommonOMPParallelDirective(*this, S, OMPD_for, CodeGen,
3718
207
                                   emitEmptyBoundParameters);
3719
207
  }
3720
  // Check for outer lastprivate conditional update.
3721
207
  checkForLastprivateConditionalUpdate(*this, S);
3722
207
}
3723
3724
void CodeGenFunction::EmitOMPParallelForSimdDirective(
3725
96
    const OMPParallelForSimdDirective &S) {
3726
  // Emit directive as a combined directive that consists of two implicit
3727
  // directives: 'parallel' with 'for' directive.
3728
96
  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
3729
96
    Action.Enter(CGF);
3730
96
    (void)emitWorksharingDirective(CGF, S, /*HasCancel=*/false);
3731
96
  };
3732
96
  {
3733
96
    auto LPCRegion =
3734
96
        CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S);
3735
96
    emitCommonOMPParallelDirective(*this, S, OMPD_for_simd, CodeGen,
3736
96
                                   emitEmptyBoundParameters);
3737
96
  }
3738
  // Check for outer lastprivate conditional update.
3739
96
  checkForLastprivateConditionalUpdate(*this, S);
3740
96
}
3741
3742
void CodeGenFunction::EmitOMPParallelMasterDirective(
    const OMPParallelMasterDirective &S) {
  // Emit directive as a combined directive that consists of two implicit
  // directives: 'parallel' with 'master' directive.
  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
    Action.Enter(CGF);
    OMPPrivateScope PrivateScope(CGF);
    bool Copyins = CGF.EmitOMPCopyinClause(S);
    (void)CGF.EmitOMPFirstprivateClause(S, PrivateScope);
    if (Copyins) {
      // Emit an implicit barrier to synchronize threads and avoid data races
      // while propagating the master thread's values of threadprivate
      // variables to the local instances in all other implicit threads.
      CGF.CGM.getOpenMPRuntime().emitBarrierCall(
          CGF, S.getBeginLoc(), OMPD_unknown, /*EmitChecks=*/false,
          /*ForceSimpleCall=*/true);
    }
    // Privatization must be complete before entering the master region.
    CGF.EmitOMPPrivateClause(S, PrivateScope);
    CGF.EmitOMPReductionClauseInit(S, PrivateScope);
    (void)PrivateScope.Privatize();
    emitMaster(CGF, S);
    CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_parallel);
  };
  {
    auto LPCRegion =
        CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S);
    emitCommonOMPParallelDirective(*this, S, OMPD_master, CodeGen,
                                   emitEmptyBoundParameters);
    emitPostUpdateForReductionClause(*this, S,
                                     [](CodeGenFunction &) { return nullptr; });
  }
  // Check for outer lastprivate conditional update.
  checkForLastprivateConditionalUpdate(*this, S);
}
3776
3777
void CodeGenFunction::EmitOMPParallelSectionsDirective(
3778
26
    const OMPParallelSectionsDirective &S) {
3779
  // Emit directive as a combined directive that consists of two implicit
3780
  // directives: 'parallel' with 'sections' directive.
3781
26
  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
3782
26
    Action.Enter(CGF);
3783
26
    CGF.EmitSections(S);
3784
26
  };
3785
26
  {
3786
26
    auto LPCRegion =
3787
26
        CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S);
3788
26
    emitCommonOMPParallelDirective(*this, S, OMPD_sections, CodeGen,
3789
26
                                   emitEmptyBoundParameters);
3790
26
  }
3791
  // Check for outer lastprivate conditional update.
3792
26
  checkForLastprivateConditionalUpdate(*this, S);
3793
26
}
3794
3795
namespace {
3796
/// Get the list of variables declared in the context of the untied tasks.
3797
class CheckVarsEscapingUntiedTaskDeclContext final
3798
    : public ConstStmtVisitor<CheckVarsEscapingUntiedTaskDeclContext> {
3799
  llvm::SmallVector<const VarDecl *, 4> PrivateDecls;
3800
3801
public:
3802
16
  explicit CheckVarsEscapingUntiedTaskDeclContext() = default;
3803
16
  virtual ~CheckVarsEscapingUntiedTaskDeclContext() = default;
3804
6
  void VisitDeclStmt(const DeclStmt *S) {
3805
6
    if (!S)
3806
0
      return;
3807
    // Need to privatize only local vars, static locals can be processed as is.
3808
10
    
for (const Decl *D : S->decls())6
{
3809
10
      if (const auto *VD = dyn_cast_or_null<VarDecl>(D))
3810
8
        if (VD->hasLocalStorage())
3811
8
          PrivateDecls.push_back(VD);
3812
10
    }
3813
6
  }
3814
16
  void VisitOMPExecutableDirective(const OMPExecutableDirective *) { return; }
3815
0
  void VisitCapturedStmt(const CapturedStmt *) { return; }
3816
0
  void VisitLambdaExpr(const LambdaExpr *) { return; }
3817
0
  void VisitBlockExpr(const BlockExpr *) { return; }
3818
108
  void VisitStmt(const Stmt *S) {
3819
108
    if (!S)
3820
0
      return;
3821
108
    for (const Stmt *Child : S->children())
3822
114
      if (Child)
3823
114
        Visit(Child);
3824
108
  }
3825
3826
  /// Swaps list of vars with the provided one.
3827
32
  ArrayRef<const VarDecl *> getPrivateDecls() const { return PrivateDecls; }
3828
};
3829
} // anonymous namespace
3830
3831
/// Common codegen for task-based directives: gathers clause data into \p Data,
/// outlines the task body via \p BodyGen into a task entry function, and then
/// calls \p TaskGen to emit the actual runtime task-creation code.
void CodeGenFunction::EmitOMPTaskBasedDirective(
    const OMPExecutableDirective &S, const OpenMPDirectiveKind CapturedRegion,
    const RegionCodeGenTy &BodyGen, const TaskGenTy &TaskGen,
    OMPTaskDataTy &Data) {
  // Emit outlined function for task construct.
  const CapturedStmt *CS = S.getCapturedStmt(CapturedRegion);
  auto I = CS->getCapturedDecl()->param_begin();
  auto PartId = std::next(I);
  auto TaskT = std::next(I, 4);
  // Check if the task is final
  if (const auto *Clause = S.getSingleClause<OMPFinalClause>()) {
    // If the condition constant folds and can be elided, try to avoid emitting
    // the condition and the dead arm of the if/else.
    const Expr *Cond = Clause->getCondition();
    bool CondConstant;
    if (ConstantFoldsToSimpleInteger(Cond, CondConstant))
      Data.Final.setInt(CondConstant);
    else
      Data.Final.setPointer(EvaluateExprAsBool(Cond));
  } else {
    // By default the task is not final.
    Data.Final.setInt(/*IntVal=*/false);
  }
  // Check if the task has 'priority' clause.
  if (const auto *Clause = S.getSingleClause<OMPPriorityClause>()) {
    const Expr *Prio = Clause->getPriority();
    Data.Priority.setInt(/*IntVal=*/true);
    // The runtime expects the priority as a signed 32-bit integer.
    Data.Priority.setPointer(EmitScalarConversion(
        EmitScalarExpr(Prio), Prio->getType(),
        getContext().getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/1),
        Prio->getExprLoc()));
  }
  // The first function argument for tasks is a thread id, the second one is a
  // part id (0 for tied tasks, >=0 for untied task).
  llvm::DenseSet<const VarDecl *> EmittedAsPrivate;
  // Get list of private variables. EmittedAsPrivate de-duplicates variables
  // that appear in more than one clause of the same kind.
  for (const auto *C : S.getClausesOfKind<OMPPrivateClause>()) {
    auto IRef = C->varlist_begin();
    for (const Expr *IInit : C->private_copies()) {
      const auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
      if (EmittedAsPrivate.insert(OrigVD->getCanonicalDecl()).second) {
        Data.PrivateVars.push_back(*IRef);
        Data.PrivateCopies.push_back(IInit);
      }
      ++IRef;
    }
  }
  EmittedAsPrivate.clear();
  // Get list of firstprivate variables.
  for (const auto *C : S.getClausesOfKind<OMPFirstprivateClause>()) {
    auto IRef = C->varlist_begin();
    auto IElemInitRef = C->inits().begin();
    for (const Expr *IInit : C->private_copies()) {
      const auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
      if (EmittedAsPrivate.insert(OrigVD->getCanonicalDecl()).second) {
        Data.FirstprivateVars.push_back(*IRef);
        Data.FirstprivateCopies.push_back(IInit);
        Data.FirstprivateInits.push_back(*IElemInitRef);
      }
      ++IRef;
      ++IElemInitRef;
    }
  }
  // Get list of lastprivate variables (for taskloops).
  llvm::DenseMap<const VarDecl *, const DeclRefExpr *> LastprivateDstsOrigs;
  for (const auto *C : S.getClausesOfKind<OMPLastprivateClause>()) {
    auto IRef = C->varlist_begin();
    auto ID = C->destination_exprs().begin();
    for (const Expr *IInit : C->private_copies()) {
      const auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
      if (EmittedAsPrivate.insert(OrigVD->getCanonicalDecl()).second) {
        Data.LastprivateVars.push_back(*IRef);
        Data.LastprivateCopies.push_back(IInit);
      }
      // Remember destination->original mapping even for duplicates, so the
      // final copy-out targets the right variable.
      LastprivateDstsOrigs.insert(
          {cast<VarDecl>(cast<DeclRefExpr>(*ID)->getDecl()),
           cast<DeclRefExpr>(*IRef)});
      ++IRef;
      ++ID;
    }
  }
  // Collect reduction clause data and emit the task reduction descriptor.
  SmallVector<const Expr *, 4> LHSs;
  SmallVector<const Expr *, 4> RHSs;
  for (const auto *C : S.getClausesOfKind<OMPReductionClause>()) {
    Data.ReductionVars.append(C->varlist_begin(), C->varlist_end());
    Data.ReductionOrigs.append(C->varlist_begin(), C->varlist_end());
    Data.ReductionCopies.append(C->privates().begin(), C->privates().end());
    Data.ReductionOps.append(C->reduction_ops().begin(),
                             C->reduction_ops().end());
    LHSs.append(C->lhs_exprs().begin(), C->lhs_exprs().end());
    RHSs.append(C->rhs_exprs().begin(), C->rhs_exprs().end());
  }
  Data.Reductions = CGM.getOpenMPRuntime().emitTaskReductionInit(
      *this, S.getBeginLoc(), LHSs, RHSs, Data);
  // Build list of dependences.
  for (const auto *C : S.getClausesOfKind<OMPDependClause>()) {
    OMPTaskDataTy::DependData &DD =
        Data.Dependences.emplace_back(C->getDependencyKind(), C->getModifier());
    DD.DepExprs.append(C->varlist_begin(), C->varlist_end());
  }
  // Get list of local vars for untied tasks.
  if (!Data.Tied) {
    CheckVarsEscapingUntiedTaskDeclContext Checker;
    Checker.Visit(S.getInnermostCapturedStmt()->getCapturedStmt());
    Data.PrivateLocals.append(Checker.getPrivateDecls().begin(),
                              Checker.getPrivateDecls().end());
  }
  // Body of the outlined task entry: materialize all private copies, resolve
  // reduction items, then run the user-provided BodyGen.
  auto &&CodeGen = [&Data, &S, CS, &BodyGen, &LastprivateDstsOrigs,
                    CapturedRegion](CodeGenFunction &CGF,
                                    PrePostActionTy &Action) {
    llvm::DenseMap<CanonicalDeclPtr<const VarDecl>, std::pair<Address, Address>>
        UntiedLocalVars;
    // Set proper addresses for generated private copies.
    OMPPrivateScope Scope(CGF);
    llvm::SmallVector<std::pair<const VarDecl *, Address>, 16> FirstprivatePtrs;
    if (!Data.PrivateVars.empty() || !Data.FirstprivateVars.empty() ||
        !Data.LastprivateVars.empty() || !Data.PrivateLocals.empty()) {
      llvm::FunctionType *CopyFnTy = llvm::FunctionType::get(
          CGF.Builder.getVoidTy(), {CGF.Builder.getInt8PtrTy()}, true);
      // Positions of the privates pointer and of the copy function among the
      // captured parameters of the task entry.
      enum { PrivatesParam = 2, CopyFnParam = 3 };
      llvm::Value *CopyFn = CGF.Builder.CreateLoad(
          CGF.GetAddrOfLocalVar(CS->getCapturedDecl()->getParam(CopyFnParam)));
      llvm::Value *PrivatesPtr = CGF.Builder.CreateLoad(CGF.GetAddrOfLocalVar(
          CS->getCapturedDecl()->getParam(PrivatesParam)));
      // Map privates.
      llvm::SmallVector<std::pair<const VarDecl *, Address>, 16> PrivatePtrs;
      llvm::SmallVector<llvm::Value *, 16> CallArgs;
      CallArgs.push_back(PrivatesPtr);
      // For each private-like variable, allocate a pointer slot that the copy
      // function fills in with the address of the in-task-storage copy.
      for (const Expr *E : Data.PrivateVars) {
        const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
        Address PrivatePtr = CGF.CreateMemTemp(
            CGF.getContext().getPointerType(E->getType()), ".priv.ptr.addr");
        PrivatePtrs.emplace_back(VD, PrivatePtr);
        CallArgs.push_back(PrivatePtr.getPointer());
      }
      for (const Expr *E : Data.FirstprivateVars) {
        const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
        Address PrivatePtr =
            CGF.CreateMemTemp(CGF.getContext().getPointerType(E->getType()),
                              ".firstpriv.ptr.addr");
        PrivatePtrs.emplace_back(VD, PrivatePtr);
        FirstprivatePtrs.emplace_back(VD, PrivatePtr);
        CallArgs.push_back(PrivatePtr.getPointer());
      }
      for (const Expr *E : Data.LastprivateVars) {
        const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
        Address PrivatePtr =
            CGF.CreateMemTemp(CGF.getContext().getPointerType(E->getType()),
                              ".lastpriv.ptr.addr");
        PrivatePtrs.emplace_back(VD, PrivatePtr);
        CallArgs.push_back(PrivatePtr.getPointer());
      }
      for (const VarDecl *VD : Data.PrivateLocals) {
        QualType Ty = VD->getType().getNonReferenceType();
        if (VD->getType()->isLValueReferenceType())
          Ty = CGF.getContext().getPointerType(Ty);
        if (isAllocatableDecl(VD))
          Ty = CGF.getContext().getPointerType(Ty);
        Address PrivatePtr = CGF.CreateMemTemp(
            CGF.getContext().getPointerType(Ty), ".local.ptr.addr");
        UntiedLocalVars.try_emplace(VD, PrivatePtr, Address::invalid());
        CallArgs.push_back(PrivatePtr.getPointer());
      }
      // Call the runtime-generated copy function to populate all pointers.
      CGF.CGM.getOpenMPRuntime().emitOutlinedFunctionCall(
          CGF, S.getBeginLoc(), {CopyFnTy, CopyFn}, CallArgs);
      // Lastprivate destinations are rewritten to refer to the original
      // (outer) variables for the final copy-out.
      for (const auto &Pair : LastprivateDstsOrigs) {
        const auto *OrigVD = cast<VarDecl>(Pair.second->getDecl());
        DeclRefExpr DRE(CGF.getContext(), const_cast<VarDecl *>(OrigVD),
                        /*RefersToEnclosingVariableOrCapture=*/
                            CGF.CapturedStmtInfo->lookup(OrigVD) != nullptr,
                        Pair.second->getType(), VK_LValue,
                        Pair.second->getExprLoc());
        Scope.addPrivate(Pair.first, [&CGF, &DRE]() {
          return CGF.EmitLValue(&DRE).getAddress(CGF);
        });
      }
      for (const auto &Pair : PrivatePtrs) {
        Address Replacement(CGF.Builder.CreateLoad(Pair.second),
                            CGF.getContext().getDeclAlign(Pair.first));
        Scope.addPrivate(Pair.first, [Replacement]() { return Replacement; });
      }
      // Adjust mapping for internal locals by mapping actual memory instead of
      // a pointer to this memory.
      for (auto &Pair : UntiedLocalVars) {
        if (isAllocatableDecl(Pair.first)) {
          // Allocatable vars hold an extra level of indirection.
          llvm::Value *Ptr = CGF.Builder.CreateLoad(Pair.second.first);
          Address Replacement(Ptr, CGF.getPointerAlign());
          Pair.getSecond().first = Replacement;
          Ptr = CGF.Builder.CreateLoad(Replacement);
          Replacement = Address(Ptr, CGF.getContext().getDeclAlign(Pair.first));
          Pair.getSecond().second = Replacement;
        } else {
          llvm::Value *Ptr = CGF.Builder.CreateLoad(Pair.second.first);
          Address Replacement(Ptr, CGF.getContext().getDeclAlign(Pair.first));
          Pair.getSecond().first = Replacement;
        }
      }
    }
    if (Data.Reductions) {
      // Firstprivates must be visible while resolving reduction items, so
      // privatize them in a temporary scope first.
      OMPPrivateScope FirstprivateScope(CGF);
      for (const auto &Pair : FirstprivatePtrs) {
        Address Replacement(CGF.Builder.CreateLoad(Pair.second),
                            CGF.getContext().getDeclAlign(Pair.first));
        FirstprivateScope.addPrivate(Pair.first,
                                     [Replacement]() { return Replacement; });
      }
      (void)FirstprivateScope.Privatize();
      OMPLexicalScope LexScope(CGF, S, CapturedRegion);
      ReductionCodeGen RedCG(Data.ReductionVars, Data.ReductionVars,
                             Data.ReductionCopies, Data.ReductionOps);
      // Captured parameter 9 carries the task reduction descriptor pointer.
      llvm::Value *ReductionsPtr = CGF.Builder.CreateLoad(
          CGF.GetAddrOfLocalVar(CS->getCapturedDecl()->getParam(9)));
      for (unsigned Cnt = 0, E = Data.ReductionVars.size(); Cnt < E; ++Cnt) {
        RedCG.emitSharedOrigLValue(CGF, Cnt);
        RedCG.emitAggregateType(CGF, Cnt);
        // FIXME: This must removed once the runtime library is fixed.
        // Emit required threadprivate variables for
        // initializer/combiner/finalizer.
        CGF.CGM.getOpenMPRuntime().emitTaskReductionFixups(CGF, S.getBeginLoc(),
                                                           RedCG, Cnt);
        // Ask the runtime for this thread's private reduction item and cast
        // the returned void* to the item's type.
        Address Replacement = CGF.CGM.getOpenMPRuntime().getTaskReductionItem(
            CGF, S.getBeginLoc(), ReductionsPtr, RedCG.getSharedLValue(Cnt));
        Replacement =
            Address(CGF.EmitScalarConversion(
                        Replacement.getPointer(), CGF.getContext().VoidPtrTy,
                        CGF.getContext().getPointerType(
                            Data.ReductionCopies[Cnt]->getType()),
                        Data.ReductionCopies[Cnt]->getExprLoc()),
                    Replacement.getAlignment());
        Replacement = RedCG.adjustPrivateAddress(CGF, Cnt, Replacement);
        Scope.addPrivate(RedCG.getBaseDecl(Cnt),
                         [Replacement]() { return Replacement; });
      }
    }
    // Privatize all private variables except for in_reduction items.
    (void)Scope.Privatize();
    SmallVector<const Expr *, 4> InRedVars;
    SmallVector<const Expr *, 4> InRedPrivs;
    SmallVector<const Expr *, 4> InRedOps;
    SmallVector<const Expr *, 4> TaskgroupDescriptors;
    for (const auto *C : S.getClausesOfKind<OMPInReductionClause>()) {
      auto IPriv = C->privates().begin();
      auto IRed = C->reduction_ops().begin();
      auto ITD = C->taskgroup_descriptors().begin();
      for (const Expr *Ref : C->varlists()) {
        InRedVars.emplace_back(Ref);
        InRedPrivs.emplace_back(*IPriv);
        InRedOps.emplace_back(*IRed);
        TaskgroupDescriptors.emplace_back(*ITD);
        std::advance(IPriv, 1);
        std::advance(IRed, 1);
        std::advance(ITD, 1);
      }
    }
    // Privatize in_reduction items here, because taskgroup descriptors must be
    // privatized earlier.
    OMPPrivateScope InRedScope(CGF);
    if (!InRedVars.empty()) {
      ReductionCodeGen RedCG(InRedVars, InRedVars, InRedPrivs, InRedOps);
      for (unsigned Cnt = 0, E = InRedVars.size(); Cnt < E; ++Cnt) {
        RedCG.emitSharedOrigLValue(CGF, Cnt);
        RedCG.emitAggregateType(CGF, Cnt);
        // The taskgroup descriptor variable is always implicit firstprivate and
        // privatized already during processing of the firstprivates.
        // FIXME: This must removed once the runtime library is fixed.
        // Emit required threadprivate variables for
        // initializer/combiner/finalizer.
        CGF.CGM.getOpenMPRuntime().emitTaskReductionFixups(CGF, S.getBeginLoc(),
                                                           RedCG, Cnt);
        llvm::Value *ReductionsPtr;
        if (const Expr *TRExpr = TaskgroupDescriptors[Cnt]) {
          ReductionsPtr = CGF.EmitLoadOfScalar(CGF.EmitLValue(TRExpr),
                                               TRExpr->getExprLoc());
        } else {
          // No enclosing taskgroup descriptor: pass a null pointer.
          ReductionsPtr = llvm::ConstantPointerNull::get(CGF.VoidPtrTy);
        }
        Address Replacement = CGF.CGM.getOpenMPRuntime().getTaskReductionItem(
            CGF, S.getBeginLoc(), ReductionsPtr, RedCG.getSharedLValue(Cnt));
        Replacement = Address(
            CGF.EmitScalarConversion(
                Replacement.getPointer(), CGF.getContext().VoidPtrTy,
                CGF.getContext().getPointerType(InRedPrivs[Cnt]->getType()),
                InRedPrivs[Cnt]->getExprLoc()),
            Replacement.getAlignment());
        Replacement = RedCG.adjustPrivateAddress(CGF, Cnt, Replacement);
        InRedScope.addPrivate(RedCG.getBaseDecl(Cnt),
                              [Replacement]() { return Replacement; });
      }
    }
    (void)InRedScope.Privatize();

    CGOpenMPRuntime::UntiedTaskLocalDeclsRAII LocalVarsScope(CGF,
                                                             UntiedLocalVars);
    Action.Enter(CGF);
    BodyGen(CGF);
  };
  llvm::Function *OutlinedFn = CGM.getOpenMPRuntime().emitTaskOutlinedFunction(
      S, *I, *PartId, *TaskT, S.getDirectiveKind(), CodeGen, Data.Tied,
      Data.NumberOfParts);
  OMPLexicalScope Scope(*this, S, llvm::None,
                        !isOpenMPParallelDirective(S.getDirectiveKind()) &&
                            !isOpenMPSimdDirective(S.getDirectiveKind()));
  TaskGen(*this, OutlinedFn, Data);
}
4135
4136
/// Create an implicit firstprivate variable of type \p Ty for a task region
/// and register the (original, private copy, init) triple in \p Data.
/// Returns the declaration of the "original" captured variable.
static ImplicitParamDecl *
createImplicitFirstprivateForType(ASTContext &C, OMPTaskDataTy &Data,
                                  QualType Ty, CapturedDecl *CD,
                                  SourceLocation Loc) {
  // Declaration and reference for the original (captured) variable.
  ImplicitParamDecl *Orig = ImplicitParamDecl::Create(
      C, CD, Loc, /*Id=*/nullptr, Ty, ImplicitParamDecl::Other);
  DeclRefExpr *OrigRef = DeclRefExpr::Create(
      C, NestedNameSpecifierLoc(), SourceLocation(), Orig,
      /*RefersToEnclosingVariableOrCapture=*/false, Loc, Ty, VK_LValue);
  // Declaration and reference for the task-local private copy.
  ImplicitParamDecl *Priv = ImplicitParamDecl::Create(
      C, CD, Loc, /*Id=*/nullptr, Ty, ImplicitParamDecl::Other);
  DeclRefExpr *PrivRef = DeclRefExpr::Create(
      C, NestedNameSpecifierLoc(), SourceLocation(), Priv,
      /*RefersToEnclosingVariableOrCapture=*/false, Loc, Ty, VK_LValue);
  // The initializer variable uses the base element type of Ty, so arrays are
  // initialized element by element.
  QualType ElemTy = C.getBaseElementType(Ty);
  ImplicitParamDecl *Init = ImplicitParamDecl::Create(
      C, CD, Loc, /*Id=*/nullptr, ElemTy, ImplicitParamDecl::Other);
  DeclRefExpr *InitRef = DeclRefExpr::Create(
      C, NestedNameSpecifierLoc(), SourceLocation(), Init,
      /*RefersToEnclosingVariableOrCapture=*/false, Loc, ElemTy, VK_LValue);
  // The private copy is C-initialized from an rvalue read of the init var.
  Priv->setInitStyle(VarDecl::CInit);
  Priv->setInit(ImplicitCastExpr::Create(C, ElemTy, CK_LValueToRValue, InitRef,
                                         /*BasePath=*/nullptr, VK_RValue,
                                         FPOptionsOverride()));
  Data.FirstprivateVars.emplace_back(OrigRef);
  Data.FirstprivateCopies.emplace_back(PrivRef);
  Data.FirstprivateInits.emplace_back(InitRef);
  return Orig;
}
4165
4166
/// Emits code for a target task-based directive (e.g. 'target nowait', which
/// is wrapped in an implicit task). \p BodyGen emits the actual target region
/// body; \p InputInfo carries the base-pointer/pointer/size/mapper arrays that
/// must be firstprivatized into the task so they survive until the deferred
/// task runs.
void CodeGenFunction::EmitOMPTargetTaskBasedDirective(
    const OMPExecutableDirective &S, const RegionCodeGenTy &BodyGen,
    OMPTargetDataInfo &InputInfo) {
  // Emit outlined function for task construct.
  const CapturedStmt *CS = S.getCapturedStmt(OMPD_task);
  Address CapturedStruct = GenerateCapturedStmtArgument(*CS);
  QualType SharedsTy = getContext().getRecordType(CS->getCapturedRecordDecl());
  // Captured-decl params: *I is the context record, PartId and TaskT are the
  // part-id and task descriptor parameters expected by emitTaskOutlinedFunction.
  auto I = CS->getCapturedDecl()->param_begin();
  auto PartId = std::next(I);
  auto TaskT = std::next(I, 4);
  OMPTaskDataTy Data;
  // The task is not final.
  Data.Final.setInt(/*IntVal=*/false);
  // Get list of firstprivate variables.
  for (const auto *C : S.getClausesOfKind<OMPFirstprivateClause>()) {
    auto IRef = C->varlist_begin();
    auto IElemInitRef = C->inits().begin();
    // varlist, private_copies and inits are parallel lists; advance the
    // iterators together.
    for (auto *IInit : C->private_copies()) {
      Data.FirstprivateVars.push_back(*IRef);
      Data.FirstprivateCopies.push_back(IInit);
      Data.FirstprivateInits.push_back(*IElemInitRef);
      ++IRef;
      ++IElemInitRef;
    }
  }
  OMPPrivateScope TargetScope(*this);
  // Implicit firstprivates holding the mapping arrays (base pointers,
  // pointers, sizes, mappers); only created when there is something to map.
  VarDecl *BPVD = nullptr;
  VarDecl *PVD = nullptr;
  VarDecl *SVD = nullptr;
  VarDecl *MVD = nullptr;
  if (InputInfo.NumberOfTargetItems > 0) {
    auto *CD = CapturedDecl::Create(
        getContext(), getContext().getTranslationUnitDecl(), /*NumParams=*/0);
    llvm::APInt ArrSize(/*numBits=*/32, InputInfo.NumberOfTargetItems);
    // void*[N] — shared by the base-pointer, pointer and mapper arrays.
    QualType BaseAndPointerAndMapperType = getContext().getConstantArrayType(
        getContext().VoidPtrTy, ArrSize, nullptr, ArrayType::Normal,
        /*IndexTypeQuals=*/0);
    BPVD = createImplicitFirstprivateForType(
        getContext(), Data, BaseAndPointerAndMapperType, CD, S.getBeginLoc());
    PVD = createImplicitFirstprivateForType(
        getContext(), Data, BaseAndPointerAndMapperType, CD, S.getBeginLoc());
    // Sizes are signed 64-bit integers.
    QualType SizesType = getContext().getConstantArrayType(
        getContext().getIntTypeForBitwidth(/*DestWidth=*/64, /*Signed=*/1),
        ArrSize, nullptr, ArrayType::Normal,
        /*IndexTypeQuals=*/0);
    SVD = createImplicitFirstprivateForType(getContext(), Data, SizesType, CD,
                                            S.getBeginLoc());
    MVD = createImplicitFirstprivateForType(
        getContext(), Data, BaseAndPointerAndMapperType, CD, S.getBeginLoc());
    // The privates are seeded with the addresses currently held in InputInfo.
    TargetScope.addPrivate(
        BPVD, [&InputInfo]() { return InputInfo.BasePointersArray; });
    TargetScope.addPrivate(PVD,
                           [&InputInfo]() { return InputInfo.PointersArray; });
    TargetScope.addPrivate(SVD,
                           [&InputInfo]() { return InputInfo.SizesArray; });
    TargetScope.addPrivate(MVD,
                           [&InputInfo]() { return InputInfo.MappersArray; });
  }
  (void)TargetScope.Privatize();
  // Build list of dependences.
  for (const auto *C : S.getClausesOfKind<OMPDependClause>()) {
    OMPTaskDataTy::DependData &DD =
        Data.Dependences.emplace_back(C->getDependencyKind(), C->getModifier());
    DD.DepExprs.append(C->varlist_begin(), C->varlist_end());
  }
  // Body of the outlined task function: remap firstprivates to the task's
  // private copies, point InputInfo at those copies, then emit the body.
  auto &&CodeGen = [&Data, &S, CS, &BodyGen, BPVD, PVD, SVD, MVD,
                    &InputInfo](CodeGenFunction &CGF, PrePostActionTy &Action) {
    // Set proper addresses for generated private copies.
    OMPPrivateScope Scope(CGF);
    if (!Data.FirstprivateVars.empty()) {
      llvm::FunctionType *CopyFnTy = llvm::FunctionType::get(
          CGF.Builder.getVoidTy(), {CGF.Builder.getInt8PtrTy()}, true);
      // Parameter indices of the privates pointer and the copy function in
      // the task outlined function's captured decl.
      enum { PrivatesParam = 2, CopyFnParam = 3 };
      llvm::Value *CopyFn = CGF.Builder.CreateLoad(
          CGF.GetAddrOfLocalVar(CS->getCapturedDecl()->getParam(CopyFnParam)));
      llvm::Value *PrivatesPtr = CGF.Builder.CreateLoad(CGF.GetAddrOfLocalVar(
          CS->getCapturedDecl()->getParam(PrivatesParam)));
      // Map privates.
      llvm::SmallVector<std::pair<const VarDecl *, Address>, 16> PrivatePtrs;
      llvm::SmallVector<llvm::Value *, 16> CallArgs;
      CallArgs.push_back(PrivatesPtr);
      for (const Expr *E : Data.FirstprivateVars) {
        const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
        Address PrivatePtr =
            CGF.CreateMemTemp(CGF.getContext().getPointerType(E->getType()),
                              ".firstpriv.ptr.addr");
        PrivatePtrs.emplace_back(VD, PrivatePtr);
        CallArgs.push_back(PrivatePtr.getPointer());
      }
      // The runtime-provided copy function fills each *.firstpriv.ptr.addr
      // with the address of the corresponding private copy.
      CGF.CGM.getOpenMPRuntime().emitOutlinedFunctionCall(
          CGF, S.getBeginLoc(), {CopyFnTy, CopyFn}, CallArgs);
      for (const auto &Pair : PrivatePtrs) {
        Address Replacement(CGF.Builder.CreateLoad(Pair.second),
                            CGF.getContext().getDeclAlign(Pair.first));
        Scope.addPrivate(Pair.first, [Replacement]() { return Replacement; });
      }
    }
    // Privatize all private variables except for in_reduction items.
    (void)Scope.Privatize();
    if (InputInfo.NumberOfTargetItems > 0) {
      // Redirect InputInfo to the task-local copies of the mapping arrays so
      // BodyGen uses them instead of the originals.
      InputInfo.BasePointersArray = CGF.Builder.CreateConstArrayGEP(
          CGF.GetAddrOfLocalVar(BPVD), /*Index=*/0);
      InputInfo.PointersArray = CGF.Builder.CreateConstArrayGEP(
          CGF.GetAddrOfLocalVar(PVD), /*Index=*/0);
      InputInfo.SizesArray = CGF.Builder.CreateConstArrayGEP(
          CGF.GetAddrOfLocalVar(SVD), /*Index=*/0);
      InputInfo.MappersArray = CGF.Builder.CreateConstArrayGEP(
          CGF.GetAddrOfLocalVar(MVD), /*Index=*/0);
    }

    Action.Enter(CGF);
    OMPLexicalScope LexScope(CGF, S, OMPD_task, /*EmitPreInitStmt=*/false);
    BodyGen(CGF);
  };
  llvm::Function *OutlinedFn = CGM.getOpenMPRuntime().emitTaskOutlinedFunction(
      S, *I, *PartId, *TaskT, S.getDirectiveKind(), CodeGen, /*Tied=*/true,
      Data.NumberOfParts);
  // The if-condition of the task call: 1 (deferred) when 'nowait' is present,
  // 0 (undeferred/included) otherwise.
  llvm::APInt TrueOrFalse(32, S.hasClausesOfKind<OMPNowaitClause>() ? 1 : 0);
  IntegerLiteral IfCond(getContext(), TrueOrFalse,
                        getContext().getIntTypeForBitwidth(32, /*Signed=*/0),
                        SourceLocation());

  CGM.getOpenMPRuntime().emitTaskCall(*this, S.getBeginLoc(), S, OutlinedFn,
                                      SharedsTy, CapturedStruct, &IfCond, Data);
}
4291
4292
175
void CodeGenFunction::EmitOMPTaskDirective(const OMPTaskDirective &S) {
4293
  // Emit outlined function for task construct.
4294
175
  const CapturedStmt *CS = S.getCapturedStmt(OMPD_task);
4295
175
  Address CapturedStruct = GenerateCapturedStmtArgument(*CS);
4296
175
  QualType SharedsTy = getContext().getRecordType(CS->getCapturedRecordDecl());
4297
175
  const Expr *IfCond = nullptr;
4298
44
  for (const auto *C : S.getClausesOfKind<OMPIfClause>()) {
4299
44
    if (C->getNameModifier() == OMPD_unknown ||
4300
44
        
C->getNameModifier() == OMPD_task12
) {
4301
44
      IfCond = C->getCondition();
4302
44
      break;
4303
44
    }
4304
44
  }
4305
175
4306
175
  OMPTaskDataTy Data;
4307
  // Check if we should emit tied or untied task.
4308
175
  Data.Tied = !S.getSingleClause<OMPUntiedClause>();
4309
175
  auto &&BodyGen = [CS](CodeGenFunction &CGF, PrePostActionTy &) {
4310
175
    CGF.EmitStmt(CS->getCapturedStmt());
4311
175
  };
4312
175
  auto &&TaskGen = [&S, SharedsTy, CapturedStruct,
4313
175
                    IfCond](CodeGenFunction &CGF, llvm::Function *OutlinedFn,
4314
175
                            const OMPTaskDataTy &Data) {
4315
175
    CGF.CGM.getOpenMPRuntime().emitTaskCall(CGF, S.getBeginLoc(), S, OutlinedFn,
4316
175
                                            SharedsTy, CapturedStruct, IfCond,
4317
175
                                            Data);
4318
175
  };
4319
175
  auto LPCRegion =
4320
175
      CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S);
4321
175
  EmitOMPTaskBasedDirective(S, OMPD_task, BodyGen, TaskGen, Data);
4322
175
}
4323
4324
/// Emits code for the '#pragma omp taskyield' directive: a single runtime
/// taskyield call at the directive's location.
void CodeGenFunction::EmitOMPTaskyieldDirective(
    const OMPTaskyieldDirective &S) {
  CGM.getOpenMPRuntime().emitTaskyieldCall(*this, S.getBeginLoc());
}
4328
4329
34
/// Emits code for the '#pragma omp barrier' directive: a single runtime
/// barrier call at the directive's location.
void CodeGenFunction::EmitOMPBarrierDirective(const OMPBarrierDirective &S) {
  CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getBeginLoc(), OMPD_barrier);
}
4332
4333
12
/// Emits code for the '#pragma omp taskwait' directive: a single runtime
/// taskwait call at the directive's location.
void CodeGenFunction::EmitOMPTaskwaitDirective(const OMPTaskwaitDirective &S) {
  CGM.getOpenMPRuntime().emitTaskwaitCall(*this, S.getBeginLoc());
}
4336
4337
/// Emits code for the '#pragma omp taskgroup' directive. If the directive
/// carries task_reduction clauses, a reduction descriptor is initialized at
/// region entry and stored into the variable referenced by getReductionRef()
/// before the region body runs.
void CodeGenFunction::EmitOMPTaskgroupDirective(
    const OMPTaskgroupDirective &S) {
  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
    Action.Enter(CGF);
    // getReductionRef() is non-null only when task_reduction clauses exist.
    if (const Expr *E = S.getReductionRef()) {
      SmallVector<const Expr *, 4> LHSs;
      SmallVector<const Expr *, 4> RHSs;
      OMPTaskDataTy Data;
      // Collect the reduction items from all task_reduction clauses; the
      // Vars/Origs/Copies/Ops lists are kept parallel.
      for (const auto *C : S.getClausesOfKind<OMPTaskReductionClause>()) {
        Data.ReductionVars.append(C->varlist_begin(), C->varlist_end());
        Data.ReductionOrigs.append(C->varlist_begin(), C->varlist_end());
        Data.ReductionCopies.append(C->privates().begin(), C->privates().end());
        Data.ReductionOps.append(C->reduction_ops().begin(),
                                 C->reduction_ops().end());
        LHSs.append(C->lhs_exprs().begin(), C->lhs_exprs().end());
        RHSs.append(C->rhs_exprs().begin(), C->rhs_exprs().end());
      }
      // Initialize the runtime reduction data and publish its descriptor in
      // the reduction-ref variable so nested tasks can find it.
      llvm::Value *ReductionDesc =
          CGF.CGM.getOpenMPRuntime().emitTaskReductionInit(CGF, S.getBeginLoc(),
                                                           LHSs, RHSs, Data);
      const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
      CGF.EmitVarDecl(*VD);
      CGF.EmitStoreOfScalar(ReductionDesc, CGF.GetAddrOfLocalVar(VD),
                            /*Volatile=*/false, E->getType());
    }
    CGF.EmitStmt(S.getInnermostCapturedStmt()->getCapturedStmt());
  };
  OMPLexicalScope Scope(*this, S, OMPD_unknown);
  CGM.getOpenMPRuntime().emitTaskgroupRegion(*this, CodeGen, S.getBeginLoc());
}
4367
4368
40
void CodeGenFunction::EmitOMPFlushDirective(const OMPFlushDirective &S) {
4369
40
  llvm::AtomicOrdering AO = S.getSingleClause<OMPFlushClause>()
4370
8
                                ? llvm::AtomicOrdering::NotAtomic
4371
32
                                : llvm::AtomicOrdering::AcquireRelease;
4372
40
  CGM.getOpenMPRuntime().emitFlush(
4373
40
      *this,
4374
40
      [&S]() -> ArrayRef<const Expr *> {
4375
40
        if (const auto *FlushClause = S.getSingleClause<OMPFlushClause>())
4376
8
          return llvm::makeArrayRef(FlushClause->varlist_begin(),
4377
8
                                    FlushClause->varlist_end());
4378
32
        return llvm::None;
4379
32
      }(),
4380
40
      S.getBeginLoc(), AO);
4381
40
}
4382
4383
14
void CodeGenFunction::EmitOMPDepobjDirective(const OMPDepobjDirective &S) {
4384
14
  const auto *DO = S.getSingleClause<OMPDepobjClause>();
4385
14
  LValue DOLVal = EmitLValue(DO->getDepobj());
4386
14
  if (const auto *DC = S.getSingleClause<OMPDependClause>()) {
4387
6
    OMPTaskDataTy::DependData Dependencies(DC->getDependencyKind(),
4388
6
                                           DC->getModifier());
4389
6
    Dependencies.DepExprs.append(DC->varlist_begin(), DC->varlist_end());
4390
6
    Address DepAddr = CGM.getOpenMPRuntime().emitDepobjDependClause(
4391
6
        *this, Dependencies, DC->getBeginLoc());
4392
6
    EmitStoreOfScalar(DepAddr.getPointer(), DOLVal);
4393
6
    return;
4394
6
  }
4395
8
  if (const auto *DC = S.getSingleClause<OMPDestroyClause>()) {
4396
4
    CGM.getOpenMPRuntime().emitDestroyClause(*this, DOLVal, DC->getBeginLoc());
4397
4
    return;
4398
4
  }
4399
4
  if (const auto *UC = S.getSingleClause<OMPUpdateClause>()) {
4400
4
    CGM.getOpenMPRuntime().emitUpdateClause(
4401
4
        *this, DOLVal, UC->getDependencyKind(), UC->getBeginLoc());
4402
4
    return;
4403
4
  }
4404
4
}
4405
4406
56
/// Emits code for the '#pragma omp scan' directive separating the input phase
/// from the scan phase of an enclosing inscan-reduction loop. Relies on the
/// enclosing loop's emission having set up OMPParentLoopDirectiveForScan and
/// the OMPScanDispatch/OMPBeforeScanBlock/OMPAfterScanBlock/OMPScanExitBlock
/// basic blocks, plus the OMPFirstScanLoop flag (the loop body is emitted
/// twice: once for the input phase, once for the scan phase).
void CodeGenFunction::EmitOMPScanDirective(const OMPScanDirective &S) {
  // No enclosing loop directive registered for scan: nothing to emit.
  if (!OMPParentLoopDirectiveForScan)
    return;
  const OMPExecutableDirective &ParentDir = *OMPParentLoopDirectiveForScan;
  bool IsInclusive = S.hasClausesOfKind<OMPInclusiveClause>();
  SmallVector<const Expr *, 4> Shareds;
  SmallVector<const Expr *, 4> Privates;
  SmallVector<const Expr *, 4> LHSs;
  SmallVector<const Expr *, 4> RHSs;
  SmallVector<const Expr *, 4> ReductionOps;
  SmallVector<const Expr *, 4> CopyOps;
  SmallVector<const Expr *, 4> CopyArrayTemps;
  SmallVector<const Expr *, 4> CopyArrayElems;
  // Collect only the 'inscan'-modified reduction items of the parent loop;
  // all the lists below stay index-parallel.
  for (const auto *C : ParentDir.getClausesOfKind<OMPReductionClause>()) {
    if (C->getModifier() != OMPC_REDUCTION_inscan)
      continue;
    Shareds.append(C->varlist_begin(), C->varlist_end());
    Privates.append(C->privates().begin(), C->privates().end());
    LHSs.append(C->lhs_exprs().begin(), C->lhs_exprs().end());
    RHSs.append(C->rhs_exprs().begin(), C->rhs_exprs().end());
    ReductionOps.append(C->reduction_ops().begin(), C->reduction_ops().end());
    CopyOps.append(C->copy_ops().begin(), C->copy_ops().end());
    CopyArrayTemps.append(C->copy_array_temps().begin(),
                          C->copy_array_temps().end());
    CopyArrayElems.append(C->copy_array_elems().begin(),
                          C->copy_array_elems().end());
  }
  if (ParentDir.getDirectiveKind() == OMPD_simd ||
      (getLangOpts().OpenMPSimd &&
       isOpenMPSimdDirective(ParentDir.getDirectiveKind()))) {
    // For simd directive and simd-based directives in simd only mode, use the
    // following codegen:
    // int x = 0;
    // #pragma omp simd reduction(inscan, +: x)
    // for (..) {
    //   <first part>
    //   #pragma omp scan inclusive(x)
    //   <second part>
    //  }
    // is transformed to:
    // int x = 0;
    // for (..) {
    //   int x_priv = 0;
    //   <first part>
    //   x = x_priv + x;
    //   x_priv = x;
    //   <second part>
    // }
    // and
    // int x = 0;
    // #pragma omp simd reduction(inscan, +: x)
    // for (..) {
    //   <first part>
    //   #pragma omp scan exclusive(x)
    //   <second part>
    // }
    // to
    // int x = 0;
    // for (..) {
    //   int x_priv = 0;
    //   <second part>
    //   int temp = x;
    //   x = x_priv + x;
    //   x_priv = temp;
    //   <first part>
    // }
    llvm::BasicBlock *OMPScanReduce = createBasicBlock("omp.inscan.reduce");
    EmitBranch(IsInclusive
                   ? OMPScanReduce
                   : BreakContinueStack.back().ContinueBlock.getBlock());
    EmitBlock(OMPScanDispatch);
    {
      // New scope for correct construction/destruction of temp variables for
      // exclusive scan.
      LexicalScope Scope(*this, S.getSourceRange());
      EmitBranch(IsInclusive ? OMPBeforeScanBlock : OMPAfterScanBlock);
      EmitBlock(OMPScanReduce);
      if (!IsInclusive) {
        // Create temp var and copy LHS value to this temp value.
        // TMP = LHS;
        for (unsigned I = 0, E = CopyArrayElems.size(); I < E; ++I) {
          const Expr *PrivateExpr = Privates[I];
          const Expr *TempExpr = CopyArrayTemps[I];
          EmitAutoVarDecl(
              *cast<VarDecl>(cast<DeclRefExpr>(TempExpr)->getDecl()));
          LValue DestLVal = EmitLValue(TempExpr);
          LValue SrcLVal = EmitLValue(LHSs[I]);
          EmitOMPCopy(PrivateExpr->getType(), DestLVal.getAddress(*this),
                      SrcLVal.getAddress(*this),
                      cast<VarDecl>(cast<DeclRefExpr>(LHSs[I])->getDecl()),
                      cast<VarDecl>(cast<DeclRefExpr>(RHSs[I])->getDecl()),
                      CopyOps[I]);
        }
      }
      // x = x_priv + x; — emitted as a simple (non-runtime) reduction.
      CGM.getOpenMPRuntime().emitReduction(
          *this, ParentDir.getEndLoc(), Privates, LHSs, RHSs, ReductionOps,
          {/*WithNowait=*/true, /*SimpleReduction=*/true, OMPD_simd});
      // x_priv = x (inclusive) or x_priv = temp (exclusive).
      for (unsigned I = 0, E = CopyArrayElems.size(); I < E; ++I) {
        const Expr *PrivateExpr = Privates[I];
        LValue DestLVal;
        LValue SrcLVal;
        if (IsInclusive) {
          DestLVal = EmitLValue(RHSs[I]);
          SrcLVal = EmitLValue(LHSs[I]);
        } else {
          const Expr *TempExpr = CopyArrayTemps[I];
          DestLVal = EmitLValue(RHSs[I]);
          SrcLVal = EmitLValue(TempExpr);
        }
        EmitOMPCopy(PrivateExpr->getType(), DestLVal.getAddress(*this),
                    SrcLVal.getAddress(*this),
                    cast<VarDecl>(cast<DeclRefExpr>(LHSs[I])->getDecl()),
                    cast<VarDecl>(cast<DeclRefExpr>(RHSs[I])->getDecl()),
                    CopyOps[I]);
      }
    }
    EmitBranch(IsInclusive ? OMPAfterScanBlock : OMPBeforeScanBlock);
    OMPScanExitBlock = IsInclusive
                           ? BreakContinueStack.back().ContinueBlock.getBlock()
                           : OMPScanReduce;
    EmitBlock(OMPAfterScanBlock);
    return;
  }
  // Non-simd-only path: the input and scan phases communicate through a
  // per-iteration copy buffer (CopyArrayElems), indexed by the loop IV.
  if (!IsInclusive) {
    EmitBranch(BreakContinueStack.back().ContinueBlock.getBlock());
    EmitBlock(OMPScanExitBlock);
  }
  if (OMPFirstScanLoop) {
    // Emit buffer[i] = red; at the end of the input phase.
    const auto *IVExpr = cast<OMPLoopDirective>(ParentDir)
                             .getIterationVariable()
                             ->IgnoreParenImpCasts();
    LValue IdxLVal = EmitLValue(IVExpr);
    llvm::Value *IdxVal = EmitLoadOfScalar(IdxLVal, IVExpr->getExprLoc());
    IdxVal = Builder.CreateIntCast(IdxVal, SizeTy, /*isSigned=*/false);
    for (unsigned I = 0, E = CopyArrayElems.size(); I < E; ++I) {
      const Expr *PrivateExpr = Privates[I];
      const Expr *OrigExpr = Shareds[I];
      const Expr *CopyArrayElem = CopyArrayElems[I];
      // Bind the buffer subscript's opaque index expression to the IV value.
      OpaqueValueMapping IdxMapping(
          *this,
          cast<OpaqueValueExpr>(
              cast<ArraySubscriptExpr>(CopyArrayElem)->getIdx()),
          RValue::get(IdxVal));
      LValue DestLVal = EmitLValue(CopyArrayElem);
      LValue SrcLVal = EmitLValue(OrigExpr);
      EmitOMPCopy(PrivateExpr->getType(), DestLVal.getAddress(*this),
                  SrcLVal.getAddress(*this),
                  cast<VarDecl>(cast<DeclRefExpr>(LHSs[I])->getDecl()),
                  cast<VarDecl>(cast<DeclRefExpr>(RHSs[I])->getDecl()),
                  CopyOps[I]);
    }
  }
  EmitBranch(BreakContinueStack.back().ContinueBlock.getBlock());
  if (IsInclusive) {
    EmitBlock(OMPScanExitBlock);
    EmitBranch(BreakContinueStack.back().ContinueBlock.getBlock());
  }
  EmitBlock(OMPScanDispatch);
  if (!OMPFirstScanLoop) {
    // Emit red = buffer[i]; at the entrance to the scan phase.
    const auto *IVExpr = cast<OMPLoopDirective>(ParentDir)
                             .getIterationVariable()
                             ->IgnoreParenImpCasts();
    LValue IdxLVal = EmitLValue(IVExpr);
    llvm::Value *IdxVal = EmitLoadOfScalar(IdxLVal, IVExpr->getExprLoc());
    IdxVal = Builder.CreateIntCast(IdxVal, SizeTy, /*isSigned=*/false);
    llvm::BasicBlock *ExclusiveExitBB = nullptr;
    if (!IsInclusive) {
      // Exclusive scan at iteration 0 has no predecessor value: skip the copy.
      llvm::BasicBlock *ContBB = createBasicBlock("omp.exclusive.dec");
      ExclusiveExitBB = createBasicBlock("omp.exclusive.copy.exit");
      llvm::Value *Cmp = Builder.CreateIsNull(IdxVal);
      Builder.CreateCondBr(Cmp, ExclusiveExitBB, ContBB);
      EmitBlock(ContBB);
      // Use idx - 1 iteration for exclusive scan.
      IdxVal = Builder.CreateNUWSub(IdxVal, llvm::ConstantInt::get(SizeTy, 1));
    }
    for (unsigned I = 0, E = CopyArrayElems.size(); I < E; ++I) {
      const Expr *PrivateExpr = Privates[I];
      const Expr *OrigExpr = Shareds[I];
      const Expr *CopyArrayElem = CopyArrayElems[I];
      OpaqueValueMapping IdxMapping(
          *this,
          cast<OpaqueValueExpr>(
              cast<ArraySubscriptExpr>(CopyArrayElem)->getIdx()),
          RValue::get(IdxVal));
      // Note: direction is reversed vs. the input phase — buffer is the
      // source here.
      LValue SrcLVal = EmitLValue(CopyArrayElem);
      LValue DestLVal = EmitLValue(OrigExpr);
      EmitOMPCopy(PrivateExpr->getType(), DestLVal.getAddress(*this),
                  SrcLVal.getAddress(*this),
                  cast<VarDecl>(cast<DeclRefExpr>(LHSs[I])->getDecl()),
                  cast<VarDecl>(cast<DeclRefExpr>(RHSs[I])->getDecl()),
                  CopyOps[I]);
    }
    if (!IsInclusive) {
      EmitBlock(ExclusiveExitBB);
    }
  }
  EmitBranch((OMPFirstScanLoop == IsInclusive) ? OMPBeforeScanBlock
                                               : OMPAfterScanBlock);
  EmitBlock(OMPAfterScanBlock);
}
4608
4609
/// Emits the worksharing machinery of a 'distribute' loop: iteration variable,
/// precondition check, bound/stride helper variables, privatization, schedule
/// selection, and either an inner static loop or an outer dynamic-chunk loop.
/// \p CodeGenLoop emits the loop body; \p IncExpr is the IV increment.
void CodeGenFunction::EmitOMPDistributeLoop(const OMPLoopDirective &S,
                                            const CodeGenLoopTy &CodeGenLoop,
                                            Expr *IncExpr) {
  // Emit the loop iteration variable.
  const auto *IVExpr = cast<DeclRefExpr>(S.getIterationVariable());
  const auto *IVDecl = cast<VarDecl>(IVExpr->getDecl());
  EmitVarDecl(*IVDecl);

  // Emit the iterations count variable.
  // If it is not a variable, Sema decided to calculate iterations count on each
  // iteration (e.g., it is foldable into a constant).
  if (const auto *LIExpr = dyn_cast<DeclRefExpr>(S.getLastIteration())) {
    EmitVarDecl(*cast<VarDecl>(LIExpr->getDecl()));
    // Emit calculation of the iterations count.
    EmitIgnoredExpr(S.getCalcLastIteration());
  }

  CGOpenMPRuntime &RT = CGM.getOpenMPRuntime();

  bool HasLastprivateClause = false;
  // Check pre-condition.
  {
    OMPLoopScope PreInitScope(*this, S);
    // Skip the entire loop if we don't meet the precondition.
    // If the condition constant folds and can be elided, avoid emitting the
    // whole loop.
    bool CondConstant;
    llvm::BasicBlock *ContBlock = nullptr;
    if (ConstantFoldsToSimpleInteger(S.getPreCond(), CondConstant)) {
      if (!CondConstant)
        return;
    } else {
      llvm::BasicBlock *ThenBlock = createBasicBlock("omp.precond.then");
      ContBlock = createBasicBlock("omp.precond.end");
      emitPreCond(*this, S, S.getPreCond(), ThenBlock, ContBlock,
                  getProfileCount(&S));
      EmitBlock(ThenBlock);
      incrementProfileCounter(&S);
    }

    emitAlignedClause(*this, S);
    // Emit 'then' code.
    {
      // Emit helper vars inits.
      // Combined directives that share loop bounds with an inner worksharing
      // construct use the combined bound variables instead of the plain ones.
      LValue LB = EmitOMPHelperVar(
          *this, cast<DeclRefExpr>(
                     (isOpenMPLoopBoundSharingDirective(S.getDirectiveKind())
                          ? S.getCombinedLowerBoundVariable()
                          : S.getLowerBoundVariable())));
      LValue UB = EmitOMPHelperVar(
          *this, cast<DeclRefExpr>(
                     (isOpenMPLoopBoundSharingDirective(S.getDirectiveKind())
                          ? S.getCombinedUpperBoundVariable()
                          : S.getUpperBoundVariable())));
      LValue ST =
          EmitOMPHelperVar(*this, cast<DeclRefExpr>(S.getStrideVariable()));
      LValue IL =
          EmitOMPHelperVar(*this, cast<DeclRefExpr>(S.getIsLastIterVariable()));

      OMPPrivateScope LoopScope(*this);
      if (EmitOMPFirstprivateClause(S, LoopScope)) {
        // Emit implicit barrier to synchronize threads and avoid data races
        // on initialization of firstprivate variables and post-update of
        // lastprivate variables.
        CGM.getOpenMPRuntime().emitBarrierCall(
            *this, S.getBeginLoc(), OMPD_unknown, /*EmitChecks=*/false,
            /*ForceSimpleCall=*/true);
      }
      EmitOMPPrivateClause(S, LoopScope);
      // Reductions are handled here only for 'distribute simd' without an
      // inner parallel/teams region (otherwise the inner region handles them).
      if (isOpenMPSimdDirective(S.getDirectiveKind()) &&
          !isOpenMPParallelDirective(S.getDirectiveKind()) &&
          !isOpenMPTeamsDirective(S.getDirectiveKind()))
        EmitOMPReductionClauseInit(S, LoopScope);
      HasLastprivateClause = EmitOMPLastprivateClauseInit(S, LoopScope);
      EmitOMPPrivateLoopCounters(S, LoopScope);
      (void)LoopScope.Privatize();
      if (isOpenMPTargetExecutionDirective(S.getDirectiveKind()))
        CGM.getOpenMPRuntime().adjustTargetSpecificDataForLambdas(*this, S);

      // Detect the distribute schedule kind and chunk.
      llvm::Value *Chunk = nullptr;
      OpenMPDistScheduleClauseKind ScheduleKind = OMPC_DIST_SCHEDULE_unknown;
      if (const auto *C = S.getSingleClause<OMPDistScheduleClause>()) {
        ScheduleKind = C->getDistScheduleKind();
        if (const Expr *Ch = C->getChunkSize()) {
          Chunk = EmitScalarExpr(Ch);
          // The chunk expression may have a different type than the IV.
          Chunk = EmitScalarConversion(Chunk, Ch->getType(),
                                       S.getIterationVariable()->getType(),
                                       S.getBeginLoc());
        }
      } else {
        // Default behaviour for dist_schedule clause.
        CGM.getOpenMPRuntime().getDefaultDistScheduleAndChunk(
            *this, S, ScheduleKind, Chunk);
      }
      const unsigned IVSize = getContext().getTypeSize(IVExpr->getType());
      const bool IVSigned = IVExpr->getType()->hasSignedIntegerRepresentation();

      // OpenMP [2.10.8, distribute Construct, Description]
      // If dist_schedule is specified, kind must be static. If specified,
      // iterations are divided into chunks of size chunk_size, chunks are
      // assigned to the teams of the league in a round-robin fashion in the
      // order of the team number. When no chunk_size is specified, the
      // iteration space is divided into chunks that are approximately equal
      // in size, and at most one chunk is distributed to each team of the
      // league. The size of the chunks is unspecified in this case.
      bool StaticChunked = RT.isStaticChunked(
          ScheduleKind, /* Chunked */ Chunk != nullptr) &&
          isOpenMPLoopBoundSharingDirective(S.getDirectiveKind());
      if (RT.isStaticNonchunked(ScheduleKind,
                                /* Chunked */ Chunk != nullptr) ||
          StaticChunked) {
        CGOpenMPRuntime::StaticRTInput StaticInit(
            IVSize, IVSigned, /* Ordered = */ false, IL.getAddress(*this),
            LB.getAddress(*this), UB.getAddress(*this), ST.getAddress(*this),
            StaticChunked ? Chunk : nullptr);
        RT.emitDistributeStaticInit(*this, S.getBeginLoc(), ScheduleKind,
                                    StaticInit);
        JumpDest LoopExit =
            getJumpDestInCurrentScope(createBasicBlock("omp.loop.exit"));
        // UB = min(UB, GlobalUB);
        EmitIgnoredExpr(isOpenMPLoopBoundSharingDirective(S.getDirectiveKind())
                            ? S.getCombinedEnsureUpperBound()
                            : S.getEnsureUpperBound());
        // IV = LB;
        EmitIgnoredExpr(isOpenMPLoopBoundSharingDirective(S.getDirectiveKind())
                            ? S.getCombinedInit()
                            : S.getInit());

        const Expr *Cond =
            isOpenMPLoopBoundSharingDirective(S.getDirectiveKind())
                ? S.getCombinedCond()
                : S.getCond();

        if (StaticChunked)
          Cond = S.getCombinedDistCond();

        // For static unchunked schedules generate:
        //
        //  1. For distribute alone, codegen
        //    while (idx <= UB) {
        //      BODY;
        //      ++idx;
        //    }
        //
        //  2. When combined with 'for' (e.g. as in 'distribute parallel for')
        //    while (idx <= UB) {
        //      <CodeGen rest of pragma>(LB, UB);
        //      idx += ST;
        //    }
        //
        // For static chunk one schedule generate:
        //
        // while (IV <= GlobalUB) {
        //   <CodeGen rest of pragma>(LB, UB);
        //   LB += ST;
        //   UB += ST;
        //   UB = min(UB, GlobalUB);
        //   IV = LB;
        // }
        //
        emitCommonSimdLoop(
            *this, S,
            [&S](CodeGenFunction &CGF, PrePostActionTy &) {
              if (isOpenMPSimdDirective(S.getDirectiveKind()))
                CGF.EmitOMPSimdInit(S, /*IsMonotonic=*/true);
            },
            [&S, &LoopScope, Cond, IncExpr, LoopExit, &CodeGenLoop,
             StaticChunked](CodeGenFunction &CGF, PrePostActionTy &) {
              CGF.EmitOMPInnerLoop(
                  S, LoopScope.requiresCleanups(), Cond, IncExpr,
                  [&S, LoopExit, &CodeGenLoop](CodeGenFunction &CGF) {
                    CodeGenLoop(CGF, S, LoopExit);
                  },
                  [&S, StaticChunked](CodeGenFunction &CGF) {
                    // Chunked schedule: advance to the next chunk after each
                    // iteration of the outer 'while'.
                    if (StaticChunked) {
                      CGF.EmitIgnoredExpr(S.getCombinedNextLowerBound());
                      CGF.EmitIgnoredExpr(S.getCombinedNextUpperBound());
                      CGF.EmitIgnoredExpr(S.getCombinedEnsureUpperBound());
                      CGF.EmitIgnoredExpr(S.getCombinedInit());
                    }
                  });
            });
        EmitBlock(LoopExit.getBlock());
        // Tell the runtime we are done.
        RT.emitForStaticFinish(*this, S.getEndLoc(), S.getDirectiveKind());
      } else {
        // Emit the outer loop, which requests its work chunk [LB..UB] from
        // runtime and runs the inner loop to process it.
        const OMPLoopArguments LoopArguments = {
            LB.getAddress(*this), UB.getAddress(*this), ST.getAddress(*this),
            IL.getAddress(*this), Chunk};
        EmitOMPDistributeOuterLoop(ScheduleKind, S, LoopScope, LoopArguments,
                                   CodeGenLoop);
      }
      if (isOpenMPSimdDirective(S.getDirectiveKind())) {
        EmitOMPSimdFinal(S, [IL, &S](CodeGenFunction &CGF) {
          return CGF.Builder.CreateIsNotNull(
              CGF.EmitLoadOfScalar(IL, S.getBeginLoc()));
        });
      }
      if (isOpenMPSimdDirective(S.getDirectiveKind()) &&
          !isOpenMPParallelDirective(S.getDirectiveKind()) &&
          !isOpenMPTeamsDirective(S.getDirectiveKind())) {
        EmitOMPReductionClauseFinal(S, OMPD_simd);
        // Emit post-update of the reduction variables if IsLastIter != 0.
        emitPostUpdateForReductionClause(
            *this, S, [IL, &S](CodeGenFunction &CGF) {
              return CGF.Builder.CreateIsNotNull(
                  CGF.EmitLoadOfScalar(IL, S.getBeginLoc()));
            });
      }
      // Emit final copy of the lastprivate variables if IsLastIter != 0.
      if (HasLastprivateClause) {
        EmitOMPLastprivateClauseFinal(
            S, /*NoFinals=*/false,
            Builder.CreateIsNotNull(EmitLoadOfScalar(IL, S.getBeginLoc())));
      }
    }

    // We're now done with the loop, so jump to the continuation block.
    if (ContBlock) {
      EmitBranch(ContBlock);
      EmitBlock(ContBlock, true);
    }
  }
}
4837
4838
void CodeGenFunction::EmitOMPDistributeDirective(
4839
110
    const OMPDistributeDirective &S) {
4840
110
  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &) {
4841
110
    CGF.EmitOMPDistributeLoop(S, emitOMPLoopBodyWithStopPoint, S.getInc());
4842
110
  };
4843
110
  OMPLexicalScope Scope(*this, S, OMPD_unknown);
4844
110
  CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_distribute, CodeGen);
4845
110
}
4846
4847
/// Outline the body of an 'ordered simd' construct into its own function.
///
/// A fresh CodeGenFunction is used so the captured statement is emitted into a
/// separate llvm::Function rather than inline in the current one.
static llvm::Function *emitOutlinedOrderedFunction(CodeGenModule &CGM,
                                                   const CapturedStmt *S,
                                                   SourceLocation Loc) {
  CodeGenFunction OutlinedCGF(CGM, /*suppressNewContext=*/true);
  CodeGenFunction::CGCapturedStmtInfo Info;
  OutlinedCGF.CapturedStmtInfo = &Info;
  llvm::Function *OutlinedFn =
      OutlinedCGF.GenerateOpenMPCapturedStmtFunction(*S, Loc);
  // The outlined body never calls back into itself.
  OutlinedFn->setDoesNotRecurse();
  return OutlinedFn;
}
4857
4858
40
void CodeGenFunction::EmitOMPOrderedDirective(const OMPOrderedDirective &S) {
4859
40
  if (S.hasClausesOfKind<OMPDependClause>()) {
4860
16
    assert(!S.hasAssociatedStmt() &&
4861
16
           "No associated statement must be in ordered depend construct.");
4862
16
    for (const auto *DC : S.getClausesOfKind<OMPDependClause>())
4863
18
      CGM.getOpenMPRuntime().emitDoacrossOrdered(*this, DC);
4864
16
    return;
4865
16
  }
4866
24
  const auto *C = S.getSingleClause<OMPSIMDClause>();
4867
24
  auto &&CodeGen = [&S, C, this](CodeGenFunction &CGF,
4868
24
                                 PrePostActionTy &Action) {
4869
24
    const CapturedStmt *CS = S.getInnermostCapturedStmt();
4870
24
    if (C) {
4871
8
      llvm::SmallVector<llvm::Value *, 16> CapturedVars;
4872
8
      CGF.GenerateOpenMPCapturedVars(*CS, CapturedVars);
4873
8
      llvm::Function *OutlinedFn =
4874
8
          emitOutlinedOrderedFunction(CGM, CS, S.getBeginLoc());
4875
8
      CGM.getOpenMPRuntime().emitOutlinedFunctionCall(CGF, S.getBeginLoc(),
4876
8
                                                      OutlinedFn, CapturedVars);
4877
16
    } else {
4878
16
      Action.Enter(CGF);
4879
16
      CGF.EmitStmt(CS->getCapturedStmt());
4880
16
    }
4881
24
  };
4882
24
  OMPLexicalScope Scope(*this, S, OMPD_unknown);
4883
24
  CGM.getOpenMPRuntime().emitOrderedRegion(*this, CodeGen, S.getBeginLoc(), !C);
4884
24
}
4885
4886
/// Convert \p Val (scalar or complex) of type \p SrcType to a scalar value of
/// \p DestType.
static llvm::Value *convertToScalarValue(CodeGenFunction &CGF, RValue Val,
                                         QualType SrcType, QualType DestType,
                                         SourceLocation Loc) {
  assert(CGF.hasScalarEvaluationKind(DestType) &&
         "DestType must have scalar evaluation kind.");
  assert(!Val.isAggregate() && "Must be a scalar or complex.");
  if (Val.isScalar())
    return CGF.EmitScalarConversion(Val.getScalarVal(), SrcType, DestType,
                                    Loc);
  // Complex source: converts via EmitComplexToScalarConversion.
  return CGF.EmitComplexToScalarConversion(Val.getComplexVal(), SrcType,
                                           DestType, Loc);
}
4897
4898
/// Convert \p Val (scalar or complex) of type \p SrcType to a complex value of
/// \p DestType.
static CodeGenFunction::ComplexPairTy
convertToComplexValue(CodeGenFunction &CGF, RValue Val, QualType SrcType,
                      QualType DestType, SourceLocation Loc) {
  assert(CGF.getEvaluationKind(DestType) == TEK_Complex &&
         "DestType must have complex evaluation kind.");
  // Element type of the destination complex; needed on both paths.
  QualType DestElementType = DestType->castAs<ComplexType>()->getElementType();
  CodeGenFunction::ComplexPairTy Result;
  if (Val.isScalar()) {
    // The converted scalar becomes the real part; imaginary part is zero.
    llvm::Value *Real = CGF.EmitScalarConversion(Val.getScalarVal(), SrcType,
                                                 DestElementType, Loc);
    Result = CodeGenFunction::ComplexPairTy(
        Real, llvm::Constant::getNullValue(Real->getType()));
  } else {
    assert(Val.isComplex() && "Must be a scalar or complex.");
    // Convert real and imaginary parts element-wise.
    QualType SrcElementType = SrcType->castAs<ComplexType>()->getElementType();
    Result.first = CGF.EmitScalarConversion(
        Val.getComplexVal().first, SrcElementType, DestElementType, Loc);
    Result.second = CGF.EmitScalarConversion(
        Val.getComplexVal().second, SrcElementType, DestElementType, Loc);
  }
  return Result;
}
4924
4925
static void emitSimpleAtomicStore(CodeGenFunction &CGF, llvm::AtomicOrdering AO,
4926
110
                                  LValue LVal, RValue RVal) {
4927
110
  if (LVal.isGlobalReg())
4928
0
    CGF.EmitStoreThroughGlobalRegLValue(RVal, LVal);
4929
110
  else
4930
110
    CGF.EmitAtomicStore(RVal, LVal, AO, LVal.isVolatile(), /*isInit=*/false);
4931
110
}
4932
4933
/// Load \p LVal atomically with ordering \p AO.
///
/// Global-register lvalues cannot be loaded atomically and fall back to an
/// ordinary lvalue load.
static RValue emitSimpleAtomicLoad(CodeGenFunction &CGF,
                                   llvm::AtomicOrdering AO, LValue LVal,
                                   SourceLocation Loc) {
  if (!LVal.isGlobalReg())
    // The failure ordering derived from AO is what EmitAtomicLoad expects for
    // its cmpxchg-based fallback path.
    return CGF.EmitAtomicLoad(
        LVal, Loc, llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO),
        LVal.isVolatile());
  return CGF.EmitLoadOfLValue(LVal, Loc);
}
4942
4943
void CodeGenFunction::emitOMPSimpleStore(LValue LVal, RValue RVal,
4944
313
                                         QualType RValTy, SourceLocation Loc) {
4945
313
  switch (getEvaluationKind(LVal.getType())) {
4946
289
  case TEK_Scalar:
4947
289
    EmitStoreThroughLValue(RValue::get(convertToScalarValue(
4948
289
                               *this, RVal, RValTy, LVal.getType(), Loc)),
4949
289
                           LVal);
4950
289
    break;
4951
24
  case TEK_Complex:
4952
24
    EmitStoreOfComplex(
4953
24
        convertToComplexValue(*this, RVal, RValTy, LVal.getType(), Loc), LVal,
4954
24
        /*isInit=*/false);
4955
24
    break;
4956
0
  case TEK_Aggregate:
4957
0
    llvm_unreachable("Must be a scalar or complex.");
4958
313
  }
4959
313
}
4960
4961
static void emitOMPAtomicReadExpr(CodeGenFunction &CGF, llvm::AtomicOrdering AO,
4962
                                  const Expr *X, const Expr *V,
4963
108
                                  SourceLocation Loc) {
4964
  // v = x;
4965
108
  assert(V->isLValue() && "V of 'omp atomic read' is not lvalue");
4966
108
  assert(X->isLValue() && "X of 'omp atomic read' is not lvalue");
4967
108
  LValue XLValue = CGF.EmitLValue(X);
4968
108
  LValue VLValue = CGF.EmitLValue(V);
4969
108
  RValue Res = emitSimpleAtomicLoad(CGF, AO, XLValue, Loc);
4970
  // OpenMP, 2.17.7, atomic Construct
4971
  // If the read or capture clause is specified and the acquire, acq_rel, or
4972
  // seq_cst clause is specified then the strong flush on exit from the atomic
4973
  // operation is also an acquire flush.
4974
108
  switch (AO) {
4975
14
  case llvm::AtomicOrdering::Acquire:
4976
14
  case llvm::AtomicOrdering::AcquireRelease:
4977
14
  case llvm::AtomicOrdering::SequentiallyConsistent:
4978
14
    CGF.CGM.getOpenMPRuntime().emitFlush(CGF, llvm::None, Loc,
4979
14
                                         llvm::AtomicOrdering::Acquire);
4980
14
    break;
4981
94
  case llvm::AtomicOrdering::Monotonic:
4982
94
  case llvm::AtomicOrdering::Release:
4983
94
    break;
4984
0
  case llvm::AtomicOrdering::NotAtomic:
4985
0
  case llvm::AtomicOrdering::Unordered:
4986
0
    llvm_unreachable("Unexpected ordering.");
4987
108
  }
4988
108
  CGF.emitOMPSimpleStore(VLValue, Res, X->getType().getNonReferenceType(), Loc);
4989
108
  CGF.CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(CGF, V);
4990
108
}
4991
4992
/// Emit 'x = expr;' for '#pragma omp atomic write': the store to 'x' uses
/// ordering \p AO; 'expr' itself is evaluated non-atomically.
/// NOTE(review): EmitLValue(X) and EmitAnyExpr(E) are passed as unsequenced
/// call arguments, so their relative emission order is compiler-dependent —
/// kept as-is to avoid pinning an order the original leaves open.
static void emitOMPAtomicWriteExpr(CodeGenFunction &CGF,
                                   llvm::AtomicOrdering AO, const Expr *X,
                                   const Expr *E, SourceLocation Loc) {
  // x = expr;
  assert(X->isLValue() && "X of 'omp atomic write' is not lvalue");
  emitSimpleAtomicStore(CGF, AO, CGF.EmitLValue(X), CGF.EmitAnyExpr(E));
  // Let the runtime track 'x' for lastprivate(conditional) bookkeeping.
  CGF.CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(CGF, X);
  // OpenMP, 2.17.7, atomic Construct
  // If the write, update, or capture clause is specified and the release,
  // acq_rel, or seq_cst clause is specified then the strong flush on entry to
  // the atomic operation is also a release flush.
  switch (AO) {
  case llvm::AtomicOrdering::Release:
  case llvm::AtomicOrdering::AcquireRelease:
  case llvm::AtomicOrdering::SequentiallyConsistent:
    CGF.CGM.getOpenMPRuntime().emitFlush(CGF, llvm::None, Loc,
                                         llvm::AtomicOrdering::Release);
    break;
  case llvm::AtomicOrdering::Acquire:
  case llvm::AtomicOrdering::Monotonic:
    // No release flush needed for acquire/relaxed orderings.
    break;
  case llvm::AtomicOrdering::NotAtomic:
  case llvm::AtomicOrdering::Unordered:
    llvm_unreachable("Unexpected ordering.");
  }
}
5018
5019
static std::pair<bool, RValue> emitOMPAtomicRMW(CodeGenFunction &CGF, LValue X,
5020
                                                RValue Update,
5021
                                                BinaryOperatorKind BO,
5022
                                                llvm::AtomicOrdering AO,
5023
653
                                                bool IsXLHSInRHSPart) {
5024
653
  ASTContext &Context = CGF.getContext();
5025
  // Allow atomicrmw only if 'x' and 'update' are integer values, lvalue for 'x'
5026