Coverage Report

Created: 2022-01-18 06:27

/Users/buildslave/jenkins/workspace/coverage/llvm-project/clang/lib/CodeGen/CGStmt.cpp

//===--- CGStmt.cpp - Emit LLVM Code from Statements ----------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit Stmt nodes as LLVM code.
//
//===----------------------------------------------------------------------===//

#include "CGDebugInfo.h"
#include "CGOpenMPRuntime.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "TargetInfo.h"
#include "clang/AST/Attr.h"
#include "clang/AST/Expr.h"
#include "clang/AST/Stmt.h"
#include "clang/AST/StmtVisitor.h"
#include "clang/Basic/Builtins.h"
#include "clang/Basic/DiagnosticSema.h"
#include "clang/Basic/PrettyStackTrace.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/TargetInfo.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/IR/Assumptions.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/Support/SaveAndRestore.h"

using namespace clang;
using namespace CodeGen;

//===----------------------------------------------------------------------===//
//                              Statement Emission
//===----------------------------------------------------------------------===//

void CodeGenFunction::EmitStopPoint(const Stmt *S) {
  if (CGDebugInfo *DI = getDebugInfo()) {
    SourceLocation Loc;
    Loc = S->getBeginLoc();
    DI->EmitLocation(Builder, Loc);

    LastStopPoint = Loc;
  }
}

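// EmitStmt - Emit the code for the statement S. It is legal to call this even
// when there is no current insertion point (i.e. in unreachable code).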
void CodeGenFunction::EmitStmt(const Stmt *S, ArrayRef<const Attr *> Attrs) {
  assert(S && "Null statement?");
  PGO.setCurrentStmt(S);

  // These statements have their own debug info handling.
  if (EmitSimpleStmt(S, Attrs))
    return;

  // Check if we are generating unreachable code.
  if (!HaveInsertPoint()) {
    // If so, and the statement doesn't contain a label, then we do not need to
    // generate actual code. This is safe because (1) the current point is
    // unreachable, so we don't need to execute the code, and (2) we've already
    // handled the statements which update internal data structures (like the
    // local variable map) which could be used by subsequent statements.
    if (!ContainsLabel(S)) {
      // Verify that any decl statements were handled as simple; they may be in
      // scope of subsequent reachable statements.
      assert(!isa<DeclStmt>(*S) && "Unexpected DeclStmt!");
      return;
    }

    // Otherwise, make a new block to hold the code.
    EnsureInsertPoint();
  }

  // Generate a stoppoint if we are emitting debug info.
  EmitStopPoint(S);

  // Ignore all OpenMP directives except for simd if OpenMP with Simd is
  // enabled.
  if (getLangOpts().OpenMP && getLangOpts().OpenMPSimd) {
    if (const auto *D = dyn_cast<OMPExecutableDirective>(S)) {
      EmitSimpleOMPExecutableDirective(*D);
      return;
    }
  }

  switch (S->getStmtClass()) {
  case Stmt::NoStmtClass:
  case Stmt::CXXCatchStmtClass:
  case Stmt::SEHExceptStmtClass:
  case Stmt::SEHFinallyStmtClass:
  case Stmt::MSDependentExistsStmtClass:
    llvm_unreachable("invalid statement class to emit generically");
  case Stmt::NullStmtClass:
  case Stmt::CompoundStmtClass:
  case Stmt::DeclStmtClass:
  case Stmt::LabelStmtClass:
  case Stmt::AttributedStmtClass:
  case Stmt::GotoStmtClass:
  case Stmt::BreakStmtClass:
  case Stmt::ContinueStmtClass:
  case Stmt::DefaultStmtClass:
  case Stmt::CaseStmtClass:
  case Stmt::SEHLeaveStmtClass:
    llvm_unreachable("should have emitted these statements as simple");

#define STMT(Type, Base)
#define ABSTRACT_STMT(Op)
#define EXPR(Type, Base) \
  case Stmt::Type##Class:
#include "clang/AST/StmtNodes.inc"
  {
    // Remember the block we came in on.
    llvm::BasicBlock *incoming = Builder.GetInsertBlock();
    assert(incoming && "expression emission must have an insertion point");

    EmitIgnoredExpr(cast<Expr>(S));

    llvm::BasicBlock *outgoing = Builder.GetInsertBlock();
    assert(outgoing && "expression emission cleared block!");

    // The expression emitters assume (reasonably!) that the insertion
    // point is always set.  To maintain that, the call-emission code
    // for noreturn functions has to enter a new block with no
    // predecessors.  We want to kill that block and mark the current
    // insertion point unreachable in the common case of a call like
    // "exit();".  Since expression emission doesn't otherwise create
    // blocks with no predecessors, we can just test for that.
    // However, we must be careful not to do this to our incoming
    // block, because *statement* emission does sometimes create
    // reachable blocks which will have no predecessors until later in
    // the function.  This occurs with, e.g., labels that are not
    // reachable by fallthrough.
    if (incoming != outgoing && outgoing->use_empty()) {
      outgoing->eraseFromParent();
      Builder.ClearInsertionPoint();
    }
    break;
  }

  case Stmt::IndirectGotoStmtClass:
    EmitIndirectGotoStmt(cast<IndirectGotoStmt>(*S)); break;

  case Stmt::IfStmtClass:      EmitIfStmt(cast<IfStmt>(*S));              break;
  case Stmt::WhileStmtClass:   EmitWhileStmt(cast<WhileStmt>(*S), Attrs); break;
  case Stmt::DoStmtClass:      EmitDoStmt(cast<DoStmt>(*S), Attrs);       break;
  case Stmt::ForStmtClass:     EmitForStmt(cast<ForStmt>(*S), Attrs);     break;

  case Stmt::ReturnStmtClass:  EmitReturnStmt(cast<ReturnStmt>(*S));      break;

  case Stmt::SwitchStmtClass:  EmitSwitchStmt(cast<SwitchStmt>(*S));      break;
  case Stmt::GCCAsmStmtClass:  // Intentional fall-through.
  case Stmt::MSAsmStmtClass:   EmitAsmStmt(cast<AsmStmt>(*S));            break;
  case Stmt::CoroutineBodyStmtClass:
    EmitCoroutineBody(cast<CoroutineBodyStmt>(*S));
    break;
  case Stmt::CoreturnStmtClass:
    EmitCoreturnStmt(cast<CoreturnStmt>(*S));
    break;
  case Stmt::CapturedStmtClass: {
    const CapturedStmt *CS = cast<CapturedStmt>(S);
    EmitCapturedStmt(*CS, CS->getCapturedRegionKind());
    }
    break;
  case Stmt::ObjCAtTryStmtClass:
    EmitObjCAtTryStmt(cast<ObjCAtTryStmt>(*S));
    break;
  case Stmt::ObjCAtCatchStmtClass:
    llvm_unreachable(
                    "@catch statements should be handled by EmitObjCAtTryStmt");
  case Stmt::ObjCAtFinallyStmtClass:
    llvm_unreachable(
                  "@finally statements should be handled by EmitObjCAtTryStmt");
  case Stmt::ObjCAtThrowStmtClass:
    EmitObjCAtThrowStmt(cast<ObjCAtThrowStmt>(*S));
    break;
  case Stmt::ObjCAtSynchronizedStmtClass:
    EmitObjCAtSynchronizedStmt(cast<ObjCAtSynchronizedStmt>(*S));
    break;
  case Stmt::ObjCForCollectionStmtClass:
    EmitObjCForCollectionStmt(cast<ObjCForCollectionStmt>(*S));
    break;
  case Stmt::ObjCAutoreleasePoolStmtClass:
    EmitObjCAutoreleasePoolStmt(cast<ObjCAutoreleasePoolStmt>(*S));
    break;

  case Stmt::CXXTryStmtClass:
    EmitCXXTryStmt(cast<CXXTryStmt>(*S));
    break;
  case Stmt::CXXForRangeStmtClass:
    EmitCXXForRangeStmt(cast<CXXForRangeStmt>(*S), Attrs);
    break;
  case Stmt::SEHTryStmtClass:
    EmitSEHTryStmt(cast<SEHTryStmt>(*S));
    break;
  case Stmt::OMPMetaDirectiveClass:
    EmitOMPMetaDirective(cast<OMPMetaDirective>(*S));
    break;
  case Stmt::OMPCanonicalLoopClass:
    EmitOMPCanonicalLoop(cast<OMPCanonicalLoop>(S));
    break;
  case Stmt::OMPParallelDirectiveClass:
    EmitOMPParallelDirective(cast<OMPParallelDirective>(*S));
    break;
  case Stmt::OMPSimdDirectiveClass:
    EmitOMPSimdDirective(cast<OMPSimdDirective>(*S));
    break;
  case Stmt::OMPTileDirectiveClass:
    EmitOMPTileDirective(cast<OMPTileDirective>(*S));
    break;
  case Stmt::OMPUnrollDirectiveClass:
    EmitOMPUnrollDirective(cast<OMPUnrollDirective>(*S));
    break;
  case Stmt::OMPForDirectiveClass:
    EmitOMPForDirective(cast<OMPForDirective>(*S));
    break;
  case Stmt::OMPForSimdDirectiveClass:
    EmitOMPForSimdDirective(cast<OMPForSimdDirective>(*S));
    break;
  case Stmt::OMPSectionsDirectiveClass:
    EmitOMPSectionsDirective(cast<OMPSectionsDirective>(*S));
    break;
  case Stmt::OMPSectionDirectiveClass:
    EmitOMPSectionDirective(cast<OMPSectionDirective>(*S));
    break;
  case Stmt::OMPSingleDirectiveClass:
    EmitOMPSingleDirective(cast<OMPSingleDirective>(*S));
    break;
  case Stmt::OMPMasterDirectiveClass:
    EmitOMPMasterDirective(cast<OMPMasterDirective>(*S));
    break;
  case Stmt::OMPCriticalDirectiveClass:
    EmitOMPCriticalDirective(cast<OMPCriticalDirective>(*S));
    break;
  case Stmt::OMPParallelForDirectiveClass:
    EmitOMPParallelForDirective(cast<OMPParallelForDirective>(*S));
    break;
  case Stmt::OMPParallelForSimdDirectiveClass:
    EmitOMPParallelForSimdDirective(cast<OMPParallelForSimdDirective>(*S));
    break;
  case Stmt::OMPParallelMasterDirectiveClass:
    EmitOMPParallelMasterDirective(cast<OMPParallelMasterDirective>(*S));
    break;
  case Stmt::OMPParallelSectionsDirectiveClass:
    EmitOMPParallelSectionsDirective(cast<OMPParallelSectionsDirective>(*S));
    break;
  case Stmt::OMPTaskDirectiveClass:
    EmitOMPTaskDirective(cast<OMPTaskDirective>(*S));
    break;
  case Stmt::OMPTaskyieldDirectiveClass:
    EmitOMPTaskyieldDirective(cast<OMPTaskyieldDirective>(*S));
    break;
  case Stmt::OMPBarrierDirectiveClass:
    EmitOMPBarrierDirective(cast<OMPBarrierDirective>(*S));
    break;
  case Stmt::OMPTaskwaitDirectiveClass:
    EmitOMPTaskwaitDirective(cast<OMPTaskwaitDirective>(*S));
    break;
  case Stmt::OMPTaskgroupDirectiveClass:
    EmitOMPTaskgroupDirective(cast<OMPTaskgroupDirective>(*S));
    break;
  case Stmt::OMPFlushDirectiveClass:
    EmitOMPFlushDirective(cast<OMPFlushDirective>(*S));
    break;
  case Stmt::OMPDepobjDirectiveClass:
    EmitOMPDepobjDirective(cast<OMPDepobjDirective>(*S));
    break;
  case Stmt::OMPScanDirectiveClass:
    EmitOMPScanDirective(cast<OMPScanDirective>(*S));
    break;
  case Stmt::OMPOrderedDirectiveClass:
    EmitOMPOrderedDirective(cast<OMPOrderedDirective>(*S));
    break;
  case Stmt::OMPAtomicDirectiveClass:
    EmitOMPAtomicDirective(cast<OMPAtomicDirective>(*S));
    break;
  case Stmt::OMPTargetDirectiveClass:
    EmitOMPTargetDirective(cast<OMPTargetDirective>(*S));
    break;
  case Stmt::OMPTeamsDirectiveClass:
    EmitOMPTeamsDirective(cast<OMPTeamsDirective>(*S));
    break;
  case Stmt::OMPCancellationPointDirectiveClass:
    EmitOMPCancellationPointDirective(cast<OMPCancellationPointDirective>(*S));
    break;
  case Stmt::OMPCancelDirectiveClass:
    EmitOMPCancelDirective(cast<OMPCancelDirective>(*S));
    break;
  case Stmt::OMPTargetDataDirectiveClass:
    EmitOMPTargetDataDirective(cast<OMPTargetDataDirective>(*S));
    break;
  case Stmt::OMPTargetEnterDataDirectiveClass:
    EmitOMPTargetEnterDataDirective(cast<OMPTargetEnterDataDirective>(*S));
    break;
  case Stmt::OMPTargetExitDataDirectiveClass:
    EmitOMPTargetExitDataDirective(cast<OMPTargetExitDataDirective>(*S));
    break;
  case Stmt::OMPTargetParallelDirectiveClass:
    EmitOMPTargetParallelDirective(cast<OMPTargetParallelDirective>(*S));
    break;
  case Stmt::OMPTargetParallelForDirectiveClass:
    EmitOMPTargetParallelForDirective(cast<OMPTargetParallelForDirective>(*S));
    break;
  case Stmt::OMPTaskLoopDirectiveClass:
    EmitOMPTaskLoopDirective(cast<OMPTaskLoopDirective>(*S));
    break;
  case Stmt::OMPTaskLoopSimdDirectiveClass:
    EmitOMPTaskLoopSimdDirective(cast<OMPTaskLoopSimdDirective>(*S));
    break;
  case Stmt::OMPMasterTaskLoopDirectiveClass:
    EmitOMPMasterTaskLoopDirective(cast<OMPMasterTaskLoopDirective>(*S));
    break;
  case Stmt::OMPMasterTaskLoopSimdDirectiveClass:
    EmitOMPMasterTaskLoopSimdDirective(
        cast<OMPMasterTaskLoopSimdDirective>(*S));
    break;
  case Stmt::OMPParallelMasterTaskLoopDirectiveClass:
    EmitOMPParallelMasterTaskLoopDirective(
        cast<OMPParallelMasterTaskLoopDirective>(*S));
    break;
  case Stmt::OMPParallelMasterTaskLoopSimdDirectiveClass:
    EmitOMPParallelMasterTaskLoopSimdDirective(
        cast<OMPParallelMasterTaskLoopSimdDirective>(*S));
    break;
  case Stmt::OMPDistributeDirectiveClass:
    EmitOMPDistributeDirective(cast<OMPDistributeDirective>(*S));
    break;
  case Stmt::OMPTargetUpdateDirectiveClass:
    EmitOMPTargetUpdateDirective(cast<OMPTargetUpdateDirective>(*S));
    break;
  case Stmt::OMPDistributeParallelForDirectiveClass:
    EmitOMPDistributeParallelForDirective(
        cast<OMPDistributeParallelForDirective>(*S));
    break;
  case Stmt::OMPDistributeParallelForSimdDirectiveClass:
    EmitOMPDistributeParallelForSimdDirective(
        cast<OMPDistributeParallelForSimdDirective>(*S));
    break;
  case Stmt::OMPDistributeSimdDirectiveClass:
    EmitOMPDistributeSimdDirective(cast<OMPDistributeSimdDirective>(*S));
    break;
  case Stmt::OMPTargetParallelForSimdDirectiveClass:
    EmitOMPTargetParallelForSimdDirective(
        cast<OMPTargetParallelForSimdDirective>(*S));
    break;
  case Stmt::OMPTargetSimdDirectiveClass:
    EmitOMPTargetSimdDirective(cast<OMPTargetSimdDirective>(*S));
    break;
  case Stmt::OMPTeamsDistributeDirectiveClass:
    EmitOMPTeamsDistributeDirective(cast<OMPTeamsDistributeDirective>(*S));
    break;
  case Stmt::OMPTeamsDistributeSimdDirectiveClass:
    EmitOMPTeamsDistributeSimdDirective(
        cast<OMPTeamsDistributeSimdDirective>(*S));
    break;
  case Stmt::OMPTeamsDistributeParallelForSimdDirectiveClass:
    EmitOMPTeamsDistributeParallelForSimdDirective(
        cast<OMPTeamsDistributeParallelForSimdDirective>(*S));
    break;
  case Stmt::OMPTeamsDistributeParallelForDirectiveClass:
    EmitOMPTeamsDistributeParallelForDirective(
        cast<OMPTeamsDistributeParallelForDirective>(*S));
    break;
  case Stmt::OMPTargetTeamsDirectiveClass:
    EmitOMPTargetTeamsDirective(cast<OMPTargetTeamsDirective>(*S));
    break;
  case Stmt::OMPTargetTeamsDistributeDirectiveClass:
    EmitOMPTargetTeamsDistributeDirective(
        cast<OMPTargetTeamsDistributeDirective>(*S));
    break;
  case Stmt::OMPTargetTeamsDistributeParallelForDirectiveClass:
    EmitOMPTargetTeamsDistributeParallelForDirective(
        cast<OMPTargetTeamsDistributeParallelForDirective>(*S));
    break;
  case Stmt::OMPTargetTeamsDistributeParallelForSimdDirectiveClass:
    EmitOMPTargetTeamsDistributeParallelForSimdDirective(
        cast<OMPTargetTeamsDistributeParallelForSimdDirective>(*S));
    break;
  case Stmt::OMPTargetTeamsDistributeSimdDirectiveClass:
    EmitOMPTargetTeamsDistributeSimdDirective(
        cast<OMPTargetTeamsDistributeSimdDirective>(*S));
    break;
  case Stmt::OMPInteropDirectiveClass:
    llvm_unreachable("Interop directive not supported yet.");
    break;
  case Stmt::OMPDispatchDirectiveClass:
    llvm_unreachable("Dispatch directive not supported yet.");
    break;
  case Stmt::OMPMaskedDirectiveClass:
    EmitOMPMaskedDirective(cast<OMPMaskedDirective>(*S));
    break;
  case Stmt::OMPGenericLoopDirectiveClass:
    EmitOMPGenericLoopDirective(cast<OMPGenericLoopDirective>(*S));
    break;
  }
}

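// EmitSimpleStmt - Try to emit S as one of the "simple" statements that do
// their own debug-location handling (compound statements, declarations,
// labels, jumps, and case labels). Returns true if S was handled here.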
bool CodeGenFunction::EmitSimpleStmt(const Stmt *S,
                                     ArrayRef<const Attr *> Attrs) {
  switch (S->getStmtClass()) {
  default:
    return false;
  case Stmt::NullStmtClass:
    break;
  case Stmt::CompoundStmtClass:
    EmitCompoundStmt(cast<CompoundStmt>(*S));
    break;
  case Stmt::DeclStmtClass:
    EmitDeclStmt(cast<DeclStmt>(*S));
    break;
  case Stmt::LabelStmtClass:
    EmitLabelStmt(cast<LabelStmt>(*S));
    break;
  case Stmt::AttributedStmtClass:
    EmitAttributedStmt(cast<AttributedStmt>(*S));
    break;
  case Stmt::GotoStmtClass:
    EmitGotoStmt(cast<GotoStmt>(*S));
    break;
  case Stmt::BreakStmtClass:
    EmitBreakStmt(cast<BreakStmt>(*S));
    break;
  case Stmt::ContinueStmtClass:
    EmitContinueStmt(cast<ContinueStmt>(*S));
    break;
  case Stmt::DefaultStmtClass:
    EmitDefaultStmt(cast<DefaultStmt>(*S), Attrs);
    break;
  case Stmt::CaseStmtClass:
    EmitCaseStmt(cast<CaseStmt>(*S), Attrs);
    break;
  case Stmt::SEHLeaveStmtClass:
    EmitSEHLeaveStmt(cast<SEHLeaveStmt>(*S));
    break;
  }
  return true;
}

/// EmitCompoundStmt - Emit a compound statement {..} node.  If GetLast is true,
/// this captures the expression result of the last sub-statement and returns it
/// (for use by the statement expression extension).
Address CodeGenFunction::EmitCompoundStmt(const CompoundStmt &S, bool GetLast,
                                          AggValueSlot AggSlot) {
  PrettyStackTraceLoc CrashInfo(getContext().getSourceManager(),S.getLBracLoc(),
                             "LLVM IR generation of compound statement ('{}')");

  // Keep track of the current cleanup stack depth, including debug scopes.
  LexicalScope Scope(*this, S.getSourceRange());

  return EmitCompoundStmtWithoutScope(S, GetLast, AggSlot);
}

Address
CodeGenFunction::EmitCompoundStmtWithoutScope(const CompoundStmt &S,
                                              bool GetLast,
                                              AggValueSlot AggSlot) {

  const Stmt *ExprResult = S.getStmtExprResult();
  assert((!GetLast || (GetLast && ExprResult)) &&
         "If GetLast is true then the CompoundStmt must have a StmtExprResult");

  Address RetAlloca = Address::invalid();

  for (auto *CurStmt : S.body()) {
    if (GetLast && ExprResult == CurStmt) {
      // We have to special case labels here.  They are statements, but when put
      // at the end of a statement expression, they yield the value of their
      // subexpression.  Handle this by walking through all labels we encounter,
      // emitting them before we evaluate the subexpr.
      // Similar issues arise for attributed statements.
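      // For example, in the statement expression "({ L: x; })" the label L is
      // emitted first, and the expression "x" then supplies the result value.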
      while (!isa<Expr>(ExprResult)) {
        if (const auto *LS = dyn_cast<LabelStmt>(ExprResult)) {
          EmitLabel(LS->getDecl());
          ExprResult = LS->getSubStmt();
        } else if (const auto *AS = dyn_cast<AttributedStmt>(ExprResult)) {
          // FIXME: Update this if we ever have attributes that affect the
          // semantics of an expression.
          ExprResult = AS->getSubStmt();
        } else {
          llvm_unreachable("unknown value statement");
        }
      }

      EnsureInsertPoint();

      const Expr *E = cast<Expr>(ExprResult);
      QualType ExprTy = E->getType();
      if (hasAggregateEvaluationKind(ExprTy)) {
        EmitAggExpr(E, AggSlot);
      } else {
        // We can't return an RValue here because there might be cleanups at
        // the end of the StmtExpr.  Because of that, we have to emit the result
        // here into a temporary alloca.
        RetAlloca = CreateMemTemp(ExprTy);
        EmitAnyExprToMem(E, RetAlloca, Qualifiers(),
                         /*IsInit*/ false);
      }
    } else {
      EmitStmt(CurStmt);
    }
  }

  return RetAlloca;
}

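// SimplifyForwardingBlocks - Fold an empty block that ends in an unconditional
// branch into its successor and erase it, when it is safe to do so. A typical
// candidate is the skipped condition header of a "while (1)" loop.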
void CodeGenFunction::SimplifyForwardingBlocks(llvm::BasicBlock *BB) {
  llvm::BranchInst *BI = dyn_cast<llvm::BranchInst>(BB->getTerminator());

  // If there is a cleanup stack, then it isn't worth trying to
  // simplify this block (we would need to remove it from the scope map
  // and cleanup entry).
  if (!EHStack.empty())
    return;

  // Can only simplify direct branches.
  if (!BI || !BI->isUnconditional())
    return;

  // Can only simplify empty blocks.
  if (BI->getIterator() != BB->begin())
    return;

  BB->replaceAllUsesWith(BI->getSuccessor(0));
  BI->eraseFromParent();
  BB->eraseFromParent();
}

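// EmitBlock - Add the given block to the function and make it the current
// insertion point, falling through from the previous block if necessary. If
// IsFinished is true and the block ends up unused, it is deleted instead.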
void CodeGenFunction::EmitBlock(llvm::BasicBlock *BB, bool IsFinished) {
  llvm::BasicBlock *CurBB = Builder.GetInsertBlock();

  // Fall out of the current block (if necessary).
  EmitBranch(BB);

  if (IsFinished && BB->use_empty()) {
    delete BB;
    return;
  }

  // Place the block after the current block, if possible, or else at
  // the end of the function.
  if (CurBB && CurBB->getParent())
    CurFn->getBasicBlockList().insertAfter(CurBB->getIterator(), BB);
  else
    CurFn->getBasicBlockList().push_back(BB);
  Builder.SetInsertPoint(BB);
}

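// EmitBranch - Terminate the current block with a fall-through branch to
// Target unless it is already terminated or there is no insertion point; the
// insertion point is cleared in all cases.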
void CodeGenFunction::EmitBranch(llvm::BasicBlock *Target) {
  // Emit a branch from the current block to the target one if this
  // was a real block.  If this was just a fall-through block after a
  // terminator, don't emit it.
  llvm::BasicBlock *CurBB = Builder.GetInsertBlock();

  if (!CurBB || CurBB->getTerminator()) {
    // If there is no insert point or the previous block is already
    // terminated, don't touch it.
  } else {
    // Otherwise, create a fall-through branch.
    Builder.CreateBr(Target);
  }

  Builder.ClearInsertionPoint();
}

void CodeGenFunction::EmitBlockAfterUses(llvm::BasicBlock *block) {
  bool inserted = false;
  for (llvm::User *u : block->users()) {
    if (llvm::Instruction *insn = dyn_cast<llvm::Instruction>(u)) {
      CurFn->getBasicBlockList().insertAfter(insn->getParent()->getIterator(),
                                             block);
      inserted = true;
      break;
    }
  }

  if (!inserted)
    CurFn->getBasicBlockList().push_back(block);

  Builder.SetInsertPoint(block);
}

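// getJumpDestForLabel - Return the jump destination for the given label,
// creating its (not yet inserted) block lazily on first use.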
CodeGenFunction::JumpDest
CodeGenFunction::getJumpDestForLabel(const LabelDecl *D) {
  JumpDest &Dest = LabelMap[D];
  if (Dest.isValid()) return Dest;

  // Create, but don't insert, the new block.
  Dest = JumpDest(createBasicBlock(D->getName()),
                  EHScopeStack::stable_iterator::invalid(),
                  NextCleanupDestIndex++);
  return Dest;
}

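// EmitLabel - Emit the block for the given label and pin down its cleanup
// scope depth, resolving any branch fixups that were waiting for it.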
void CodeGenFunction::EmitLabel(const LabelDecl *D) {
  // Add this label to the current lexical scope if we're within any
  // normal cleanups.  Jumps "in" to this label --- when permitted by
  // the language --- may need to be routed around such cleanups.
  if (EHStack.hasNormalCleanups() && CurLexicalScope)
    CurLexicalScope->addLabel(D);

  JumpDest &Dest = LabelMap[D];

  // If we didn't need a forward reference to this label, just go
  // ahead and create a destination at the current scope.
  if (!Dest.isValid()) {
    Dest = getJumpDestInCurrentScope(D->getName());

  // Otherwise, we need to give this label a target depth and remove
  // it from the branch-fixups list.
  } else {
    assert(!Dest.getScopeDepth().isValid() && "already emitted label!");
    Dest.setScopeDepth(EHStack.stable_begin());
    ResolveBranchFixups(Dest.getBlock());
  }

  EmitBlock(Dest.getBlock());

  // Emit debug info for labels.
  if (CGDebugInfo *DI = getDebugInfo()) {
    if (CGM.getCodeGenOpts().hasReducedDebugInfo()) {
      DI->setLocation(D->getLocation());
      DI->EmitLabel(D, Builder);
    }
  }

  incrementProfileCounter(D->getStmt());
}

/// Change the cleanup scope of the labels in this lexical scope to
/// match the scope of the enclosing context.
void CodeGenFunction::LexicalScope::rescopeLabels() {
  assert(!Labels.empty());
  EHScopeStack::stable_iterator innermostScope
    = CGF.EHStack.getInnermostNormalCleanup();

  // Change the scope depth of all the labels.
  for (SmallVectorImpl<const LabelDecl*>::const_iterator
         i = Labels.begin(), e = Labels.end(); i != e; ++i) {
    assert(CGF.LabelMap.count(*i));
    JumpDest &dest = CGF.LabelMap.find(*i)->second;
    assert(dest.getScopeDepth().isValid());
    assert(innermostScope.encloses(dest.getScopeDepth()));
    dest.setScopeDepth(innermostScope);
  }

  // Reparent the labels if the new scope also has cleanups.
  if (innermostScope != EHScopeStack::stable_end() && ParentScope) {
    ParentScope->Labels.append(Labels.begin(), Labels.end());
  }
}

void CodeGenFunction::EmitLabelStmt(const LabelStmt &S) {
  EmitLabel(S.getDecl());

  // IsEHa - emit eha.scope.begin if it's a side entry of a scope
  if (getLangOpts().EHAsynch && S.isSideEntry())
    EmitSehCppScopeBegin();

  EmitStmt(S.getSubStmt());
}

void CodeGenFunction::EmitAttributedStmt(const AttributedStmt &S) {
  bool nomerge = false;
  const CallExpr *musttail = nullptr;

  for (const auto *A : S.getAttrs()) {
    if (A->getKind() == attr::NoMerge) {
      nomerge = true;
    }
    if (A->getKind() == attr::MustTail) {
      const Stmt *Sub = S.getSubStmt();
      const ReturnStmt *R = cast<ReturnStmt>(Sub);
      musttail = cast<CallExpr>(R->getRetValue()->IgnoreParens());
    }
  }
  SaveAndRestore<bool> save_nomerge(InNoMergeAttributedStmt, nomerge);
  SaveAndRestore<const CallExpr *> save_musttail(MustTailCall, musttail);
  EmitStmt(S.getSubStmt(), S.getAttrs());
}

void CodeGenFunction::EmitGotoStmt(const GotoStmt &S) {
  // If this code is reachable then emit a stop point (if generating
  // debug info). We have to do this ourselves because we are on the
  // "simple" statement path.
  if (HaveInsertPoint())
    EmitStopPoint(&S);

  EmitBranchThroughCleanup(getJumpDestForLabel(S.getLabel()));
}

void CodeGenFunction::EmitIndirectGotoStmt(const IndirectGotoStmt &S) {
  if (const LabelDecl *Target = S.getConstantTarget()) {
    EmitBranchThroughCleanup(getJumpDestForLabel(Target));
    return;
  }

  // Ensure that we have an i8* for our PHI node.
  llvm::Value *V = Builder.CreateBitCast(EmitScalarExpr(S.getTarget()),
                                         Int8PtrTy, "addr");
  llvm::BasicBlock *CurBB = Builder.GetInsertBlock();

  // Get the basic block for the indirect goto.
  llvm::BasicBlock *IndGotoBB = GetIndirectGotoBlock();

  // The first instruction in the block has to be the PHI for the switch dest,
  // add an entry for this branch.
  cast<llvm::PHINode>(IndGotoBB->begin())->addIncoming(V, CurBB);

  EmitBranch(IndGotoBB);
}

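// EmitIfStmt - Emit an if statement. A constant-foldable condition may have
// its dead arm elided entirely, and for "if consteval" only the arm that can
// be runtime-evaluated is ever emitted.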
void CodeGenFunction::EmitIfStmt(const IfStmt &S) {
  // The else branch of a consteval if statement is always the only branch that
  // can be runtime evaluated.
  if (S.isConsteval()) {
    const Stmt *Executed = S.isNegatedConsteval() ? S.getThen() : S.getElse();
    if (Executed) {
      RunCleanupsScope ExecutedScope(*this);
      EmitStmt(Executed);
    }
    return;
  }

  // C99 6.8.4.1: The first substatement is executed if the expression compares
  // unequal to 0.  The condition must be a scalar type.
  LexicalScope ConditionScope(*this, S.getCond()->getSourceRange());

  if (S.getInit())
    EmitStmt(S.getInit());

  if (S.getConditionVariable())
    EmitDecl(*S.getConditionVariable());

  // If the condition constant folds and can be elided, try to avoid emitting
  // the condition and the dead arm of the if/else.
  bool CondConstant;
  if (ConstantFoldsToSimpleInteger(S.getCond(), CondConstant,
                                   S.isConstexpr())) {
    // Figure out which block (then or else) is executed.
    const Stmt *Executed = S.getThen();
    const Stmt *Skipped  = S.getElse();
    if (!CondConstant)  // Condition false?
      std::swap(Executed, Skipped);

    // If the skipped block has no labels in it, just emit the executed block.
    // This avoids emitting dead code and simplifies the CFG substantially.
    if (S.isConstexpr() || !ContainsLabel(Skipped)) {
      if (CondConstant)
        incrementProfileCounter(&S);
      if (Executed) {
        RunCleanupsScope ExecutedScope(*this);
        EmitStmt(Executed);
      }
      return;
    }
  }

  // Otherwise, the condition did not fold, or we couldn't elide it.  Just emit
  // the conditional branch.
  llvm::BasicBlock *ThenBlock = createBasicBlock("if.then");
  llvm::BasicBlock *ContBlock = createBasicBlock("if.end");
  llvm::BasicBlock *ElseBlock = ContBlock;
  if (S.getElse())
    ElseBlock = createBasicBlock("if.else");

  // Prefer the PGO based weights over the likelihood attribute.
  // When the build isn't optimized the metadata isn't used, so don't generate
  // it.
  Stmt::Likelihood LH = Stmt::LH_None;
  uint64_t Count = getProfileCount(S.getThen());
  if (!Count && CGM.getCodeGenOpts().OptimizationLevel)
    LH = Stmt::getLikelihood(S.getThen(), S.getElse());
  EmitBranchOnBoolExpr(S.getCond(), ThenBlock, ElseBlock, Count, LH);

  // Emit the 'then' code.
  EmitBlock(ThenBlock);
  incrementProfileCounter(&S);
  {
    RunCleanupsScope ThenScope(*this);
    EmitStmt(S.getThen());
  }
  EmitBranch(ContBlock);

  // Emit the 'else' code if present.
  if (const Stmt *Else = S.getElse()) {
    {
      // There is no need to emit line number for an unconditional branch.
      auto NL = ApplyDebugLocation::CreateEmpty(*this);
      EmitBlock(ElseBlock);
    }
    {
      RunCleanupsScope ElseScope(*this);
      EmitStmt(Else);
    }
    {
      // There is no need to emit line number for an unconditional branch.
      auto NL = ApplyDebugLocation::CreateEmpty(*this);
      EmitBranch(ContBlock);
    }
  }

  // Emit the continuation block for code after the if.
  EmitBlock(ContBlock, true);
}

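// EmitWhileStmt - Emit a while loop: the header block that re-evaluates the
// condition doubles as the continue target, and the exit block doubles as the
// break target.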
void CodeGenFunction::EmitWhileStmt(const WhileStmt &S,
                                    ArrayRef<const Attr *> WhileAttrs) {
  // Emit the header for the loop, which will also become
  // the continue target.
  JumpDest LoopHeader = getJumpDestInCurrentScope("while.cond");
  EmitBlock(LoopHeader.getBlock());

  // Create an exit block for when the condition fails, which will
  // also become the break target.
  JumpDest LoopExit = getJumpDestInCurrentScope("while.end");

  // Store the blocks to use for break and continue.
  BreakContinueStack.push_back(BreakContinue(LoopExit, LoopHeader));

  // C++ [stmt.while]p2:
  //   When the condition of a while statement is a declaration, the
  //   scope of the variable that is declared extends from its point
  //   of declaration (3.3.2) to the end of the while statement.
  //   [...]
  //   The object created in a condition is destroyed and created
  //   with each iteration of the loop.
  RunCleanupsScope ConditionScope(*this);

  if (S.getConditionVariable())
    EmitDecl(*S.getConditionVariable());

  // Evaluate the conditional in the while header.  C99 6.8.5.1: The
  // evaluation of the controlling expression takes place before each
  // execution of the loop body.
  llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());

  // while(1) is common, avoid extra exit blocks.  Be sure
  // to correctly handle break/continue though.
  llvm::ConstantInt *C = dyn_cast<llvm::ConstantInt>(BoolCondVal);
  bool CondIsConstInt = C != nullptr;
  bool EmitBoolCondBranch = !CondIsConstInt || !C->isOne();
  const SourceRange &R = S.getSourceRange();
  LoopStack.push(LoopHeader.getBlock(), CGM.getContext(), CGM.getCodeGenOpts(),
                 WhileAttrs, SourceLocToDebugLoc(R.getBegin()),
                 SourceLocToDebugLoc(R.getEnd()),
                 checkIfLoopMustProgress(CondIsConstInt));

  // As long as the condition is true, go to the loop body.
  llvm::BasicBlock *LoopBody = createBasicBlock("while.body");
  if (EmitBoolCondBranch) {
    llvm::BasicBlock *ExitBlock = LoopExit.getBlock();
    if (ConditionScope.requiresCleanups())
      ExitBlock = createBasicBlock("while.exit");
    llvm::MDNode *Weights =
        createProfileWeightsForLoop(S.getCond(), getProfileCount(S.getBody()));
    if (!Weights && CGM.getCodeGenOpts().OptimizationLevel)
      BoolCondVal = emitCondLikelihoodViaExpectIntrinsic(
          BoolCondVal, Stmt::getLikelihood(S.getBody()));
    Builder.CreateCondBr(BoolCondVal, LoopBody, ExitBlock, Weights);

    if (ExitBlock != LoopExit.getBlock()) {
      EmitBlock(ExitBlock);
      EmitBranchThroughCleanup(LoopExit);
    }
  } else if (const Attr *A = Stmt::getLikelihoodAttr(S.getBody())) {
    CGM.getDiags().Report(A->getLocation(),
                          diag::warn_attribute_has_no_effect_on_infinite_loop)
        << A << A->getRange();
    CGM.getDiags().Report(
        S.getWhileLoc(),
        diag::note_attribute_has_no_effect_on_infinite_loop_here)
        << SourceRange(S.getWhileLoc(), S.getRParenLoc());
  }

  // Emit the loop body.  We have to emit this in a cleanup scope
  // because it might be a singleton DeclStmt.
  {
    RunCleanupsScope BodyScope(*this);
    EmitBlock(LoopBody);
    incrementProfileCounter(&S);
    EmitStmt(S.getBody());
  }

  BreakContinueStack.pop_back();

  // Immediately force cleanup.
  ConditionScope.ForceCleanup();

  EmitStopPoint(&S);
  // Branch to the loop header again.
  EmitBranch(LoopHeader.getBlock());

  LoopStack.pop();

  // Emit the exit block.
  EmitBlock(LoopExit.getBlock(), true);

  // If we skipped emitting the condition branch, the LoopHeader is
  // typically just a forwarding branch; try to erase it.
  if (!EmitBoolCondBranch)
    SimplifyForwardingBlocks(LoopHeader.getBlock());
}

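// EmitDoStmt - Emit a do/while loop. Unlike a while loop, the body is entered
// unconditionally and the condition is evaluated afterwards, at the continue
// target.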
void CodeGenFunction::EmitDoStmt(const DoStmt &S,
                                 ArrayRef<const Attr *> DoAttrs) {
  JumpDest LoopExit = getJumpDestInCurrentScope("do.end");
  JumpDest LoopCond = getJumpDestInCurrentScope("do.cond");

  uint64_t ParentCount = getCurrentProfileCount();

  // Store the blocks to use for break and continue.
  BreakContinueStack.push_back(BreakContinue(LoopExit, LoopCond));

  // Emit the body of the loop.
  llvm::BasicBlock *LoopBody = createBasicBlock("do.body");

  EmitBlockWithFallThrough(LoopBody, &S);
  {
    RunCleanupsScope BodyScope(*this);
    EmitStmt(S.getBody());
  }

  EmitBlock(LoopCond.getBlock());

  // C99 6.8.5.2: "The evaluation of the controlling expression takes place
  // after each execution of the loop body."

  // Evaluate the conditional in the while header.
  // C99 6.8.5p2/p4: The first substatement is executed if the expression
  // compares unequal to 0.  The condition must be a scalar type.
  llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());

  BreakContinueStack.pop_back();

  // "do {} while (0)" is common in macros, avoid extra blocks.  Be sure
  // to correctly handle break/continue though.
  llvm::ConstantInt *C = dyn_cast<llvm::ConstantInt>(BoolCondVal);
  bool CondIsConstInt = C;
  bool EmitBoolCondBranch = !C || !C->isZero();

  const SourceRange &R = S.getSourceRange();
  LoopStack.push(LoopBody, CGM.getContext(), CGM.getCodeGenOpts(), DoAttrs,
                 SourceLocToDebugLoc(R.getBegin()),
                 SourceLocToDebugLoc(R.getEnd()),
                 checkIfLoopMustProgress(CondIsConstInt));

  // As long as the condition is true, iterate the loop.
  if (EmitBoolCondBranch) {
    uint64_t BackedgeCount = getProfileCount(S.getBody()) - ParentCount;
    Builder.CreateCondBr(
        BoolCondVal, LoopBody, LoopExit.getBlock(),
        createProfileWeightsForLoop(S.getCond(), BackedgeCount));
  }

  LoopStack.pop();

  // Emit the exit block.
  EmitBlock(LoopExit.getBlock());

  // If we skipped emitting the condition branch, the DoCond block is
  // typically just a forwarding branch; try to erase it.
  if (!EmitBoolCondBranch)
    SimplifyForwardingBlocks(LoopCond.getBlock());
}

void CodeGenFunction::EmitForStmt(const ForStmt &S,
                                  ArrayRef<const Attr *> ForAttrs) {
  JumpDest LoopExit = getJumpDestInCurrentScope("for.end");

  LexicalScope ForScope(*this, S.getSourceRange());

  // Evaluate the first part before the loop.
  if (S.getInit())
    EmitStmt(S.getInit());

  // Start the loop with a block that tests the condition.
  // If there's an increment, the continue scope will be overwritten
  // later.
  JumpDest CondDest = getJumpDestInCurrentScope("for.cond");
  llvm::BasicBlock *CondBlock = CondDest.getBlock();
  EmitBlock(CondBlock);

  Expr::EvalResult Result;
  bool CondIsConstInt =
      !S.getCond() || S.getCond()->EvaluateAsInt(Result, getContext());

  const SourceRange &R = S.getSourceRange();
  LoopStack.push(CondBlock, CGM.getContext(), CGM.getCodeGenOpts(), ForAttrs,
                 SourceLocToDebugLoc(R.getBegin()),
                 SourceLocToDebugLoc(R.getEnd()),
                 checkIfLoopMustProgress(CondIsConstInt));

  // Create a cleanup scope for the condition variable cleanups.
  LexicalScope ConditionScope(*this, S.getSourceRange());

  // If the for loop doesn't have an increment we can just use the condition as
  // the continue block. Otherwise, if there is no condition variable, we can
  // form the continue block now. If there is a condition variable, we can't
  // form the continue block until after we've emitted the condition, because
  // the condition is in scope in the increment, but Sema's jump diagnostics
  // ensure that there are no continues from the condition variable that jump
  // to the loop increment.
  JumpDest Continue;
  if (!S.getInc())
    Continue = CondDest;
  else if (!S.getConditionVariable())
    Continue = getJumpDestInCurrentScope("for.inc");
  BreakContinueStack.push_back(BreakContinue(LoopExit, Continue));

  if (S.getCond()) {
    // If the for statement has a condition scope, emit the local variable
    // declaration.
    if (S.getConditionVariable()) {
      EmitDecl(*S.getConditionVariable());

      // We have entered the condition variable's scope, so we're now able to
      // jump to the continue block.
      Continue = S.getInc() ? getJumpDestInCurrentScope("for.inc") : CondDest;
      BreakContinueStack.back().ContinueBlock = Continue;
    }

    llvm::BasicBlock *ExitBlock = LoopExit.getBlock();
    // If there are any cleanups between here and the loop-exit scope,
    // create a block to stage a loop exit along.
    if (ForScope.requiresCleanups())
      ExitBlock = createBasicBlock("for.cond.cleanup");

    // As long as the condition is true, iterate the loop.
    llvm::BasicBlock *ForBody = createBasicBlock("for.body");

    // C99 6.8.5p2/p4: The first substatement is executed if the expression
    // compares unequal to 0.  The condition must be a scalar type.
    llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());
    llvm::MDNode *Weights =
        createProfileWeightsForLoop(S.getCond(), getProfileCount(S.getBody()));
    if (!Weights && CGM.getCodeGenOpts().OptimizationLevel)
      BoolCondVal = emitCondLikelihoodViaExpectIntrinsic(
          BoolCondVal, Stmt::getLikelihood(S.getBody()));

    Builder.CreateCondBr(BoolCondVal, ForBody, ExitBlock, Weights);

    if (ExitBlock != LoopExit.getBlock()) {
      EmitBlock(ExitBlock);
      EmitBranchThroughCleanup(LoopExit);
    }

    EmitBlock(ForBody);
  } else {
    // Treat it as a non-zero constant.  Don't even create a new block for the
    // body, just fall into it.
  }
  incrementProfileCounter(&S);

  {
    // Create a separate cleanup scope for the body, in case it is not
    // a compound statement.
    RunCleanupsScope BodyScope(*this);
    EmitStmt(S.getBody());
  }

  // If there is an increment, emit it next.
  if (S.getInc()) {
    EmitBlock(Continue.getBlock());
    EmitStmt(S.getInc());
  }

  BreakContinueStack.pop_back();

  ConditionScope.ForceCleanup();

  EmitStopPoint(&S);
  EmitBranch(CondBlock);

  ForScope.ForceCleanup();

  LoopStack.pop();

  // Emit the fall-through block.
  EmitBlock(LoopExit.getBlock(), true);
}

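// EmitCXXForRangeStmt - Emit a C++ range-based for loop. The range, begin and
// end variables are emitted once before the loop; the loop variable is bound
// inside the body scope on every iteration.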
void
CodeGenFunction::EmitCXXForRangeStmt(const CXXForRangeStmt &S,
                                     ArrayRef<const Attr *> ForAttrs) {
  JumpDest LoopExit = getJumpDestInCurrentScope("for.end");

  LexicalScope ForScope(*this, S.getSourceRange());

  // Evaluate the first pieces before the loop.
  if (S.getInit())
    EmitStmt(S.getInit());
  EmitStmt(S.getRangeStmt());
  EmitStmt(S.getBeginStmt());
  EmitStmt(S.getEndStmt());

  // Start the loop with a block that tests the condition.
  // If there's an increment, the continue scope will be overwritten
  // later.
  llvm::BasicBlock *CondBlock = createBasicBlock("for.cond");
  EmitBlock(CondBlock);

  const SourceRange &R = S.getSourceRange();
  LoopStack.push(CondBlock, CGM.getContext(), CGM.getCodeGenOpts(), ForAttrs,
                 SourceLocToDebugLoc(R.getBegin()),
                 SourceLocToDebugLoc(R.getEnd()));

  // If there are any cleanups between here and the loop-exit scope,
  // create a block to stage a loop exit along.
  llvm::BasicBlock *ExitBlock = LoopExit.getBlock();
  if (ForScope.requiresCleanups())
    ExitBlock = createBasicBlock("for.cond.cleanup");

  // The loop body, consisting of the specified body and the loop variable.
  llvm::BasicBlock *ForBody = createBasicBlock("for.body");

  // The body is executed if the expression, contextually converted
  // to bool, is true.
  llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());
  llvm::MDNode *Weights =
      createProfileWeightsForLoop(S.getCond(), getProfileCount(S.getBody()));
  if (!Weights && CGM.getCodeGenOpts().OptimizationLevel)
    BoolCondVal = emitCondLikelihoodViaExpectIntrinsic(
        BoolCondVal, Stmt::getLikelihood(S.getBody()));
  Builder.CreateCondBr(BoolCondVal, ForBody, ExitBlock, Weights);

  if (ExitBlock != LoopExit.getBlock()) {
    EmitBlock(ExitBlock);
    EmitBranchThroughCleanup(LoopExit);
  }

  EmitBlock(ForBody);
  incrementProfileCounter(&S);

  // Create a block for the increment. In case of a 'continue', we jump there.
  JumpDest Continue = getJumpDestInCurrentScope("for.inc");

  // Store the blocks to use for break and continue.
  BreakContinueStack.push_back(BreakContinue(LoopExit, Continue));

  {
    // Create a separate cleanup scope for the loop variable and body.
    LexicalScope BodyScope(*this, S.getSourceRange());
    EmitStmt(S.getLoopVarStmt());
    EmitStmt(S.getBody());
  }

  EmitStopPoint(&S);
  // If there is an increment, emit it next.
  EmitBlock(Continue.getBlock());
  EmitStmt(S.getInc());

  BreakContinueStack.pop_back();

  EmitBranch(CondBlock);

  ForScope.ForceCleanup();

  LoopStack.pop();

  // Emit the fall-through block.
  EmitBlock(LoopExit.getBlock(), true);
}

void CodeGenFunction::EmitReturnOfRValue(RValue RV, QualType Ty) {
  if (RV.isScalar()) {
    Builder.CreateStore(RV.getScalarVal(), ReturnValue);
  } else if (RV.isAggregate()) {
    LValue Dest = MakeAddrLValue(ReturnValue, Ty);
    LValue Src = MakeAddrLValue(RV.getAggregateAddress(), Ty);
    EmitAggregateCopy(Dest, Src, Ty, getOverlapForReturnValue());
  } else {
    EmitStoreOfComplex(RV.getComplexVal(), MakeAddrLValue(ReturnValue, Ty),
                       /*init*/ true);
  }
  EmitBranchThroughCleanup(ReturnBlock);
}

namespace {
// RAII struct used to save and restore a return statement's result expression.
struct SaveRetExprRAII {
  SaveRetExprRAII(const Expr *RetExpr, CodeGenFunction &CGF)
      : OldRetExpr(CGF.RetExpr), CGF(CGF) {
    CGF.RetExpr = RetExpr;
  }
  ~SaveRetExprRAII() { CGF.RetExpr = OldRetExpr; }
  const Expr *OldRetExpr;
  CodeGenFunction &CGF;
};
} // namespace

/// If we have 'return f(...);', where both caller and callee are SwiftAsync,
/// codegen it as 'tail call ...; ret void;'.
static void makeTailCallIfSwiftAsync(const CallExpr *CE, CGBuilderTy &Builder,
                                     const CGFunctionInfo *CurFnInfo) {
  auto calleeQualType = CE->getCallee()->getType();
  const FunctionType *calleeType = nullptr;
  if (calleeQualType->isFunctionPointerType() ||
      calleeQualType->isFunctionReferenceType() ||
      calleeQualType->isBlockPointerType() ||
      calleeQualType->isMemberFunctionPointerType()) {
    calleeType = calleeQualType->getPointeeType()->castAs<FunctionType>();
  } else if (auto *ty = dyn_cast<FunctionType>(calleeQualType)) {
    calleeType = ty;
  } else if (auto CMCE = dyn_cast<CXXMemberCallExpr>(CE)) {
    if (auto methodDecl = CMCE->getMethodDecl()) {
      // getMethodDecl() doesn't handle member pointers at the moment.
      calleeType = methodDecl->getType()->castAs<FunctionType>();
    } else {
      return;
    }
  } else {
    return;
  }
  if (calleeType->getCallConv() == CallingConv::CC_SwiftAsync &&
      (CurFnInfo->getASTCallingConvention() == CallingConv::CC_SwiftAsync)) {
    auto CI = cast<llvm::CallInst>(&Builder.GetInsertBlock()->back());
    CI->setTailCallKind(llvm::CallInst::TCK_MustTail);
    Builder.CreateRetVoid();
    Builder.ClearInsertionPoint();
  }
}

1228
1229
/// EmitReturnStmt - Note that due to GCC extensions, this can have an operand
1230
/// if the function returns void, or may be missing one if the function returns
1231
/// non-void.  Fun stuff :).
1232
153k
void CodeGenFunction::EmitReturnStmt(const ReturnStmt &S) {
1233
153k
  if (requiresReturnValueCheck()) {
1234
15
    llvm::Constant *SLoc = EmitCheckSourceLocation(S.getBeginLoc());
1235
15
    auto *SLocPtr =
1236
15
        new llvm::GlobalVariable(CGM.getModule(), SLoc->getType(), false,
1237
15
                                 llvm::GlobalVariable::PrivateLinkage, SLoc);
1238
15
    SLocPtr->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
1239
15
    CGM.getSanitizerMetadata()->disableSanitizerForGlobal(SLocPtr);
1240
15
    assert(ReturnLocation.isValid() && "No valid return location");
1241
0
    Builder.CreateStore(Builder.CreateBitCast(SLocPtr, Int8PtrTy),
1242
15
                        ReturnLocation);
1243
15
  }
1244
1245
  // Returning from an outlined SEH helper is UB, and we already warn on it.
1246
153k
  if (IsOutlinedSEHHelper) {
1247
8
    Builder.CreateUnreachable();
1248
8
    Builder.ClearInsertionPoint();
1249
8
  }
1250
1251
  // Emit the result value, even if unused, to evaluate the side effects.
1252
153k
  const Expr *RV = S.getRetValue();
1253
1254
  // Record the result expression of the return statement. The recorded
1255
  // expression is used to determine whether a block capture's lifetime should
1256
  // end at the end of the full expression as opposed to the end of the scope
1257
  // enclosing the block expression.
1258
  //
1259
  // This permits a small, easily-implemented exception to our over-conservative
1260
  // rules about not jumping to statements following block literals with
1261
  // non-trivial cleanups.
1262
153k
  SaveRetExprRAII SaveRetExpr(RV, *this);
1263
1264
153k
  RunCleanupsScope cleanupScope(*this);
1265
153k
  if (const auto *EWC = dyn_cast_or_null<ExprWithCleanups>(RV))
1266
5.65k
    RV = EWC->getSubExpr();
1267
  // FIXME: Clean this up by using an LValue for ReturnTemp,
1268
  // EmitStoreThroughLValue, and EmitAnyExpr.
1269
  // Check if the NRVO candidate was not globalized in OpenMP mode.
1270
153k
  if (getLangOpts().ElideConstructors && 
S.getNRVOCandidate()153k
&&
1271
153k
      
S.getNRVOCandidate()->isNRVOVariable()1.38k
&&
1272
153k
      
(1.37k
!getLangOpts().OpenMP1.37k
||
1273
1.37k
       !CGM.getOpenMPRuntime()
1274
82
            .getAddressOfLocalVariable(*this, S.getNRVOCandidate())
1275
1.37k
            .isValid())) {
1276
    // Apply the named return value optimization for this return statement,
1277
    // which means doing nothing: the appropriate result has already been
1278
    // constructed into the NRVO variable.
1279
1280
    // If there is an NRVO flag for this variable, set it to 1 into indicate
1281
    // that the cleanup code should not destroy the variable.
1282
1.37k
    if (llvm::Value *NRVOFlag = NRVOFlags[S.getNRVOCandidate()])
1283
224
      Builder.CreateFlagStore(Builder.getTrue(), NRVOFlag);
1284
152k
  } else if (!ReturnValue.isValid() || 
(148k
RV148k
&&
RV->getType()->isVoidType()148k
)) {
1285
    // Make sure not to return anything, but evaluate the expression
1286
    // for side effects.
1287
3.92k
    if (RV) {
1288
3.03k
      EmitAnyExpr(RV);
1289
3.03k
      if (auto *CE = dyn_cast<CallExpr>(RV))
1290
2.99k
        makeTailCallIfSwiftAsync(CE, Builder, CurFnInfo);
1291
3.03k
    }
1292
148k
  } else if (!RV) {
1293
    // Do nothing (return value is left uninitialized)
1294
148k
  } else if (FnRetTy->isReferenceType()) {
1295
    // If this function returns a reference, take the address of the expression
1296
    // rather than the value.
1297
23.8k
    RValue Result = EmitReferenceBindingToExpr(RV);
1298
23.8k
    Builder.CreateStore(Result.getScalarVal(), ReturnValue);
1299
124k
  } else {
1300
124k
    switch (getEvaluationKind(RV->getType())) {
1301
117k
    case TEK_Scalar:
1302
117k
      Builder.CreateStore(EmitScalarExpr(RV), ReturnValue);
1303
117k
      break;
1304
663
    case TEK_Complex:
1305
663
      EmitComplexExprIntoLValue(RV, MakeAddrLValue(ReturnValue, RV->getType()),
1306
663
                                /*isInit*/ true);
1307
663
      break;
1308
6.26k
    case TEK_Aggregate:
1309
6.26k
      EmitAggExpr(RV, AggValueSlot::forAddr(
1310
6.26k
                          ReturnValue, Qualifiers(),
1311
6.26k
                          AggValueSlot::IsDestructed,
1312
6.26k
                          AggValueSlot::DoesNotNeedGCBarriers,
1313
6.26k
                          AggValueSlot::IsNotAliased,
1314
6.26k
                          getOverlapForReturnValue()));
1315
6.26k
      break;
1316
124k
    }
1317
124k
  }
1318
1319
153k
  ++NumReturnExprs;
1320
153k
  if (!RV || RV->isEvaluatable(getContext()))
1321
15.8k
    ++NumSimpleReturnExprs;
1322
1323
153k
  cleanupScope.ForceCleanup();
1324
153k
  EmitBranchThroughCleanup(ReturnBlock);
1325
153k
}
1326
1327
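For reference, a minimal caller-side sketch (not from this file; names are hypothetical) of the named return value optimization that the return-statement path above implements by doing nothing:

// Hypothetical example: 'Result' is an NRVO variable, so it is constructed
// directly in the caller's return slot and the return statement emits no
// copy; at most the NRVO flag is set so cleanups skip the destructor.
struct Widget { int Data[16]; };

Widget makeWidget() {
  Widget Result;      // NRVO candidate, built in the return slot
  Result.Data[0] = 1;
  return Result;      // no copy or move is emitted here
}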
194k
void CodeGenFunction::EmitDeclStmt(const DeclStmt &S) {
1328
  // As long as debug info is modeled with instructions, we have to ensure we
1329
  // have a place to insert here and write the stop point here.
1330
194k
  if (HaveInsertPoint())
1331
194k
    EmitStopPoint(&S);
1332
1333
194k
  for (const auto *I : S.decls())
1334
195k
    EmitDecl(*I);
1335
194k
}
1336
1337
5.65k
void CodeGenFunction::EmitBreakStmt(const BreakStmt &S) {
1338
5.65k
  assert(!BreakContinueStack.empty() && "break stmt not in a loop or switch!");
1339
1340
  // If this code is reachable then emit a stop point (if generating
1341
  // debug info). We have to do this ourselves because we are on the
1342
  // "simple" statement path.
1343
5.65k
  if (HaveInsertPoint())
1344
5.65k
    EmitStopPoint(&S);
1345
1346
5.65k
  EmitBranchThroughCleanup(BreakContinueStack.back().BreakBlock);
1347
5.65k
}
1348
1349
10.7k
void CodeGenFunction::EmitContinueStmt(const ContinueStmt &S) {
1350
10.7k
  assert(!BreakContinueStack.empty() && "continue stmt not in a loop!");
1351
1352
  // If this code is reachable then emit a stop point (if generating
1353
  // debug info). We have to do this ourselves because we are on the
1354
  // "simple" statement path.
1355
10.7k
  if (HaveInsertPoint())
1356
10.7k
    EmitStopPoint(&S);
1357
1358
10.7k
  EmitBranchThroughCleanup(BreakContinueStack.back().ContinueBlock);
1359
10.7k
}
1360
1361
/// EmitCaseStmtRange - If the case statement range is not too big, then
1362
/// add multiple cases to the switch instruction, one for each value within
1363
/// the range. If the range is too big, then emit an "if" condition check.
1364
void CodeGenFunction::EmitCaseStmtRange(const CaseStmt &S,
1365
43
                                        ArrayRef<const Attr *> Attrs) {
1366
43
  assert(S.getRHS() && "Expected RHS value in CaseStmt");
1367
1368
0
  llvm::APSInt LHS = S.getLHS()->EvaluateKnownConstInt(getContext());
1369
43
  llvm::APSInt RHS = S.getRHS()->EvaluateKnownConstInt(getContext());
1370
1371
  // Emit the code for this case. We do this first to make sure it is
1372
  // properly chained from our predecessor before generating the
1373
  // switch machinery to enter this block.
1374
43
  llvm::BasicBlock *CaseDest = createBasicBlock("sw.bb");
1375
43
  EmitBlockWithFallThrough(CaseDest, &S);
1376
43
  EmitStmt(S.getSubStmt());
1377
1378
  // If range is empty, do nothing.
1379
43
  if (LHS.isSigned() ? RHS.slt(LHS) : RHS.ult(LHS))
1380
4
    return;
1381
1382
39
  Stmt::Likelihood LH = Stmt::getLikelihood(Attrs);
1383
39
  llvm::APInt Range = RHS - LHS;
1384
  // FIXME: parameters such as this should not be hardcoded.
1385
39
  if (Range.ult(llvm::APInt(Range.getBitWidth(), 64))) {
1386
    // Range is small enough to add multiple switch instruction cases.
1387
22
    uint64_t Total = getProfileCount(&S);
1388
22
    unsigned NCases = Range.getZExtValue() + 1;
1389
    // We only have one region counter for the entire set of cases here, so we
1390
    // need to divide the weights evenly between the generated cases, ensuring
1391
    // that the total weight is preserved. E.g., a weight of 5 over three cases
1392
    // will be distributed as weights of 2, 2, and 1.
1393
22
    uint64_t Weight = Total / NCases, Rem = Total % NCases;
1394
103
    for (unsigned I = 0; I != NCases; ++I) {
1395
81
      if (SwitchWeights)
1396
24
        SwitchWeights->push_back(Weight + (Rem ? 1 : 0));
1397
57
      else if (SwitchLikelihood)
1398
35
        SwitchLikelihood->push_back(LH);
1399
1400
81
      if (Rem)
1401
4
        Rem--;
1402
81
      SwitchInsn->addCase(Builder.getInt(LHS), CaseDest);
1403
81
      ++LHS;
1404
81
    }
1405
22
    return;
1406
22
  }
1407
1408
  // The range is too big. Emit "if" condition into a new block,
1409
  // making sure to save and restore the current insertion point.
1410
17
  llvm::BasicBlock *RestoreBB = Builder.GetInsertBlock();
1411
1412
  // Push this test onto the chain of range checks (which terminates
1413
  // in the default basic block). The switch's default will be changed
1414
  // to the top of this chain after switch emission is complete.
1415
17
  llvm::BasicBlock *FalseDest = CaseRangeBlock;
1416
17
  CaseRangeBlock = createBasicBlock("sw.caserange");
1417
1418
17
  CurFn->getBasicBlockList().push_back(CaseRangeBlock);
1419
17
  Builder.SetInsertPoint(CaseRangeBlock);
1420
1421
  // Emit range check.
1422
17
  llvm::Value *Diff =
1423
17
    Builder.CreateSub(SwitchInsn->getCondition(), Builder.getInt(LHS));
1424
17
  llvm::Value *Cond =
1425
17
    Builder.CreateICmpULE(Diff, Builder.getInt(Range), "inbounds");
1426
1427
17
  llvm::MDNode *Weights = nullptr;
1428
17
  if (SwitchWeights) {
1429
8
    uint64_t ThisCount = getProfileCount(&S);
1430
8
    uint64_t DefaultCount = (*SwitchWeights)[0];
1431
8
    Weights = createProfileWeights(ThisCount, DefaultCount);
1432
1433
    // Since we're chaining the switch default through each large case range, we
1434
    // need to update the weight for the default, i.e., the first case, to include
1435
    // this case.
1436
8
    (*SwitchWeights)[0] += ThisCount;
1437
9
  } else if (SwitchLikelihood)
1438
7
    Cond = emitCondLikelihoodViaExpectIntrinsic(Cond, LH);
1439
1440
17
  Builder.CreateCondBr(Cond, CaseDest, FalseDest, Weights);
1441
1442
  // Restore the appropriate insertion point.
1443
17
  if (RestoreBB)
1444
13
    Builder.SetInsertPoint(RestoreBB);
1445
4
  else
1446
4
    Builder.ClearInsertionPoint();
1447
17
}
1448
1449
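As a sketch of the fallback strategy above, for a hypothetical GNU range 'case 10 ... 1000:' the chained check emits, in effect:

//   %diff     = sub i32 %cond, 10          ; Cond - LHS
//   %inbounds = icmp ule i32 %diff, 990    ; Diff <= (RHS - LHS)
// the classic unsigned (x - lo) <= (hi - lo) trick, branching to the case
// body on success and to the previous range check (or default) on failure.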
void CodeGenFunction::EmitCaseStmt(const CaseStmt &S,
1450
985
                                   ArrayRef<const Attr *> Attrs) {
1451
  // If there is no enclosing switch instance that we're aware of, then this
1452
  // case statement and its block can be elided.  This situation only happens
1453
  // when we've constant-folded the switch, are emitting the constant case,
1454
  // and part of the constant case includes another case statement.  For
1455
  // instance: switch (4) { case 4: do { case 5: } while (1); }
1456
985
  if (!SwitchInsn) {
1457
2
    EmitStmt(S.getSubStmt());
1458
2
    return;
1459
2
  }
1460
1461
  // Handle case ranges.
1462
983
  if (S.getRHS()) {
1463
43
    EmitCaseStmtRange(S, Attrs);
1464
43
    return;
1465
43
  }
1466
1467
940
  llvm::ConstantInt *CaseVal =
1468
940
    Builder.getInt(S.getLHS()->EvaluateKnownConstInt(getContext()));
1469
940
  if (SwitchLikelihood)
1470
96
    SwitchLikelihood->push_back(Stmt::getLikelihood(Attrs));
1471
1472
  // If the body of the case is just a 'break', try to not emit an empty block.
1473
  // If we're profiling or we're not optimizing, leave the block in for better
1474
  // debug and coverage analysis.
1475
940
  if (!CGM.getCodeGenOpts().hasProfileClangInstr() &&
1476
940
      CGM.getCodeGenOpts().OptimizationLevel > 0 &&
1477
940
      isa<BreakStmt>(S.getSubStmt())) {
1478
15
    JumpDest Block = BreakContinueStack.back().BreakBlock;
1479
1480
    // Only do this optimization if there are no cleanups that need emitting.
1481
15
    if (isObviouslyBranchWithoutCleanups(Block)) {
1482
15
      if (SwitchWeights)
1483
0
        SwitchWeights->push_back(getProfileCount(&S));
1484
15
      SwitchInsn->addCase(CaseVal, Block.getBlock());
1485
1486
      // If there was a fallthrough into this case, make sure to redirect it to
1487
      // the end of the switch as well.
1488
15
      if (Builder.GetInsertBlock()) {
1489
0
        Builder.CreateBr(Block.getBlock());
1490
0
        Builder.ClearInsertionPoint();
1491
0
      }
1492
15
      return;
1493
15
    }
1494
15
  }
1495
1496
925
  llvm::BasicBlock *CaseDest = createBasicBlock("sw.bb");
1497
925
  EmitBlockWithFallThrough(CaseDest, &S);
1498
925
  if (SwitchWeights)
1499
47
    SwitchWeights->push_back(getProfileCount(&S));
1500
925
  SwitchInsn->addCase(CaseVal, CaseDest);
1501
1502
  // Recursively emitting the statement is acceptable, but is not wonderful for
1503
  // code where we have many case statements nested together, i.e.:
1504
  //  case 1:
1505
  //    case 2:
1506
  //      case 3: etc.
1507
  // Handling this recursively will create a new block for each case statement
1508
  // that falls through to the next case, which is IR intensive.  It also causes
1509
  // deep recursion, which can run into stack depth limitations.  Handle
1510
  // sequential non-range case statements specially.
1511
  //
1512
  // TODO When the next case has a likelihood attribute the code returns to the
1513
  // recursive algorithm. Maybe improve this case if it becomes common practice
1514
  // to use a lot of attributes.
1515
925
  const CaseStmt *CurCase = &S;
1516
925
  const CaseStmt *NextCase = dyn_cast<CaseStmt>(S.getSubStmt());
1517
1518
  // Otherwise, iteratively add consecutive cases to this switch stmt.
1519
1.03k
  while (NextCase && NextCase->getRHS() == nullptr) {
1520
112
    CurCase = NextCase;
1521
112
    llvm::ConstantInt *CaseVal =
1522
112
      Builder.getInt(CurCase->getLHS()->EvaluateKnownConstInt(getContext()));
1523
1524
112
    if (SwitchWeights)
1525
0
      SwitchWeights->push_back(getProfileCount(NextCase));
1526
112
    if (CGM.getCodeGenOpts().hasProfileClangInstr()) {
1527
3
      CaseDest = createBasicBlock("sw.bb");
1528
3
      EmitBlockWithFallThrough(CaseDest, CurCase);
1529
3
    }
1530
    // Since this loop is only executed when the CaseStmt has no attributes
1531
    // use a hard-coded value.
1532
112
    if (SwitchLikelihood)
1533
14
      SwitchLikelihood->push_back(Stmt::LH_None);
1534
1535
112
    SwitchInsn->addCase(CaseVal, CaseDest);
1536
112
    NextCase = dyn_cast<CaseStmt>(CurCase->getSubStmt());
1537
112
  }
1538
1539
  // Generate a stop point for debug info if the case statement is
1540
  // followed by a default statement. A fallthrough case before a
1541
  // default case gets its own branch target.
1542
925
  if (CurCase->getSubStmt()->getStmtClass() == Stmt::DefaultStmtClass)
1543
2
    EmitStopPoint(CurCase);
1544
1545
  // Normal default recursion for non-cases.
1546
925
  EmitStmt(CurCase->getSubStmt());
1547
925
}
1548
1549
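For illustration (hypothetical source), the stacked labels the iterative while loop above is designed for:

// switch (v) {
// case 1:            // each label is added to the SwitchInst iteratively,
// case 2:            // with no recursion and no per-label basic block
// case 3:
//   return handle(); // hypothetical handler
// }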
void CodeGenFunction::EmitDefaultStmt(const DefaultStmt &S,
1550
182
                                      ArrayRef<const Attr *> Attrs) {
1551
  // If there is no enclosing switch instance that we're aware of, then this
1552
  // default statement can be elided. This situation only happens when we've
1553
  // constant-folded the switch.
1554
182
  if (!SwitchInsn) {
1555
1
    EmitStmt(S.getSubStmt());
1556
1
    return;
1557
1
  }
1558
1559
181
  llvm::BasicBlock *DefaultBlock = SwitchInsn->getDefaultDest();
1560
181
  assert(DefaultBlock->empty() &&
1561
181
         "EmitDefaultStmt: Default block already defined?");
1562
1563
181
  if (SwitchLikelihood)
1564
28
    SwitchLikelihood->front() = Stmt::getLikelihood(Attrs);
1565
1566
181
  EmitBlockWithFallThrough(DefaultBlock, &S);
1567
1568
181
  EmitStmt(S.getSubStmt());
1569
181
}
1570
1571
/// CollectStatementsForCase - Given the body of a 'switch' statement and a
1572
/// constant value that is being switched on, see if we can dead code eliminate
1573
/// the body of the switch to a simple series of statements to emit.  Basically,
1574
/// on a switch (5) we want to find these statements:
1575
///    case 5:
1576
///      printf(...);    <--
1577
///      ++i;            <--
1578
///      break;
1579
///
1580
/// and add them to the ResultStmts vector.  If it is unsafe to do this
1581
/// transformation (for example, one of the elided statements contains a label
1582
/// that might be jumped to), return CSFC_Failure.  If we handled it and 'S'
1583
/// should include statements after it (e.g. the printf() line is a substmt of
1584
/// the case) then return CSFC_FallThrough.  If we handled it and found a break
1585
/// statement, then return CSFC_Success.
1586
///
1587
/// If Case is non-null, then we are looking for the specified case, checking
1588
/// that nothing we jump over contains labels.  If Case is null, then we found
1589
/// the case and are looking for the break.
1590
///
1591
/// If the recursive walk actually finds our Case, then we set FoundCase to
1592
/// true.
1593
///
1594
enum CSFC_Result { CSFC_Failure, CSFC_FallThrough, CSFC_Success };
1595
static CSFC_Result CollectStatementsForCase(const Stmt *S,
1596
                                            const SwitchCase *Case,
1597
                                            bool &FoundCase,
1598
137
                              SmallVectorImpl<const Stmt*> &ResultStmts) {
1599
  // If this is a null statement, just succeed.
1600
137
  if (!S)
1601
0
    return Case ? CSFC_Success : CSFC_FallThrough;
1602
1603
  // If this is the switchcase (case 4: or default) that we're looking for, then
1604
  // we're in business.  Just add the substatement.
1605
137
  if (const SwitchCase *SC = dyn_cast<SwitchCase>(S)) {
1606
42
    if (S == Case) {
1607
30
      FoundCase = true;
1608
30
      return CollectStatementsForCase(SC->getSubStmt(), nullptr, FoundCase,
1609
30
                                      ResultStmts);
1610
30
    }
1611
1612
    // Otherwise, this is some other case or default statement, just ignore it.
1613
12
    return CollectStatementsForCase(SC->getSubStmt(), Case, FoundCase,
1614
12
                                    ResultStmts);
1615
42
  }
1616
1617
  // If we are in the live part of the code and we found our break statement,
1618
  // return a success!
1619
95
  if (!Case && isa<BreakStmt>(S))
1620
12
    return CSFC_Success;
1621
1622
  // If this is a switch statement, then it might contain the SwitchCase, the
1623
  // break, or neither.
1624
83
  if (const CompoundStmt *CS = dyn_cast<CompoundStmt>(S)) {
1625
    // Handle this as two cases: we might be looking for the SwitchCase (if so
1626
    // the skipped statements must be skippable) or we might already have it.
1627
35
    CompoundStmt::const_body_iterator I = CS->body_begin(), E = CS->body_end();
1628
35
    bool StartedInLiveCode = FoundCase;
1629
35
    unsigned StartSize = ResultStmts.size();
1630
1631
    // If we've not found the case yet, scan through looking for it.
1632
35
    if (Case) {
1633
      // Keep track of whether we see a skipped declaration.  The code could be
1634
      // using the declaration even if it is skipped, so we can't optimize out
1635
      // the decl if the kept statements might refer to it.
1636
28
      bool HadSkippedDecl = false;
1637
1638
      // If we're looking for the case, just see if we can skip each of the
1639
      // substatements.
1640
60
      for (; Case && I != E; ++I) {
1641
45
        HadSkippedDecl |= CodeGenFunction::mightAddDeclToScope(*I);
1642
1643
45
        switch (CollectStatementsForCase(*I, Case, FoundCase, ResultStmts)) {
1644
2
        case CSFC_Failure: return CSFC_Failure;
1645
26
        case CSFC_Success:
1646
          // A successful result means either 1) that the statement doesn't
1647
          // have the case and is skippable, or 2) that it does contain the case
1648
          // value and also contains the break to exit the switch.  In the latter
1649
          // case, we just verify the rest of the statements are elidable.
1650
26
          if (FoundCase) {
1651
            // If we found the case and skipped declarations, we can't do the
1652
            // optimization.
1653
8
            if (HadSkippedDecl)
1654
0
              return CSFC_Failure;
1655
1656
18
            for (++I; I != E; ++I)
1657
10
              if (CodeGenFunction::ContainsLabel(*I, true))
1658
0
                return CSFC_Failure;
1659
8
            return CSFC_Success;
1660
8
          }
1661
18
          break;
1662
18
        case CSFC_FallThrough:
1663
          // If we have a fallthrough condition, then we must have found the
1664
          // case and started to include statements.  Consider the rest of the
1665
          // statements in the compound statement as candidates for inclusion.
1666
17
          assert(FoundCase && "Didn't find case but returned fallthrough?");
1667
          // We recursively found Case, so we're not looking for it anymore.
1668
0
          Case = nullptr;
1669
1670
          // If we found the case and skipped declarations, we can't do the
1671
          // optimization.
1672
17
          if (HadSkippedDecl)
1673
3
            return CSFC_Failure;
1674
14
          break;
1675
45
        }
1676
45
      }
1677
1678
15
      if (!FoundCase)
1679
1
        return CSFC_Success;
1680
1681
14
      assert(!HadSkippedDecl && "fallthrough after skipping decl");
1682
14
    }
1683
1684
    // If we have statements in our range, then we know that the statements are
1685
    // live and need to be added to the set of statements we're tracking.
1686
21
    bool AnyDecls = false;
1687
36
    for (; I != E; ++I) {
1688
20
      AnyDecls |= CodeGenFunction::mightAddDeclToScope(*I);
1689
1690
20
      switch (CollectStatementsForCase(*I, nullptr, FoundCase, ResultStmts)) {
1691
0
      case CSFC_Failure: return CSFC_Failure;
1692
15
      case CSFC_FallThrough:
1693
        // A fallthrough result means that the statement was simple and just
1694
        // included in ResultStmt, keep adding them afterwards.
1695
15
        break;
1696
5
      case CSFC_Success:
1697
        // A successful result means that we found the break statement and
1698
        // stopped statement inclusion.  We just ensure that any leftover stmts
1699
        // are skippable and return success ourselves.
1700
5
        for (++I; I != E; ++I)
1701
0
          if (CodeGenFunction::ContainsLabel(*I, true))
1702
0
            return CSFC_Failure;
1703
5
        return CSFC_Success;
1704
20
      }
1705
20
    }
1706
1707
    // If we're about to fall out of a scope without hitting a 'break;', we
1708
    // can't perform the optimization if there were any decls in that scope
1709
    // (we'd lose their end-of-lifetime).
1710
16
    if (AnyDecls) {
1711
      // If the entire compound statement was live, there's one more thing we
1712
      // can try before giving up: emit the whole thing as a single statement.
1713
      // We can do that unless the statement contains a 'break;'.
1714
      // FIXME: Such a break must be at the end of a construct within this one.
1715
      // We could emit this by just ignoring the BreakStmts entirely.
1716
3
      if (StartedInLiveCode && !CodeGenFunction::containsBreak(S)) {
1717
3
        ResultStmts.resize(StartSize);
1718
3
        ResultStmts.push_back(S);
1719
3
      } else {
1720
0
        return CSFC_Failure;
1721
0
      }
1722
3
    }
1723
1724
16
    return CSFC_FallThrough;
1725
16
  }
1726
1727
  // Okay, this is some other statement that we don't handle explicitly, like a
1728
  // for statement or increment etc.  If we are skipping over this statement,
1729
  // just verify it doesn't have labels, which would make it invalid to elide.
1730
48
  if (Case) {
1731
17
    if (CodeGenFunction::ContainsLabel(S, true))
1732
0
      return CSFC_Failure;
1733
17
    return CSFC_Success;
1734
17
  }
1735
1736
  // Otherwise, we want to include this statement.  Everything is cool with that
1737
  // so long as it doesn't contain a break out of the switch we're in.
1738
31
  if (CodeGenFunction::containsBreak(S)) return CSFC_Failure;
1739
1740
  // Otherwise, everything is great.  Include the statement and tell the caller
1741
  // that we fall through and include the next statement as well.
1742
30
  ResultStmts.push_back(S);
1743
30
  return CSFC_FallThrough;
1744
31
}
1745
1746
/// FindCaseStatementsForValue - Find the case statement being jumped to and
1747
/// then invoke CollectStatementsForCase to find the list of statements to emit
1748
/// for a switch on constant.  See the comment above CollectStatementsForCase
1749
/// for more details.
1750
static bool FindCaseStatementsForValue(const SwitchStmt &S,
1751
                                       const llvm::APSInt &ConstantCondValue,
1752
                                SmallVectorImpl<const Stmt*> &ResultStmts,
1753
                                       ASTContext &C,
1754
46
                                       const SwitchCase *&ResultCase) {
1755
  // First step, find the switch case that is being branched to.  We can do this
1756
  // efficiently by scanning the SwitchCase list.
1757
46
  const SwitchCase *Case = S.getSwitchCaseList();
1758
46
  const DefaultStmt *DefaultCase = nullptr;
1759
1760
82
  for (; Case; Case = Case->getNextSwitchCase()) {
1761
    // It's either a default or case.  Just remember the default statement in
1762
    // case we're not jumping to any numbered cases.
1763
62
    if (const DefaultStmt *DS = dyn_cast<DefaultStmt>(Case)) {
1764
18
      DefaultCase = DS;
1765
18
      continue;
1766
18
    }
1767
1768
    // Check to see if this case is the one we're looking for.
1769
44
    const CaseStmt *CS = cast<CaseStmt>(Case);
1770
    // Don't handle case ranges yet.
1771
44
    if (CS->getRHS()) return false;
1772
1773
    // If we found our case, remember it as 'case'.
1774
37
    if (CS->getLHS()->EvaluateKnownConstInt(C) == ConstantCondValue)
1775
19
      break;
1776
37
  }
1777
1778
  // If we didn't find a matching case, we use a default if it exists, or we
1779
  // elide the whole switch body!
1780
39
  if (!Case) {
1781
    // It is safe to elide the body of the switch if it doesn't contain labels
1782
    // etc.  If it is safe, return successfully with an empty ResultStmts list.
1783
20
    if (!DefaultCase)
1784
9
      return !CodeGenFunction::ContainsLabel(&S);
1785
11
    Case = DefaultCase;
1786
11
  }
1787
1788
  // Ok, we know which case is being jumped to, try to collect all the
1789
  // statements that follow it.  This can fail for a variety of reasons.  Also,
1790
  // check to see that the recursive walk actually found our case statement.
1791
  // Insane cases like this can fail to find it in the recursive walk since we
1792
  // don't handle every stmt kind:
1793
  // switch (4) {
1794
  //   while (1) {
1795
  //     case 4: ...
1796
30
  bool FoundCase = false;
1797
30
  ResultCase = Case;
1798
30
  return CollectStatementsForCase(S.getBody(), Case, FoundCase,
1799
30
                                  ResultStmts) != CSFC_Failure &&
1800
30
         FoundCase;
1801
39
}
1802
1803
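A minimal sketch (not from this file) of the constant-switch shape these helpers target:

int f() {
  switch (4) {   // the condition constant-folds, so only 'case 4' is live
  case 4:
    return 42;   // collected by CollectStatementsForCase
  default:
    return 0;    // dead; the switch machinery is never emitted
  }
}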
static Optional<SmallVector<uint64_t, 16>>
1804
55
getLikelihoodWeights(ArrayRef<Stmt::Likelihood> Likelihoods) {
1805
  // Are there enough branches to weight them?
1806
55
  if (Likelihoods.size() <= 1)
1807
9
    return None;
1808
1809
46
  uint64_t NumUnlikely = 0;
1810
46
  uint64_t NumNone = 0;
1811
46
  uint64_t NumLikely = 0;
1812
191
  for (const auto LH : Likelihoods) {
1813
191
    switch (LH) {
1814
8
    case Stmt::LH_Unlikely:
1815
8
      ++NumUnlikely;
1816
8
      break;
1817
172
    case Stmt::LH_None:
1818
172
      ++NumNone;
1819
172
      break;
1820
11
    case Stmt::LH_Likely:
1821
11
      ++NumLikely;
1822
11
      break;
1823
191
    }
1824
191
  }
1825
1826
  // Is there a likelihood attribute used?
1827
46
  if (NumUnlikely == 0 && NumLikely == 0)
1828
30
    return None;
1829
1830
  // When multiple cases share the same code they can be combined during
1831
  // optimization. In that case the weights of the branch will be the sum of
1832
  // the individual weights. Make sure the combined sum of all neutral cases
1833
  // doesn't exceed the value of a single likely attribute.
1834
  // The additions both avoid divisions by 0 and make sure the weights of None
1835
  // don't exceed the weight of Likely.
1836
16
  const uint64_t Likely = INT32_MAX / (NumLikely + 2);
1837
16
  const uint64_t None = Likely / (NumNone + 1);
1838
16
  const uint64_t Unlikely = 0;
1839
1840
16
  SmallVector<uint64_t, 16> Result;
1841
16
  Result.reserve(Likelihoods.size());
1842
60
  for (const auto LH : Likelihoods) {
1843
60
    switch (LH) {
1844
8
    case Stmt::LH_Unlikely:
1845
8
      Result.push_back(Unlikely);
1846
8
      break;
1847
41
    case Stmt::LH_None:
1848
41
      Result.push_back(None);
1849
41
      break;
1850
11
    case Stmt::LH_Likely:
1851
11
      Result.push_back(Likely);
1852
11
      break;
1853
60
    }
1854
60
  }
1855
1856
16
  return Result;
1857
16
}
1858
1859
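Worked through on a hypothetical case list {LH_Likely, LH_None, LH_None}, the formulas above give:

// Likely   = INT32_MAX / (NumLikely + 2) = 2147483647 / 3 = 715827882
// None     = Likely / (NumNone + 1)      = 715827882 / 3  = 238609294
// Unlikely = 0
// Even if the optimizer merges both LH_None cases, their combined weight
// (477218588) stays below that of the single LH_Likely branch.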
442
void CodeGenFunction::EmitSwitchStmt(const SwitchStmt &S) {
1860
  // Handle nested switch statements.
1861
442
  llvm::SwitchInst *SavedSwitchInsn = SwitchInsn;
1862
442
  SmallVector<uint64_t, 16> *SavedSwitchWeights = SwitchWeights;
1863
442
  SmallVector<Stmt::Likelihood, 16> *SavedSwitchLikelihood = SwitchLikelihood;
1864
442
  llvm::BasicBlock *SavedCRBlock = CaseRangeBlock;
1865
1866
  // See if we can constant fold the condition of the switch and therefore only
1867
  // emit the live case statement (if any) of the switch.
1868
442
  llvm::APSInt ConstantCondValue;
1869
442
  if (ConstantFoldsToSimpleInteger(S.getCond(), ConstantCondValue)) {
1870
46
    SmallVector<const Stmt*, 4> CaseStmts;
1871
46
    const SwitchCase *Case = nullptr;
1872
46
    if (FindCaseStatementsForValue(S, ConstantCondValue, CaseStmts,
1873
46
                                   getContext(), Case)) {
1874
35
      if (Case)
1875
26
        incrementProfileCounter(Case);
1876
35
      RunCleanupsScope ExecutedScope(*this);
1877
1878
35
      if (S.getInit())
1879
0
        EmitStmt(S.getInit());
1880
1881
      // Emit the condition variable if needed inside the entire cleanup scope
1882
      // used by this special case for constant folded switches.
1883
35
      if (S.getConditionVariable())
1884
0
        EmitDecl(*S.getConditionVariable());
1885
1886
      // At this point, we are no longer "within" a switch instance, so
1887
      // we can temporarily enforce this to ensure that any embedded case
1888
      // statements are not emitted.
1889
35
      SwitchInsn = nullptr;
1890
1891
      // Okay, we can dead code eliminate everything except this case.  Emit the
1892
      // specified series of statements and we're good.
1893
59
      for (unsigned i = 0, e = CaseStmts.size(); i != e; ++i)
1894
24
        EmitStmt(CaseStmts[i]);
1895
35
      incrementProfileCounter(&S);
1896
1897
      // Now we want to restore the saved switch instance so that nested
1898
      // switches continue to function properly
1899
35
      SwitchInsn = SavedSwitchInsn;
1900
1901
35
      return;
1902
35
    }
1903
46
  }
1904
1905
407
  JumpDest SwitchExit = getJumpDestInCurrentScope("sw.epilog");
1906
1907
407
  RunCleanupsScope ConditionScope(*this);
1908
1909
407
  if (S.getInit())
1910
7
    EmitStmt(S.getInit());
1911
1912
407
  if (S.getConditionVariable())
1913
4
    EmitDecl(*S.getConditionVariable());
1914
407
  llvm::Value *CondV = EmitScalarExpr(S.getCond());
1915
1916
  // Create basic block to hold stuff that comes after switch
1917
  // statement. We also need to create a default block now so that
1918
  // explicit case ranges tests can have a place to jump to on
1919
  // failure.
1920
407
  llvm::BasicBlock *DefaultBlock = createBasicBlock("sw.default");
1921
407
  SwitchInsn = Builder.CreateSwitch(CondV, DefaultBlock);
1922
407
  if (PGO.haveRegionCounts()) {
1923
    // Walk the SwitchCase list to find how many there are.
1924
23
    uint64_t DefaultCount = 0;
1925
23
    unsigned NumCases = 0;
1926
23
    for (const SwitchCase *Case = S.getSwitchCaseList();
1927
101
         Case;
1928
78
         Case = Case->getNextSwitchCase()) {
1929
78
      if (isa<DefaultStmt>(Case))
1930
15
        DefaultCount = getProfileCount(Case);
1931
78
      NumCases += 1;
1932
78
    }
1933
23
    SwitchWeights = new SmallVector<uint64_t, 16>();
1934
23
    SwitchWeights->reserve(NumCases);
1935
    // The default needs to be first. We store the edge count, so we already
1936
    // know the right weight.
1937
23
    SwitchWeights->push_back(DefaultCount);
1938
384
  } else if (CGM.getCodeGenOpts().OptimizationLevel) {
1939
55
    SwitchLikelihood = new SmallVector<Stmt::Likelihood, 16>();
1940
    // Initialize the default case.
1941
55
    SwitchLikelihood->push_back(Stmt::LH_None);
1942
55
  }
1943
1944
407
  CaseRangeBlock = DefaultBlock;
1945
1946
  // Clear the insertion point to indicate we are in unreachable code.
1947
407
  Builder.ClearInsertionPoint();
1948
1949
  // All break statements jump to NextBlock. If BreakContinueStack is non-empty
1950
  // then reuse last ContinueBlock.
1951
407
  JumpDest OuterContinue;
1952
407
  if (!BreakContinueStack.empty())
1953
44
    OuterContinue = BreakContinueStack.back().ContinueBlock;
1954
1955
407
  BreakContinueStack.push_back(BreakContinue(SwitchExit, OuterContinue));
1956
1957
  // Emit switch body.
1958
407
  EmitStmt(S.getBody());
1959
1960
407
  BreakContinueStack.pop_back();
1961
1962
  // Update the default block in case explicit case range tests have
1963
  // been chained on top.
1964
407
  SwitchInsn->setDefaultDest(CaseRangeBlock);
1965
1966
  // If a default was never emitted:
1967
407
  if (!DefaultBlock->getParent()) {
1968
    // If we have cleanups, emit the default block so that there's a
1969
    // place to jump through the cleanups from.
1970
226
    if (ConditionScope.requiresCleanups()) {
1971
0
      EmitBlock(DefaultBlock);
1972
1973
    // Otherwise, just forward the default block to the switch end.
1974
226
    } else {
1975
226
      DefaultBlock->replaceAllUsesWith(SwitchExit.getBlock());
1976
226
      delete DefaultBlock;
1977
226
    }
1978
226
  }
1979
1980
407
  ConditionScope.ForceCleanup();
1981
1982
  // Emit continuation.
1983
407
  EmitBlock(SwitchExit.getBlock(), true);
1984
407
  incrementProfileCounter(&S);
1985
1986
  // If the switch has a condition wrapped by __builtin_unpredictable,
1987
  // create metadata that specifies that the switch is unpredictable.
1988
  // Don't bother if not optimizing because that metadata would not be used.
1989
407
  auto *Call = dyn_cast<CallExpr>(S.getCond());
1990
407
  if (Call && CGM.getCodeGenOpts().OptimizationLevel != 0) {
1991
6
    auto *FD = dyn_cast_or_null<FunctionDecl>(Call->getCalleeDecl());
1992
6
    if (FD && FD->getBuiltinID() == Builtin::BI__builtin_unpredictable) {
1993
2
      llvm::MDBuilder MDHelper(getLLVMContext());
1994
2
      SwitchInsn->setMetadata(llvm::LLVMContext::MD_unpredictable,
1995
2
                              MDHelper.createUnpredictable());
1996
2
    }
1997
6
  }
1998
1999
407
  if (SwitchWeights) {
2000
23
    assert(SwitchWeights->size() == 1 + SwitchInsn->getNumCases() &&
2001
23
           "switch weights do not match switch cases");
2002
    // If there's only one jump destination there's no sense weighting it.
2003
23
    if (SwitchWeights->size() > 1)
2004
19
      SwitchInsn->setMetadata(llvm::LLVMContext::MD_prof,
2005
19
                              createProfileWeights(*SwitchWeights));
2006
23
    delete SwitchWeights;
2007
384
  } else if (SwitchLikelihood) {
2008
55
    assert(SwitchLikelihood->size() == 1 + SwitchInsn->getNumCases() &&
2009
55
           "switch likelihoods do not match switch cases");
2010
0
    Optional<SmallVector<uint64_t, 16>> LHW =
2011
55
        getLikelihoodWeights(*SwitchLikelihood);
2012
55
    if (LHW) {
2013
16
      llvm::MDBuilder MDHelper(CGM.getLLVMContext());
2014
16
      SwitchInsn->setMetadata(llvm::LLVMContext::MD_prof,
2015
16
                              createProfileWeights(*LHW));
2016
16
    }
2017
55
    delete SwitchLikelihood;
2018
55
  }
2019
0
  SwitchInsn = SavedSwitchInsn;
2020
407
  SwitchWeights = SavedSwitchWeights;
2021
407
  SwitchLikelihood = SavedSwitchLikelihood;
2022
407
  CaseRangeBlock = SavedCRBlock;
2023
407
}
2024
2025
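For reference, a sketch (hypothetical function) of the source pattern that reaches the unpredictable-metadata path above:

extern int nextToken();   // hypothetical, opaque to the optimizer

int classify() {
  switch (__builtin_unpredictable(nextToken())) {
  case 0:  return 1;
  case 1:  return 2;
  default: return 0;      // the switch gets !unpredictable metadata at -O1+
  }
}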
static std::string
2026
SimplifyConstraint(const char *Constraint, const TargetInfo &Target,
2027
3.01k
                 SmallVectorImpl<TargetInfo::ConstraintInfo> *OutCons=nullptr) {
2028
3.01k
  std::string Result;
2029
2030
7.40k
  while (*Constraint) {
2031
4.38k
    switch (*Constraint) {
2032
3.59k
    default:
2033
3.59k
      Result += Target.convertConstraint(Constraint);
2034
3.59k
      break;
2035
    // Ignore these
2036
1
    case '*':
2037
1
    case '?':
2038
1
    case '!':
2039
3
    case '=': // Will see this and the following in mult-alt constraints.
2040
3
    case '+':
2041
3
      break;
2042
1
    case '#': // Ignore the rest of the constraint alternative.
2043
3
      while (Constraint[1] && Constraint[1] != ',')
2044
2
        Constraint++;
2045
1
      break;
2046
14
    case '&':
2047
16
    case '%':
2048
16
      Result += *Constraint;
2049
18
      while (Constraint[1] && Constraint[1] == *Constraint)
2050
2
        Constraint++;
2051
16
      break;
2052
622
    case ',':
2053
622
      Result += "|";
2054
622
      break;
2055
143
    case 'g':
2056
143
      Result += "imr";
2057
143
      break;
2058
6
    case '[': {
2059
6
      assert(OutCons &&
2060
6
             "Must pass output names to constraints with a symbolic name");
2061
0
      unsigned Index;
2062
6
      bool result = Target.resolveSymbolicName(Constraint, *OutCons, Index);
2063
6
      assert(result && "Could not resolve symbolic name"); (void)result;
2064
6
      Result += llvm::utostr(Index);
2065
6
      break;
2066
14
    }
2067
4.38k
    }
2068
2069
4.38k
    Constraint++;
2070
4.38k
  }
2071
2072
3.01k
  return Result;
2073
3.01k
}
2074
2075
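As an example of the rewriting above (hypothetical input string):

// SimplifyConstraint("g,r", ...) yields "imr|r": the 'g' alternative
// expands to "imr" and the GCC alternative separator ',' becomes '|'.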
/// AddVariableConstraints - Look at AsmExpr and, if it is a variable declared
2076
/// as using a particular register, add that as a constraint that will be used
2077
/// in this asm stmt.
2078
static std::string
2079
AddVariableConstraints(const std::string &Constraint, const Expr &AsmExpr,
2080
                       const TargetInfo &Target, CodeGenModule &CGM,
2081
                       const AsmStmt &Stmt, const bool EarlyClobber,
2082
3.01k
                       std::string *GCCReg = nullptr) {
2083
3.01k
  const DeclRefExpr *AsmDeclRef = dyn_cast<DeclRefExpr>(&AsmExpr);
2084
3.01k
  if (!AsmDeclRef)
2085
979
    return Constraint;
2086
2.04k
  const ValueDecl &Value = *AsmDeclRef->getDecl();
2087
2.04k
  const VarDecl *Variable = dyn_cast<VarDecl>(&Value);
2088
2.04k
  if (!Variable)
2089
11
    return Constraint;
2090
2.02k
  if (Variable->getStorageClass() != SC_Register)
2091
1.39k
    return Constraint;
2092
639
  AsmLabelAttr *Attr = Variable->getAttr<AsmLabelAttr>();
2093
639
  if (!Attr)
2094
574
    return Constraint;
2095
65
  StringRef Register = Attr->getLabel();
2096
65
  assert(Target.isValidGCCRegisterName(Register));
2097
  // We're using validateOutputConstraint here because we only care if
2098
  // this is a register constraint.
2099
0
  TargetInfo::ConstraintInfo Info(Constraint, "");
2100
65
  if (Target.validateOutputConstraint(Info) &&
2101
65
      !Info.allowsRegister()) {
2102
0
    CGM.ErrorUnsupported(&Stmt, "__asm__");
2103
0
    return Constraint;
2104
0
  }
2105
  // Canonicalize the register here before returning it.
2106
65
  Register = Target.getNormalizedGCCRegisterName(Register);
2107
65
  if (GCCReg != nullptr)
2108
12
    *GCCReg = Register.str();
2109
65
  return (EarlyClobber ? "&{" : "{") + Register.str() + "}";
2110
65
}
2111
2112
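A minimal sketch (not from this file; assumes an x86-64 target) of the source pattern AddVariableConstraints recognizes:

long useR10() {
  register long v asm("r10") = 0;   // variable pinned to a register
  __asm__("" : "+r"(v));            // "r" is rewritten to "{r10}" (or to
  return v;                         // "&{r10}" for early-clobber outputs)
}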
std::pair<llvm::Value*, llvm::Type *> CodeGenFunction::EmitAsmInputLValue(
2113
    const TargetInfo::ConstraintInfo &Info, LValue InputValue,
2114
347
    QualType InputType, std::string &ConstraintStr, SourceLocation Loc) {
2115
347
  if (Info.allowsRegister() || !Info.allowsMemory()) {
2116
167
    if (CodeGenFunction::hasScalarEvaluationKind(InputType))
2117
162
      return {EmitLoadOfLValue(InputValue, Loc).getScalarVal(), nullptr};
2118
2119
5
    llvm::Type *Ty = ConvertType(InputType);
2120
5
    uint64_t Size = CGM.getDataLayout().getTypeSizeInBits(Ty);
2121
5
    if ((Size <= 64 && llvm::isPowerOf2_64(Size)) ||
2122
5
        getTargetHooks().isScalarizableAsmOperand(*this, Ty)) {
2123
5
      Ty = llvm::IntegerType::get(getLLVMContext(), Size);
2124
5
      Ty = llvm::PointerType::getUnqual(Ty);
2125
2126
5
      return {Builder.CreateLoad(
2127
5
                  Builder.CreateBitCast(InputValue.getAddress(*this), Ty)),
2128
5
              nullptr};
2129
5
    }
2130
5
  }
2131
2132
180
  Address Addr = InputValue.getAddress(*this);
2133
180
  ConstraintStr += '*';
2134
180
  return {Addr.getPointer(), Addr.getElementType()};
2135
347
}
2136
2137
std::pair<llvm::Value *, llvm::Type *>
2138
CodeGenFunction::EmitAsmInput(const TargetInfo::ConstraintInfo &Info,
2139
                              const Expr *InputExpr,
2140
1.73k
                              std::string &ConstraintStr) {
2141
  // If this can't be a register or memory, i.e., has to be a constant
2142
  // (immediate or symbolic), try to emit it as such.
2143
1.73k
  if (!Info.allowsRegister() && !Info.allowsMemory()) {
2144
279
    if (Info.requiresImmediateConstant()) {
2145
59
      Expr::EvalResult EVResult;
2146
59
      InputExpr->EvaluateAsRValue(EVResult, getContext(), true);
2147
2148
59
      llvm::APSInt IntResult;
2149
59
      if (EVResult.Val.toIntegralConstant(IntResult, InputExpr->getType(),
2150
59
                                          getContext()))
2151
57
        return {llvm::ConstantInt::get(getLLVMContext(), IntResult), nullptr};
2152
59
    }
2153
2154
222
    Expr::EvalResult Result;
2155
222
    if (InputExpr->EvaluateAsInt(Result, getContext()))
2156
172
      return {llvm::ConstantInt::get(getLLVMContext(), Result.Val.getInt()),
2157
172
              nullptr};
2158
222
  }
2159
2160
1.50k
  if (Info.allowsRegister() || !Info.allowsMemory())
2161
1.34k
    if (CodeGenFunction::hasScalarEvaluationKind(InputExpr->getType()))
2162
1.34k
      return {EmitScalarExpr(InputExpr), nullptr};
2163
161
  if (InputExpr->getStmtClass() == Expr::CXXThisExprClass)
2164
3
    return {EmitScalarExpr(InputExpr), nullptr};
2165
158
  InputExpr = InputExpr->IgnoreParenNoopCasts(getContext());
2166
158
  LValue Dest = EmitLValue(InputExpr);
2167
158
  return EmitAsmInputLValue(Info, Dest, InputExpr->getType(), ConstraintStr,
2168
158
                            InputExpr->getExprLoc());
2169
161
}
2170
2171
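For reference, a small sketch (hypothetical, x86-64 AT&T syntax) of an input that must take the constant path above:

enum { kShift = 3 };   // must fold to an integer constant

long shifted(long x) {
  __asm__("shlq %1, %0" : "+r"(x) : "i"(kShift));  // "i" is emitted as '$3'
  return x;
}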
/// getAsmSrcLocInfo - Return the !srcloc metadata node to attach to an inline
2172
/// asm call instruction.  The !srcloc MDNode contains a list of constant
2173
/// integers which are the source locations of the start of each line in the
2174
/// asm.
2175
static llvm::MDNode *getAsmSrcLocInfo(const StringLiteral *Str,
2176
1.63k
                                      CodeGenFunction &CGF) {
2177
1.63k
  SmallVector<llvm::Metadata *, 8> Locs;
2178
  // Add the location of the first line to the MDNode.
2179
1.63k
  Locs.push_back(llvm::ConstantAsMetadata::get(llvm::ConstantInt::get(
2180
1.63k
      CGF.Int64Ty, Str->getBeginLoc().getRawEncoding())));
2181
1.63k
  StringRef StrVal = Str->getString();
2182
1.63k
  if (!StrVal.empty()) {
2183
1.58k
    const SourceManager &SM = CGF.CGM.getContext().getSourceManager();
2184
1.58k
    const LangOptions &LangOpts = CGF.CGM.getLangOpts();
2185
1.58k
    unsigned StartToken = 0;
2186
1.58k
    unsigned ByteOffset = 0;
2187
2188
    // Add the location of the start of each subsequent line of the asm to the
2189
    // MDNode.
2190
29.2k
    for (unsigned i = 0, e = StrVal.size() - 1; i != e; ++i) {
2191
27.6k
      if (StrVal[i] != '\n') continue;
2192
658
      SourceLocation LineLoc = Str->getLocationOfByte(
2193
658
          i + 1, SM, LangOpts, CGF.getTarget(), &StartToken, &ByteOffset);
2194
658
      Locs.push_back(llvm::ConstantAsMetadata::get(
2195
658
          llvm::ConstantInt::get(CGF.Int64Ty, LineLoc.getRawEncoding())));
2196
658
    }
2197
1.58k
  }
2198
2199
1.63k
  return llvm::MDNode::get(CGF.getLLVMContext(), Locs);
2200
1.63k
}
2201
2202
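Illustratively (hypothetical snippet), for a two-line asm string the scan above records one source location per line:

void twoLines() {
  __asm__("nop\n"   // line 1: the string's begin location is pushed first
          "nop");   // line 2: found by the '\n' scan, via getLocationOfByte
}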
static void UpdateAsmCallInst(llvm::CallBase &Result, bool HasSideEffect,
2203
                              bool HasUnwindClobber, bool ReadOnly,
2204
                              bool ReadNone, bool NoMerge, const AsmStmt &S,
2205
                              const std::vector<llvm::Type *> &ResultRegTypes,
2206
                              const std::vector<llvm::Type *> &ArgElemTypes,
2207
                              CodeGenFunction &CGF,
2208
1.80k
                              std::vector<llvm::Value *> &RegResults) {
2209
1.80k
  if (!HasUnwindClobber)
2210
1.79k
    Result.addFnAttr(llvm::Attribute::NoUnwind);
2211
2212
1.80k
  if (NoMerge)
2213
1
    Result.addFnAttr(llvm::Attribute::NoMerge);
2214
  // Attach readnone and readonly attributes.
2215
1.80k
  if (!HasSideEffect) {
2216
765
    if (ReadNone)
2217
309
      Result.addFnAttr(llvm::Attribute::ReadNone);
2218
456
    else if (ReadOnly)
2219
315
      Result.addFnAttr(llvm::Attribute::ReadOnly);
2220
765
  }
2221
2222
  // Add elementtype attribute for indirect constraints.
2223
2.17k
  for (auto Pair : llvm::enumerate(ArgElemTypes)) {
2224
2.17k
    if (Pair.value()) {
2225
378
      auto Attr = llvm::Attribute::get(
2226
378
          CGF.getLLVMContext(), llvm::Attribute::ElementType, Pair.value());
2227
378
      Result.addParamAttr(Pair.index(), Attr);
2228
378
    }
2229
2.17k
  }
2230
2231
  // Slap the source location of the inline asm into a !srcloc metadata on the
2232
  // call.
2233
1.80k
  if (const auto *gccAsmStmt = dyn_cast<GCCAsmStmt>(&S))
2234
1.63k
    Result.setMetadata("srcloc",
2235
1.63k
                       getAsmSrcLocInfo(gccAsmStmt->getAsmString(), CGF));
2236
165
  else {
2237
    // At least put the line number on MS inline asm blobs.
2238
165
    llvm::Constant *Loc =
2239
165
        llvm::ConstantInt::get(CGF.Int64Ty, S.getAsmLoc().getRawEncoding());
2240
165
    Result.setMetadata("srcloc",
2241
165
                       llvm::MDNode::get(CGF.getLLVMContext(),
2242
165
                                         llvm::ConstantAsMetadata::get(Loc)));
2243
165
  }
2244
2245
1.80k
  if (CGF.getLangOpts().assumeFunctionsAreConvergent())
2246
    // Conservatively, mark all inline asm blocks in CUDA or OpenCL as
2247
    // convergent (meaning, they may call an intrinsically convergent op, such
2248
    // as bar.sync, and so can't have certain optimizations applied around
2249
    // them).
2250
1
    Result.addFnAttr(llvm::Attribute::Convergent);
2251
  // Extract all of the register value results from the asm.
2252
1.80k
  if (ResultRegTypes.size() == 1) {
2253
778
    RegResults.push_back(&Result);
2254
1.02k
  } else {
2255
1.35k
    for (unsigned i = 0, e = ResultRegTypes.size(); i != e; 
++i337
) {
2256
337
      llvm::Value *Tmp = CGF.Builder.CreateExtractValue(&Result, i, "asmresult");
2257
337
      RegResults.push_back(Tmp);
2258
337
    }
2259
1.02k
  }
2260
1.80k
}
2261
2262
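A small sketch (hypothetical, x86-64) of an asm that satisfies the ReadNone conditions fed into UpdateAsmCallInst: no side effects, no memory clobber or memory operand, and register-only results:

int addOne(int v) {
  int r;
  __asm__("leal 1(%1), %0" : "=r"(r) : "r"(v));  // pure register arithmetic
  return r;
}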
1.80k
void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
2263
  // Assemble the final asm string.
2264
1.80k
  std::string AsmString = S.generateAsmString(getContext());
2265
2266
  // Get all the output and input constraints together.
2267
1.80k
  SmallVector<TargetInfo::ConstraintInfo, 4> OutputConstraintInfos;
2268
1.80k
  SmallVector<TargetInfo::ConstraintInfo, 4> InputConstraintInfos;
2269
2270
3.08k
  for (unsigned i = 0, e = S.getNumOutputs(); i != e; i++) {
2271
1.28k
    StringRef Name;
2272
1.28k
    if (const GCCAsmStmt *GAS = dyn_cast<GCCAsmStmt>(&S))
2273
1.25k
      Name = GAS->getOutputName(i);
2274
1.28k
    TargetInfo::ConstraintInfo Info(S.getOutputConstraint(i), Name);
2275
1.28k
    bool IsValid = getTarget().validateOutputConstraint(Info); (void)IsValid;
2276
1.28k
    assert(IsValid && "Failed to parse output constraint");
2277
0
    OutputConstraintInfos.push_back(Info);
2278
1.28k
  }
2279
2280
3.53k
  for (unsigned i = 0, e = S.getNumInputs(); i != e; i++) {
2281
1.73k
    StringRef Name;
2282
1.73k
    if (const GCCAsmStmt *GAS = dyn_cast<GCCAsmStmt>(&S))
2283
1.62k
      Name = GAS->getInputName(i);
2284
1.73k
    TargetInfo::ConstraintInfo Info(S.getInputConstraint(i), Name);
2285
1.73k
    bool IsValid =
2286
1.73k
      getTarget().validateInputConstraint(OutputConstraintInfos, Info);
2287
1.73k
    assert(IsValid && "Failed to parse input constraint"); (void)IsValid;
2288
1.73k
    InputConstraintInfos.push_back(Info);
2289
1.73k
  }
2290
2291
1.80k
  std::string Constraints;
2292
2293
1.80k
  std::vector<LValue> ResultRegDests;
2294
1.80k
  std::vector<QualType> ResultRegQualTys;
2295
1.80k
  std::vector<llvm::Type *> ResultRegTypes;
2296
1.80k
  std::vector<llvm::Type *> ResultTruncRegTypes;
2297
1.80k
  std::vector<llvm::Type *> ArgTypes;
2298
1.80k
  std::vector<llvm::Type *> ArgElemTypes;
2299
1.80k
  std::vector<llvm::Value*> Args;
2300
1.80k
  llvm::BitVector ResultTypeRequiresCast;
2301
2302
  // Keep track of inout constraints.
2303
1.80k
  std::string InOutConstraints;
2304
1.80k
  std::vector<llvm::Value*> InOutArgs;
2305
1.80k
  std::vector<llvm::Type*> InOutArgTypes;
2306
1.80k
  std::vector<llvm::Type*> InOutArgElemTypes;
2307
2308
  // Keep track of out constraints for tied input operand.
2309
1.80k
  std::vector<std::string> OutputConstraints;
2310
2311
  // Keep track of defined physregs.
2312
1.80k
  llvm::SmallSet<std::string, 8> PhysRegOutputs;
2313
2314
  // An inline asm can be marked readonly if it meets the following conditions:
2315
  //  - it doesn't have any sideeffects
2316
  //  - it doesn't clobber memory
2317
  //  - it doesn't return a value by-reference
2318
  // It can be marked readnone if it doesn't have any input memory constraints
2319
  // in addition to meeting the conditions listed above.
2320
1.80k
  bool ReadOnly = true, ReadNone = true;
2321
2322
3.08k
  for (unsigned i = 0, e = S.getNumOutputs(); i != e; i++) {
2323
1.28k
    TargetInfo::ConstraintInfo &Info = OutputConstraintInfos[i];
2324
2325
    // Simplify the output constraint.
2326
1.28k
    std::string OutputConstraint(S.getOutputConstraint(i));
2327
1.28k
    OutputConstraint = SimplifyConstraint(OutputConstraint.c_str() + 1,
2328
1.28k
                                          getTarget(), &OutputConstraintInfos);
2329
2330
1.28k
    const Expr *OutExpr = S.getOutputExpr(i);
2331
1.28k
    OutExpr = OutExpr->IgnoreParenNoopCasts(getContext());
2332
2333
1.28k
    std::string GCCReg;
2334
1.28k
    OutputConstraint = AddVariableConstraints(OutputConstraint, *OutExpr,
2335
1.28k
                                              getTarget(), CGM, S,
2336
1.28k
                                              Info.earlyClobber(),
2337
1.28k
                                              &GCCReg);
2338
    // Give an error on multiple outputs to same physreg.
2339
1.28k
    if (!GCCReg.empty() && !PhysRegOutputs.insert(GCCReg).second)
2340
0
      CGM.Error(S.getAsmLoc(), "multiple outputs to hard register: " + GCCReg);
2341
2342
1.28k
    OutputConstraints.push_back(OutputConstraint);
2343
1.28k
    LValue Dest = EmitLValue(OutExpr);
2344
1.28k
    if (!Constraints.empty())
2345
295
      Constraints += ',';
2346
2347
    // If this is a register output, then make the inline asm return it
2348
    // by-value.  If this is a memory result, return the value by-reference.
2349
1.28k
    QualType QTy = OutExpr->getType();
2350
1.28k
    const bool IsScalarOrAggregate = hasScalarEvaluationKind(QTy) ||
2351
1.28k
                                     hasAggregateEvaluationKind(QTy);
2352
1.28k
    if (!Info.allowsMemory() && IsScalarOrAggregate) {
2353
2354
1.08k
      Constraints += "=" + OutputConstraint;
2355
1.08k
      ResultRegQualTys.push_back(QTy);
2356
1.08k
      ResultRegDests.push_back(Dest);
2357
2358
1.08k
      llvm::Type *Ty = ConvertTypeForMem(QTy);
2359
1.08k
      const bool RequiresCast = Info.allowsRegister() &&
2360
1.08k
          (getTargetHooks().isScalarizableAsmOperand(*this, Ty) ||
2361
1.08k
           Ty->isAggregateType());
2362
2363
1.08k
      ResultTruncRegTypes.push_back(Ty);
2364
1.08k
      ResultTypeRequiresCast.push_back(RequiresCast);
2365
2366
1.08k
      if (RequiresCast) {
2367
17
        unsigned Size = getContext().getTypeSize(QTy);
2368
17
        Ty = llvm::IntegerType::get(getLLVMContext(), Size);
2369
17
      }
2370
1.08k
      ResultRegTypes.push_back(Ty);
2371
      // If this output is tied to an input, and if the input is larger, then
2372
      // we need to set the actual result type of the inline asm node to be the
2373
      // same as the input type.
2374
1.08k
      if (Info.hasMatchingInput()) {
2375
55
        unsigned InputNo;
2376
68
        for (InputNo = 0; InputNo != S.getNumInputs(); ++InputNo) {
2377
68
          TargetInfo::ConstraintInfo &Input = InputConstraintInfos[InputNo];
2378
68
          if (Input.hasTiedOperand() && Input.getTiedOperand() == i)
2379
55
            break;
2380
68
        }
2381
55
        assert(InputNo != S.getNumInputs() && "Didn't find matching input!");
2382
2383
0
        QualType InputTy = S.getInputExpr(InputNo)->getType();
2384
55
        QualType OutputType = OutExpr->getType();
2385
2386
55
        uint64_t InputSize = getContext().getTypeSize(InputTy);
2387
55
        if (getContext().getTypeSize(OutputType) < InputSize) {
2388
          // Form the asm to return the value as a larger integer or fp type.
2389
4
          ResultRegTypes.back() = ConvertType(InputTy);
2390
4
        }
2391
55
      }
2392
1.08k
      if (llvm::Type* AdjTy =
2393
1.08k
            getTargetHooks().adjustInlineAsmType(*this, OutputConstraint,
2394
1.08k
                                                 ResultRegTypes.back()))
2395
1.08k
        ResultRegTypes.back() = AdjTy;
2396
0
      else {
2397
0
        CGM.getDiags().Report(S.getAsmLoc(),
2398
0
                              diag::err_asm_invalid_type_in_input)
2399
0
            << OutExpr->getType() << OutputConstraint;
2400
0
      }
2401
2402
      // Update largest vector width for any vector types.
2403
1.08k
      if (auto *VT = dyn_cast<llvm::VectorType>(ResultRegTypes.back()))
2404
113
        LargestVectorWidth =
2405
113
            std::max((uint64_t)LargestVectorWidth,
2406
113
                     VT->getPrimitiveSizeInBits().getKnownMinSize());
2407
1.08k
    } else {
2408
198
      Address DestAddr = Dest.getAddress(*this);
2409
      // Matrix types in memory are represented by arrays, but accessed through
2410
      // vector pointers, with the alignment specified on the access operation.
2411
      // For inline assembly, update pointer arguments to use vector pointers.
2412
      // Otherwise there will be a mismatch if the matrix is also an
2413
      // input argument, which is represented as a vector.
2414
198
      if (isa<MatrixType>(OutExpr->getType().getCanonicalType()))
2415
1
        DestAddr = Builder.CreateElementBitCast(
2416
1
            DestAddr, ConvertType(OutExpr->getType()));
2417
2418
198
      ArgTypes.push_back(DestAddr.getType());
2419
198
      ArgElemTypes.push_back(DestAddr.getElementType());
2420
198
      Args.push_back(DestAddr.getPointer());
2421
198
      Constraints += "=*";
2422
198
      Constraints += OutputConstraint;
2423
198
      ReadOnly = ReadNone = false;
2424
198
    }
2425
2426
1.28k
    if (Info.isReadWrite()) {
2427
189
      InOutConstraints += ',';
2428
2429
189
      const Expr *InputExpr = S.getOutputExpr(i);
2430
189
      llvm::Value *Arg;
2431
189
      llvm::Type *ArgElemType;
2432
189
      std::tie(Arg, ArgElemType) = EmitAsmInputLValue(
2433
189
          Info, Dest, InputExpr->getType(), InOutConstraints,
2434
189
          InputExpr->getExprLoc());
2435
2436
189
      if (llvm::Type* AdjTy =
2437
189
          getTargetHooks().adjustInlineAsmType(*this, OutputConstraint,
2438
189
                                               Arg->getType()))
2439
189
        Arg = Builder.CreateBitCast(Arg, AdjTy);
2440
2441
      // Update largest vector width for any vector types.
2442
189
      if (auto *VT = dyn_cast<llvm::VectorType>(Arg->getType()))
2443
2
        LargestVectorWidth =
2444
2
            std::max((uint64_t)LargestVectorWidth,
2445
2
                     VT->getPrimitiveSizeInBits().getKnownMinSize());
2446
      // Only tie earlyclobber physregs.
2447
189
      if (Info.allowsRegister() && (GCCReg.empty() || Info.earlyClobber()))
2448
160
        InOutConstraints += llvm::utostr(i);
2449
29
      else
2450
29
        InOutConstraints += OutputConstraint;
2451
2452
189
      InOutArgTypes.push_back(Arg->getType());
2453
189
      InOutArgElemTypes.push_back(ArgElemType);
2454
189
      InOutArgs.push_back(Arg);
2455
189
    }
2456
1.28k
  }
2457
2458
  // If this is a Microsoft-style asm blob, store the return registers (EAX:EDX)
2459
  // to the return value slot. Only do this when returning in registers.
2460
1.80k
  if (isa<MSAsmStmt>(&S)) {
2461
165
    const ABIArgInfo &RetAI = CurFnInfo->getReturnInfo();
2462
165
    if (RetAI.isDirect() || RetAI.isExtend()) {
2463
      // Make a fake lvalue for the return value slot.
2464
30
      LValue ReturnSlot = MakeAddrLValueWithoutTBAA(ReturnValue, FnRetTy);
2465
30
      CGM.getTargetCodeGenInfo().addReturnRegisterOutputs(
2466
30
          *this, ReturnSlot, Constraints, ResultRegTypes, ResultTruncRegTypes,
2467
30
          ResultRegDests, AsmString, S.getNumOutputs());
2468
30
      SawAsmBlock = true;
2469
30
    }
2470
165
  }
2471
2472
3.53k
  for (unsigned i = 0, e = S.getNumInputs(); i != e; i++) {
2473
1.73k
    const Expr *InputExpr = S.getInputExpr(i);
2474
2475
1.73k
    TargetInfo::ConstraintInfo &Info = InputConstraintInfos[i];
2476
2477
1.73k
    if (Info.allowsMemory())
2478
583
      ReadNone = false;
2479
2480
1.73k
    if (!Constraints.empty())
2481
1.47k
      Constraints += ',';
2482
2483
    // Simplify the input constraint.
2484
1.73k
    std::string InputConstraint(S.getInputConstraint(i));
2485
1.73k
    InputConstraint = SimplifyConstraint(InputConstraint.c_str(), getTarget(),
2486
1.73k
                                         &OutputConstraintInfos);
2487
2488
1.73k
    InputConstraint = AddVariableConstraints(
2489
1.73k
        InputConstraint, *InputExpr->IgnoreParenNoopCasts(getContext()),
2490
1.73k
        getTarget(), CGM, S, false /* No EarlyClobber */);
2491
2492
1.73k
    std::string ReplaceConstraint (InputConstraint);
2493
1.73k
    llvm::Value *Arg;
2494
1.73k
    llvm::Type *ArgElemType;
2495
1.73k
    std::tie(Arg, ArgElemType) = EmitAsmInput(Info, InputExpr, Constraints);
2496
2497
    // If this input argument is tied to a larger output result, extend the
2498
    // input to be the same size as the output.  The LLVM backend wants to see
2499
    // the input and output of a matching constraint be the same size.  Note
2500
    // that GCC does not define what the top bits are here.  We use zext because
2501
    // that is usually cheaper, but LLVM IR should really get an anyext someday.
2502
1.73k
    if (Info.hasTiedOperand()) {
2503
55
      unsigned Output = Info.getTiedOperand();
2504
55
      QualType OutputType = S.getOutputExpr(Output)->getType();
2505
55
      QualType InputTy = InputExpr->getType();
2506
2507
55
      if (getContext().getTypeSize(OutputType) >
2508
55
          getContext().getTypeSize(InputTy)) {
2509
        // Use ptrtoint as appropriate so that we can do our extension.
2510
3
        if (isa<llvm::PointerType>(Arg->getType()))
2511
0
          Arg = Builder.CreatePtrToInt(Arg, IntPtrTy);
2512
3
        llvm::Type *OutputTy = ConvertType(OutputType);
2513
3
        if (isa<llvm::IntegerType>(OutputTy))
2514
1
          Arg = Builder.CreateZExt(Arg, OutputTy);
2515
2
        else if (isa<llvm::PointerType>(OutputTy))
2516
1
          Arg = Builder.CreateZExt(Arg, IntPtrTy);
2517
1
        else {
2518
1
          assert(OutputTy->isFloatingPointTy() && "Unexpected output type");
2519
0
          Arg = Builder.CreateFPExt(Arg, OutputTy);
2520
1
        }
2521
3
      }
2522
      // Deal with the tied operands' constraint code in adjustInlineAsmType.
2523
0
      ReplaceConstraint = OutputConstraints[Output];
2524
55
    }
2525
1.73k
    if (llvm::Type* AdjTy =
2526
1.73k
          getTargetHooks().adjustInlineAsmType(*this, ReplaceConstraint,
2527
1.73k
                                                   Arg->getType()))
2528
1.73k
      Arg = Builder.CreateBitCast(Arg, AdjTy);
2529
0
    else
2530
0
      CGM.getDiags().Report(S.getAsmLoc(), diag::err_asm_invalid_type_in_input)
2531
0
          << InputExpr->getType() << InputConstraint;
2532
2533
    // Update largest vector width for any vector types.
2534
1.73k
    if (auto *VT = dyn_cast<llvm::VectorType>(Arg->getType()))
2535
123
      LargestVectorWidth =
2536
123
          std::max((uint64_t)LargestVectorWidth,
2537
123
                   VT->getPrimitiveSizeInBits().getKnownMinSize());
2538
2539
1.73k
    ArgTypes.push_back(Arg->getType());
2540
1.73k
    ArgElemTypes.push_back(ArgElemType);
2541
1.73k
    Args.push_back(Arg);
2542
1.73k
    Constraints += InputConstraint;
2543
1.73k
  }
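As a hedged sketch of the tied-operand widening handled in this loop (hypothetical function; GNU extended asm): a 32-bit input tied to a 64-bit output via the matching constraint "0" is zero-extended before the call, since the backend requires both sides of a matching constraint to have the same size.

  // Hypothetical: 'x' (i32) is tied to output 0 (i64), so EmitAsmStmt emits a
  // zext of the input; zext is used because GCC leaves the top bits undefined.
  long widen_tied(int x) {
    long r;
    __asm__("" : "=r"(r) : "0"(x));
    return r;
  }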
2544
2545
  // Append the "input" part of inout constraints.
2546
1.98k
  for (unsigned i = 0, e = InOutArgs.size(); i != e; i++) {
2547
189
    ArgTypes.push_back(InOutArgTypes[i]);
2548
189
    ArgElemTypes.push_back(InOutArgElemTypes[i]);
2549
189
    Args.push_back(InOutArgs[i]);
2550
189
  }
2551
1.80k
  Constraints += InOutConstraints;
2552
2553
  // Labels
2554
1.80k
  SmallVector<llvm::BasicBlock *, 16> Transfer;
2555
1.80k
  llvm::BasicBlock *Fallthrough = nullptr;
2556
1.80k
  bool IsGCCAsmGoto = false;
2557
1.80k
  if (const auto *GS = dyn_cast<GCCAsmStmt>(&S)) {
2558
1.63k
    IsGCCAsmGoto = GS->isAsmGoto();
2559
1.63k
    if (IsGCCAsmGoto) {
2560
50
      for (const auto *E : GS->labels()) {
2561
50
        JumpDest Dest = getJumpDestForLabel(E->getLabel());
2562
50
        Transfer.push_back(Dest.getBlock());
2563
50
        llvm::BlockAddress *BA =
2564
50
            llvm::BlockAddress::get(CurFn, Dest.getBlock());
2565
50
        Args.push_back(BA);
2566
50
        ArgTypes.push_back(BA->getType());
2567
50
        ArgElemTypes.push_back(nullptr);
2568
50
        if (!Constraints.empty())
2569
45
          Constraints += ',';
2570
50
        Constraints += 'i';
2571
50
      }
2572
31
      Fallthrough = createBasicBlock("asm.fallthrough");
2573
31
    }
2574
1.63k
  }
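To make the label handling concrete, a hedged asm-goto sketch (hypothetical function): each label in the goto list becomes an extra blockaddress argument with an 'i' constraint, and the statement is later emitted as a callbr whose normal destination is the "asm.fallthrough" block created here.

  // Hypothetical: 'err' is passed as a blockaddress argument with constraint
  // 'i'; %l1 names it in the template (labels follow outputs and inputs).
  int check(int x) {
    asm goto("testl %0, %0; jnz %l1" : /* no outputs */ : "r"(x) : "cc" : err);
    return 0;
  err:
    return 1;
  }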
2575
2576
1.80k
  bool HasUnwindClobber = false;
2577
2578
  // Clobbers
2579
3.10k
  for (unsigned i = 0, e = S.getNumClobbers(); i != e; i++) {
2580
1.30k
    StringRef Clobber = S.getClobber(i);
2581
2582
1.30k
    if (Clobber == "memory")
2583
154
      ReadOnly = ReadNone = false;
2584
1.14k
    else if (Clobber == "unwind") {
2585
1
      HasUnwindClobber = true;
2586
1
      continue;
2587
1.14k
    } else if (Clobber != "cc") {
2588
1.08k
      Clobber = getTarget().getNormalizedGCCRegisterName(Clobber);
2589
1.08k
      if (CGM.getCodeGenOpts().StackClashProtector &&
2590
1.08k
          getTarget().isSPRegName(Clobber)) {
2591
3
        CGM.getDiags().Report(S.getAsmLoc(),
2592
3
                              diag::warn_stack_clash_protection_inline_asm);
2593
3
      }
2594
1.08k
    }
2595
2596
1.30k
    if (isa<MSAsmStmt>(&S)) {
2597
194
      if (Clobber == "eax" || 
Clobber == "edx"93
) {
2598
117
        if (Constraints.find("=&A") != std::string::npos)
2599
3
          continue;
2600
114
        std::string::size_type position1 =
2601
114
            Constraints.find("={" + Clobber.str() + "}");
2602
114
        if (position1 != std::string::npos) {
2603
13
          Constraints.insert(position1 + 1, "&");
2604
13
          continue;
2605
13
        }
2606
101
        std::string::size_type position2 = Constraints.find("=A");
2607
101
        if (position2 != std::string::npos) {
2608
3
          Constraints.insert(position2 + 1, "&");
2609
3
          continue;
2610
3
        }
2611
101
      }
2612
194
    }
2613
1.28k
    if (!Constraints.empty())
2614
907
      Constraints += ',';
2615
2616
1.28k
    Constraints += "~{";
2617
1.28k
    Constraints += Clobber;
2618
1.28k
    Constraints += '}';
2619
1.28k
  }
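A hedged example of how this clobber loop lands in the LLVM constraint string (hypothetical snippet): "memory" additionally disables the readonly/readnone deduction, "cc" is kept verbatim, and other names are normalized GCC register names; each surviving clobber is appended as "~{name}".

  // Hypothetical: the constraint string gains "~{memory},~{cc}" (plus any
  // machine-specific clobbers appended just below).
  int g;
  void touch(void) {
    asm volatile("" : : "r"(&g) : "memory", "cc");
  }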
2620
2621
1.80k
  assert(!(HasUnwindClobber && IsGCCAsmGoto) &&
2622
1.80k
         "unwind clobber can't be used with asm goto");
2623
2624
  // Add machine-specific clobbers
2625
0
  std::string MachineClobbers = getTarget().getClobbers();
2626
1.80k
  if (!MachineClobbers.empty()) {
2627
1.11k
    if (!Constraints.empty())
2628
968
      Constraints += ',';
2629
1.11k
    Constraints += MachineClobbers;
2630
1.11k
  }
2631
2632
1.80k
  llvm::Type *ResultType;
2633
1.80k
  if (ResultRegTypes.empty())
2634
915
    ResultType = VoidTy;
2635
885
  else if (ResultRegTypes.size() == 1)
2636
778
    ResultType = ResultRegTypes[0];
2637
107
  else
2638
107
    ResultType = llvm::StructType::get(getLLVMContext(), ResultRegTypes);
2639
2640
1.80k
  llvm::FunctionType *FTy =
2641
1.80k
    llvm::FunctionType::get(ResultType, ArgTypes, false);
2642
2643
1.80k
  bool HasSideEffect = S.isVolatile() || S.getNumOutputs() == 0;
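A hedged note on the HasSideEffect computation just above: an asm with zero outputs is conservatively treated as having side effects even without the volatile keyword, e.g. (hypothetical snippet):

  void pause_hint(void) {
    asm("nop");   // no outputs => emitted as 'asm sideeffect' in the IR
  }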
2644
2645
1.80k
  llvm::InlineAsm::AsmDialect GnuAsmDialect =
2646
1.80k
      CGM.getCodeGenOpts().getInlineAsmDialect() == CodeGenOptions::IAD_ATT
2647
1.80k
          ? llvm::InlineAsm::AD_ATT
2648
1.80k
          : llvm::InlineAsm::AD_Intel;
2649
1.80k
  llvm::InlineAsm::AsmDialect AsmDialect = isa<MSAsmStmt>(&S) ?
2650
1.63k
    llvm::InlineAsm::AD_Intel : GnuAsmDialect;
2651
2652
1.80k
  llvm::InlineAsm *IA = llvm::InlineAsm::get(
2653
1.80k
      FTy, AsmString, Constraints, HasSideEffect,
2654
1.80k
      /* IsAlignStack */ false, AsmDialect, HasUnwindClobber);
2655
1.80k
  std::vector<llvm::Value*> RegResults;
2656
1.80k
  if (IsGCCAsmGoto) {
2657
31
    llvm::CallBrInst *Result =
2658
31
        Builder.CreateCallBr(IA, Fallthrough, Transfer, Args);
2659
31
    EmitBlock(Fallthrough);
2660
31
    UpdateAsmCallInst(cast<llvm::CallBase>(*Result), HasSideEffect, false,
2661
31
                      ReadOnly, ReadNone, InNoMergeAttributedStmt, S,
2662
31
                      ResultRegTypes, ArgElemTypes, *this, RegResults);
2663
1.76k
  } else if (HasUnwindClobber) {
2664
1
    llvm::CallBase *Result = EmitCallOrInvoke(IA, Args, "");
2665
1
    UpdateAsmCallInst(*Result, HasSideEffect, true, ReadOnly, ReadNone,
2666
1
                      InNoMergeAttributedStmt, S, ResultRegTypes, ArgElemTypes,
2667
1
                      *this, RegResults);
2668
1.76k
  } else {
2669
1.76k
    llvm::CallInst *Result =
2670
1.76k
        Builder.CreateCall(IA, Args, getBundlesForFunclet(IA));
2671
1.76k
    UpdateAsmCallInst(cast<llvm::CallBase>(*Result), HasSideEffect, false,
2672
1.76k
                      ReadOnly, ReadNone, InNoMergeAttributedStmt, S,
2673
1.76k
                      ResultRegTypes, ArgElemTypes, *this, RegResults);
2674
1.76k
  }
2675
2676
1.80k
  assert(RegResults.size() == ResultRegTypes.size());
2677
0
  assert(RegResults.size() == ResultTruncRegTypes.size());
2678
0
  assert(RegResults.size() == ResultRegDests.size());
2679
  // ResultRegDests can also be populated by addReturnRegisterOutputs() above,
2680
  // in which case its size may grow.
2681
0
  assert(ResultTypeRequiresCast.size() <= ResultRegDests.size());
2682
2.91k
  for (unsigned i = 0, e = RegResults.size(); i != e; ++i) {
2683
1.11k
    llvm::Value *Tmp = RegResults[i];
2684
1.11k
    llvm::Type *TruncTy = ResultTruncRegTypes[i];
2685
2686
    // If the result type of the LLVM IR asm doesn't match the result type of
2687
    // the expression, do the conversion.
2688
1.11k
    if (ResultRegTypes[i] != ResultTruncRegTypes[i]) {
2689
2690
      // Truncate the integer result to the right size; note that TruncTy can be
2691
      // a pointer.
2692
35
      if (TruncTy->isFloatingPointTy())
2693
1
        Tmp = Builder.CreateFPTrunc(Tmp, TruncTy);
2694
34
      else if (TruncTy->isPointerTy() && Tmp->getType()->isIntegerTy()) {
2695
0
        uint64_t ResSize = CGM.getDataLayout().getTypeSizeInBits(TruncTy);
2696
0
        Tmp = Builder.CreateTrunc(Tmp,
2697
0
                   llvm::IntegerType::get(getLLVMContext(), (unsigned)ResSize));
2698
0
        Tmp = Builder.CreateIntToPtr(Tmp, TruncTy);
2699
34
      } else if (Tmp->getType()->isPointerTy() && TruncTy->isIntegerTy()) {
2700
0
        uint64_t TmpSize = CGM.getDataLayout().getTypeSizeInBits(Tmp->getType());
2701
0
        Tmp = Builder.CreatePtrToInt(Tmp,
2702
0
                   llvm::IntegerType::get(getLLVMContext(), (unsigned)TmpSize));
2703
0
        Tmp = Builder.CreateTrunc(Tmp, TruncTy);
2704
34
      } else if (TruncTy->isIntegerTy()) {
2705
7
        Tmp = Builder.CreateZExtOrTrunc(Tmp, TruncTy);
2706
27
      } else if (TruncTy->isVectorTy()) {
2707
10
        Tmp = Builder.CreateBitCast(Tmp, TruncTy);
2708
10
      }
2709
35
    }
2710
2711
1.11k
    LValue Dest = ResultRegDests[i];
2712
    // ResultTypeRequiresCast elements correspond to the first
2713
    // ResultTypeRequiresCast.size() elements of RegResults.
2714
1.11k
    if ((i < ResultTypeRequiresCast.size()) && ResultTypeRequiresCast[i]) {
2715
17
      unsigned Size = getContext().getTypeSize(ResultRegQualTys[i]);
2716
17
      Address A = Builder.CreateBitCast(Dest.getAddress(*this),
2717
17
                                        ResultRegTypes[i]->getPointerTo());
2718
17
      if (getTargetHooks().isScalarizableAsmOperand(*this, TruncTy)) {
2719
1
        Builder.CreateStore(Tmp, A);
2720
1
        continue;
2721
1
      }
2722
2723
16
      QualType Ty = getContext().getIntTypeForBitwidth(Size, /*Signed*/ false);
2724
16
      if (Ty.isNull()) {
2725
3
        const Expr *OutExpr = S.getOutputExpr(i);
2726
3
        CGM.Error(
2727
3
            OutExpr->getExprLoc(),
2728
3
            "impossible constraint in asm: can't store value into a register");
2729
3
        return;
2730
3
      }
2731
13
      Dest = MakeAddrLValue(A, Ty);
2732
13
    }
2733
1.11k
    EmitStoreThroughLValue(RValue::get(Tmp), Dest);
2734
1.11k
  }
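As a hedged illustration of the ResultTypeRequiresCast path in this loop (hypothetical type and function): an aggregate output that fits in a register is stored either directly through a bitcast of the destination address (the scalarizable case) or through an unsigned integer lvalue of the same bit width.

  // Hypothetical: 'Pair' is 32 bits wide, so "=r" is permitted; the result is
  // written through an i32-typed view of 'p' rather than the struct type.
  struct Pair { short lo, hi; };
  struct Pair make_pair(void) {
    struct Pair p;
    __asm__("" : "=r"(p));
    return p;
  }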
2735
1.80k
}
2736
2737
970
LValue CodeGenFunction::InitCapturedStruct(const CapturedStmt &S) {
2738
970
  const RecordDecl *RD = S.getCapturedRecordDecl();
2739
970
  QualType RecordTy = getContext().getRecordType(RD);
2740
2741
  // Initialize the captured struct.
2742
970
  LValue SlotLV =
2743
970
    MakeAddrLValue(CreateMemTemp(RecordTy, "agg.captured"), RecordTy);
2744
2745
970
  RecordDecl::field_iterator CurField = RD->field_begin();
2746
970
  for (CapturedStmt::const_capture_init_iterator I = S.capture_init_begin(),
2747
970
                                                 E = S.capture_init_end();
2748
2.32k
       I != E; ++I, ++CurField) {
2749
1.35k
    LValue LV = EmitLValueForFieldInitialization(SlotLV, *CurField);
2750
1.35k
    if (CurField->hasCapturedVLAType()) {
2751
47
      EmitLambdaVLACapture(CurField->getCapturedVLAType(), LV);
2752
1.30k
    } else {
2753
1.30k
      EmitInitializerForField(*CurField, LV, *I);
2754
1.30k
    }
2755
1.35k
  }
2756
2757
970
  return SlotLV;
2758
970
}
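For orientation, a hedged sketch of input that exercises InitCapturedStruct (hypothetical function, assuming -fopenmp): every variable referenced in the captured region becomes a field of the "agg.captured" temporary, and a captured VLA contributes a size field routed through EmitLambdaVLACapture.

  // Hypothetical: the parallel region captures 'a' (a VLA) and 'n'; both become
  // fields of the agg.captured record built above.
  void fill(int n) {
    int a[n];
    #pragma omp parallel
    a[0] = n;
  }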
2759
2760
/// Generate an outlined function for the body of a CapturedStmt, store any
2761
/// captured variables into the captured struct, and call the outlined function.
2762
llvm::Function *
2763
27
CodeGenFunction::EmitCapturedStmt(const CapturedStmt &S, CapturedRegionKind K) {
2764
27
  LValue CapStruct = InitCapturedStruct(S);
2765
2766
  // Emit the CapturedDecl
2767
27
  CodeGenFunction CGF(CGM, true);
2768
27
  CGCapturedStmtRAII CapInfoRAII(CGF, new CGCapturedStmtInfo(S, K));
2769
27
  llvm::Function *F = CGF.GenerateCapturedStmtFunction(S);
2770
27
  delete CGF.CapturedStmtInfo;
2771
2772
  // Emit call to the helper function.
2773
27
  EmitCallOrInvoke(F, CapStruct.getPointer(*this));
2774
2775
27
  return F;
2776
27
}
2777
2778
879
Address CodeGenFunction::GenerateCapturedStmtArgument(const CapturedStmt &S) {
2779
879
  LValue CapStruct = InitCapturedStruct(S);
2780
879
  return CapStruct.getAddress(*this);
2781
879
}
2782
2783
/// Creates the outlined function for a CapturedStmt.
2784
llvm::Function *
2785
970
CodeGenFunction::GenerateCapturedStmtFunction(const CapturedStmt &S) {
2786
970
  assert(CapturedStmtInfo &&
2787
970
    "CapturedStmtInfo should be set when generating the captured function");
2788
0
  const CapturedDecl *CD = S.getCapturedDecl();
2789
970
  const RecordDecl *RD = S.getCapturedRecordDecl();
2790
970
  SourceLocation Loc = S.getBeginLoc();
2791
970
  assert(CD->hasBody() && "missing CapturedDecl body");
2792
2793
  // Build the argument list.
2794
0
  ASTContext &Ctx = CGM.getContext();
2795
970
  FunctionArgList Args;
2796
970
  Args.append(CD->param_begin(), CD->param_end());
2797
2798
  // Create the function declaration.
2799
970
  const CGFunctionInfo &FuncInfo =
2800
970
    CGM.getTypes().arrangeBuiltinFunctionDeclaration(Ctx.VoidTy, Args);
2801
970
  llvm::FunctionType *FuncLLVMTy = CGM.getTypes().GetFunctionType(FuncInfo);
2802
2803
970
  llvm::Function *F =
2804
970
    llvm::Function::Create(FuncLLVMTy, llvm::GlobalValue::InternalLinkage,
2805
970
                           CapturedStmtInfo->getHelperName(), &CGM.getModule());
2806
970
  CGM.SetInternalFunctionAttributes(CD, F, FuncInfo);
2807
970
  if (CD->isNothrow())
2808
725
    F->addFnAttr(llvm::Attribute::NoUnwind);
2809
2810
  // Generate the function.
2811
970
  StartFunction(CD, Ctx.VoidTy, F, FuncInfo, Args, CD->getLocation(),
2812
970
                CD->getBody()->getBeginLoc());
2813
  // Set the context parameter in CapturedStmtInfo.
2814
970
  Address DeclPtr = GetAddrOfLocalVar(CD->getContextParam());
2815
970
  CapturedStmtInfo->setContextValue(Builder.CreateLoad(DeclPtr));
2816
2817
  // Initialize variable-length arrays.
2818
970
  LValue Base = MakeNaturalAlignAddrLValue(CapturedStmtInfo->getContextValue(),
2819
970
                                           Ctx.getTagDeclType(RD));
2820
1.35k
  for (auto *FD : RD->fields()) {
2821
1.35k
    if (FD->hasCapturedVLAType()) {
2822
47
      auto *ExprArg =
2823
47
          EmitLoadOfLValue(EmitLValueForField(Base, FD), S.getBeginLoc())
2824
47
              .getScalarVal();
2825
47
      auto VAT = FD->getCapturedVLAType();
2826
47
      VLASizeMap[VAT->getSizeExpr()] = ExprArg;
2827
47
    }
2828
1.35k
  }
2829
2830
  // If 'this' is captured, load it into CXXThisValue.
2831
970
  if (CapturedStmtInfo->isCXXThisExprCaptured()) {
2832
24
    FieldDecl *FD = CapturedStmtInfo->getThisFieldDecl();
2833
24
    LValue ThisLValue = EmitLValueForField(Base, FD);
2834
24
    CXXThisValue = EmitLoadOfLValue(ThisLValue, Loc).getScalarVal();
2835
24
  }
2836
2837
970
  PGO.assignRegionCounters(GlobalDecl(CD), F);
2838
970
  CapturedStmtInfo->EmitBody(*this, CD->getBody());
2839
970
  FinishFunction(CD->getBodyRBrace());
2840
2841
970
  return F;
2842
970
}
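A hedged end-to-end sketch (hypothetical function; helper name per the default CGCapturedStmtInfo): the clang testing pragma below produces a plain CapturedStmt, which GenerateCapturedStmtFunction outlines into an internal helper that receives only the context parameter.

  // Hypothetical: the braced body is outlined roughly as
  //   define internal void @__captured_stmt(%struct.anon* %__context)
  // and the caller passes the address of the agg.captured temporary.
  void bump(int n) {
    #pragma clang __debug captured
    { n += 1; }
  }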