Coverage Report

Created: 2022-07-16 07:03

/Users/buildslave/jenkins/workspace/coverage/llvm-project/clang/lib/CodeGen/CGStmt.cpp
//===--- CGStmt.cpp - Emit LLVM Code from Statements ----------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit Stmt nodes as LLVM code.
//
//===----------------------------------------------------------------------===//

#include "CGDebugInfo.h"
#include "CGOpenMPRuntime.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "TargetInfo.h"
#include "clang/AST/Attr.h"
#include "clang/AST/Expr.h"
#include "clang/AST/Stmt.h"
#include "clang/AST/StmtVisitor.h"
#include "clang/Basic/Builtins.h"
#include "clang/Basic/DiagnosticSema.h"
#include "clang/Basic/PrettyStackTrace.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/TargetInfo.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/IR/Assumptions.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/Support/SaveAndRestore.h"

using namespace clang;
using namespace CodeGen;

//===----------------------------------------------------------------------===//
//                              Statement Emission
//===----------------------------------------------------------------------===//

void CodeGenFunction::EmitStopPoint(const Stmt *S) {
  if (CGDebugInfo *DI = getDebugInfo()) {
    SourceLocation Loc;
    Loc = S->getBeginLoc();
    DI->EmitLocation(Builder, Loc);

    LastStopPoint = Loc;
  }
}

void CodeGenFunction::EmitStmt(const Stmt *S, ArrayRef<const Attr *> Attrs) {
  assert(S && "Null statement?");
  PGO.setCurrentStmt(S);

  // These statements have their own debug info handling.
  if (EmitSimpleStmt(S, Attrs))
    return;

  // Check if we are generating unreachable code.
  if (!HaveInsertPoint()) {
    // If so, and the statement doesn't contain a label, then we do not need to
    // generate actual code. This is safe because (1) the current point is
    // unreachable, so we don't need to execute the code, and (2) we've already
    // handled the statements which update internal data structures (like the
    // local variable map) which could be used by subsequent statements.
    if (!ContainsLabel(S)) {
      // Verify that any decl statements were handled as simple, they may be in
      // scope of subsequent reachable statements.
      assert(!isa<DeclStmt>(*S) && "Unexpected DeclStmt!");
      return;
    }

    // Otherwise, make a new block to hold the code.
    EnsureInsertPoint();
  }

  // Generate a stoppoint if we are emitting debug info.
  EmitStopPoint(S);

  // Ignore all OpenMP directives except for simd if OpenMP with Simd is
  // enabled.
  if (getLangOpts().OpenMP && getLangOpts().OpenMPSimd) {
    if (const auto *D = dyn_cast<OMPExecutableDirective>(S)) {
      EmitSimpleOMPExecutableDirective(*D);
      return;
    }
  }

  switch (S->getStmtClass()) {
  case Stmt::NoStmtClass:
  case Stmt::CXXCatchStmtClass:
  case Stmt::SEHExceptStmtClass:
  case Stmt::SEHFinallyStmtClass:
  case Stmt::MSDependentExistsStmtClass:
    llvm_unreachable("invalid statement class to emit generically");
  case Stmt::NullStmtClass:
  case Stmt::CompoundStmtClass:
  case Stmt::DeclStmtClass:
  case Stmt::LabelStmtClass:
  case Stmt::AttributedStmtClass:
  case Stmt::GotoStmtClass:
  case Stmt::BreakStmtClass:
  case Stmt::ContinueStmtClass:
  case Stmt::DefaultStmtClass:
  case Stmt::CaseStmtClass:
  case Stmt::SEHLeaveStmtClass:
    llvm_unreachable("should have emitted these statements as simple");
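
// A note on the block below: StmtNodes.inc is an "X-macro" file that expands
// the STMT/EXPR/ABSTRACT_STMT macros once per AST node class. Defining EXPR
// to emit a case label (and the other two to expand to nothing) turns the
// #include into a case label for every expression class, so all expressions
// funnel into the single handler that follows.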
#define STMT(Type, Base)
#define ABSTRACT_STMT(Op)
#define EXPR(Type, Base) \
  case Stmt::Type##Class:
#include "clang/AST/StmtNodes.inc"
  {
    // Remember the block we came in on.
    llvm::BasicBlock *incoming = Builder.GetInsertBlock();
    assert(incoming && "expression emission must have an insertion point");

    EmitIgnoredExpr(cast<Expr>(S));

    llvm::BasicBlock *outgoing = Builder.GetInsertBlock();
    assert(outgoing && "expression emission cleared block!");

    // The expression emitters assume (reasonably!) that the insertion
    // point is always set.  To maintain that, the call-emission code
    // for noreturn functions has to enter a new block with no
    // predecessors.  We want to kill that block and mark the current
    // insertion point unreachable in the common case of a call like
    // "exit();".  Since expression emission doesn't otherwise create
    // blocks with no predecessors, we can just test for that.
    // However, we must be careful not to do this to our incoming
    // block, because *statement* emission does sometimes create
    // reachable blocks which will have no predecessors until later in
    // the function.  This occurs with, e.g., labels that are not
    // reachable by fallthrough.
    if (incoming != outgoing && outgoing->use_empty()) {
      outgoing->eraseFromParent();
      Builder.ClearInsertionPoint();
    }
    break;
  }

  case Stmt::IndirectGotoStmtClass:
    EmitIndirectGotoStmt(cast<IndirectGotoStmt>(*S)); break;

  case Stmt::IfStmtClass:      EmitIfStmt(cast<IfStmt>(*S));              break;
  case Stmt::WhileStmtClass:   EmitWhileStmt(cast<WhileStmt>(*S), Attrs); break;
  case Stmt::DoStmtClass:      EmitDoStmt(cast<DoStmt>(*S), Attrs);       break;
  case Stmt::ForStmtClass:     EmitForStmt(cast<ForStmt>(*S), Attrs);     break;

  case Stmt::ReturnStmtClass:  EmitReturnStmt(cast<ReturnStmt>(*S));      break;

  case Stmt::SwitchStmtClass:  EmitSwitchStmt(cast<SwitchStmt>(*S));      break;
  case Stmt::GCCAsmStmtClass:  // Intentional fall-through.
  case Stmt::MSAsmStmtClass:   EmitAsmStmt(cast<AsmStmt>(*S));            break;
  case Stmt::CoroutineBodyStmtClass:
    EmitCoroutineBody(cast<CoroutineBodyStmt>(*S));
    break;
  case Stmt::CoreturnStmtClass:
    EmitCoreturnStmt(cast<CoreturnStmt>(*S));
    break;
  case Stmt::CapturedStmtClass: {
    const CapturedStmt *CS = cast<CapturedStmt>(S);
    EmitCapturedStmt(*CS, CS->getCapturedRegionKind());
    }
    break;
  case Stmt::ObjCAtTryStmtClass:
    EmitObjCAtTryStmt(cast<ObjCAtTryStmt>(*S));
    break;
  case Stmt::ObjCAtCatchStmtClass:
    llvm_unreachable(
                    "@catch statements should be handled by EmitObjCAtTryStmt");
  case Stmt::ObjCAtFinallyStmtClass:
    llvm_unreachable(
                  "@finally statements should be handled by EmitObjCAtTryStmt");
  case Stmt::ObjCAtThrowStmtClass:
    EmitObjCAtThrowStmt(cast<ObjCAtThrowStmt>(*S));
    break;
  case Stmt::ObjCAtSynchronizedStmtClass:
    EmitObjCAtSynchronizedStmt(cast<ObjCAtSynchronizedStmt>(*S));
    break;
  case Stmt::ObjCForCollectionStmtClass:
    EmitObjCForCollectionStmt(cast<ObjCForCollectionStmt>(*S));
    break;
  case Stmt::ObjCAutoreleasePoolStmtClass:
    EmitObjCAutoreleasePoolStmt(cast<ObjCAutoreleasePoolStmt>(*S));
    break;

  case Stmt::CXXTryStmtClass:
    EmitCXXTryStmt(cast<CXXTryStmt>(*S));
    break;
  case Stmt::CXXForRangeStmtClass:
    EmitCXXForRangeStmt(cast<CXXForRangeStmt>(*S), Attrs);
    break;
  case Stmt::SEHTryStmtClass:
    EmitSEHTryStmt(cast<SEHTryStmt>(*S));
    break;
  case Stmt::OMPMetaDirectiveClass:
    EmitOMPMetaDirective(cast<OMPMetaDirective>(*S));
    break;
  case Stmt::OMPCanonicalLoopClass:
    EmitOMPCanonicalLoop(cast<OMPCanonicalLoop>(S));
    break;
  case Stmt::OMPParallelDirectiveClass:
    EmitOMPParallelDirective(cast<OMPParallelDirective>(*S));
    break;
  case Stmt::OMPSimdDirectiveClass:
    EmitOMPSimdDirective(cast<OMPSimdDirective>(*S));
    break;
  case Stmt::OMPTileDirectiveClass:
    EmitOMPTileDirective(cast<OMPTileDirective>(*S));
    break;
  case Stmt::OMPUnrollDirectiveClass:
    EmitOMPUnrollDirective(cast<OMPUnrollDirective>(*S));
    break;
  case Stmt::OMPForDirectiveClass:
    EmitOMPForDirective(cast<OMPForDirective>(*S));
    break;
  case Stmt::OMPForSimdDirectiveClass:
    EmitOMPForSimdDirective(cast<OMPForSimdDirective>(*S));
    break;
  case Stmt::OMPSectionsDirectiveClass:
    EmitOMPSectionsDirective(cast<OMPSectionsDirective>(*S));
    break;
  case Stmt::OMPSectionDirectiveClass:
    EmitOMPSectionDirective(cast<OMPSectionDirective>(*S));
    break;
  case Stmt::OMPSingleDirectiveClass:
    EmitOMPSingleDirective(cast<OMPSingleDirective>(*S));
    break;
  case Stmt::OMPMasterDirectiveClass:
    EmitOMPMasterDirective(cast<OMPMasterDirective>(*S));
    break;
  case Stmt::OMPCriticalDirectiveClass:
    EmitOMPCriticalDirective(cast<OMPCriticalDirective>(*S));
    break;
  case Stmt::OMPParallelForDirectiveClass:
    EmitOMPParallelForDirective(cast<OMPParallelForDirective>(*S));
    break;
  case Stmt::OMPParallelForSimdDirectiveClass:
    EmitOMPParallelForSimdDirective(cast<OMPParallelForSimdDirective>(*S));
    break;
  case Stmt::OMPParallelMasterDirectiveClass:
    EmitOMPParallelMasterDirective(cast<OMPParallelMasterDirective>(*S));
    break;
  case Stmt::OMPParallelSectionsDirectiveClass:
    EmitOMPParallelSectionsDirective(cast<OMPParallelSectionsDirective>(*S));
    break;
  case Stmt::OMPTaskDirectiveClass:
    EmitOMPTaskDirective(cast<OMPTaskDirective>(*S));
    break;
  case Stmt::OMPTaskyieldDirectiveClass:
    EmitOMPTaskyieldDirective(cast<OMPTaskyieldDirective>(*S));
    break;
  case Stmt::OMPBarrierDirectiveClass:
    EmitOMPBarrierDirective(cast<OMPBarrierDirective>(*S));
    break;
  case Stmt::OMPTaskwaitDirectiveClass:
    EmitOMPTaskwaitDirective(cast<OMPTaskwaitDirective>(*S));
    break;
  case Stmt::OMPTaskgroupDirectiveClass:
    EmitOMPTaskgroupDirective(cast<OMPTaskgroupDirective>(*S));
    break;
  case Stmt::OMPFlushDirectiveClass:
    EmitOMPFlushDirective(cast<OMPFlushDirective>(*S));
    break;
  case Stmt::OMPDepobjDirectiveClass:
    EmitOMPDepobjDirective(cast<OMPDepobjDirective>(*S));
    break;
  case Stmt::OMPScanDirectiveClass:
    EmitOMPScanDirective(cast<OMPScanDirective>(*S));
    break;
  case Stmt::OMPOrderedDirectiveClass:
    EmitOMPOrderedDirective(cast<OMPOrderedDirective>(*S));
    break;
  case Stmt::OMPAtomicDirectiveClass:
    EmitOMPAtomicDirective(cast<OMPAtomicDirective>(*S));
    break;
  case Stmt::OMPTargetDirectiveClass:
    EmitOMPTargetDirective(cast<OMPTargetDirective>(*S));
    break;
  case Stmt::OMPTeamsDirectiveClass:
    EmitOMPTeamsDirective(cast<OMPTeamsDirective>(*S));
    break;
  case Stmt::OMPCancellationPointDirectiveClass:
    EmitOMPCancellationPointDirective(cast<OMPCancellationPointDirective>(*S));
    break;
  case Stmt::OMPCancelDirectiveClass:
    EmitOMPCancelDirective(cast<OMPCancelDirective>(*S));
    break;
  case Stmt::OMPTargetDataDirectiveClass:
    EmitOMPTargetDataDirective(cast<OMPTargetDataDirective>(*S));
    break;
  case Stmt::OMPTargetEnterDataDirectiveClass:
    EmitOMPTargetEnterDataDirective(cast<OMPTargetEnterDataDirective>(*S));
    break;
  case Stmt::OMPTargetExitDataDirectiveClass:
    EmitOMPTargetExitDataDirective(cast<OMPTargetExitDataDirective>(*S));
    break;
  case Stmt::OMPTargetParallelDirectiveClass:
    EmitOMPTargetParallelDirective(cast<OMPTargetParallelDirective>(*S));
    break;
  case Stmt::OMPTargetParallelForDirectiveClass:
    EmitOMPTargetParallelForDirective(cast<OMPTargetParallelForDirective>(*S));
    break;
  case Stmt::OMPTaskLoopDirectiveClass:
    EmitOMPTaskLoopDirective(cast<OMPTaskLoopDirective>(*S));
    break;
  case Stmt::OMPTaskLoopSimdDirectiveClass:
    EmitOMPTaskLoopSimdDirective(cast<OMPTaskLoopSimdDirective>(*S));
    break;
  case Stmt::OMPMasterTaskLoopDirectiveClass:
    EmitOMPMasterTaskLoopDirective(cast<OMPMasterTaskLoopDirective>(*S));
    break;
  case Stmt::OMPMaskedTaskLoopDirectiveClass:
    llvm_unreachable("masked taskloop directive not supported yet.");
    break;
  case Stmt::OMPMasterTaskLoopSimdDirectiveClass:
    EmitOMPMasterTaskLoopSimdDirective(
        cast<OMPMasterTaskLoopSimdDirective>(*S));
    break;
  case Stmt::OMPMaskedTaskLoopSimdDirectiveClass:
    llvm_unreachable("masked taskloop simd directive not supported yet.");
    break;
  case Stmt::OMPParallelMasterTaskLoopDirectiveClass:
    EmitOMPParallelMasterTaskLoopDirective(
        cast<OMPParallelMasterTaskLoopDirective>(*S));
    break;
  case Stmt::OMPParallelMaskedTaskLoopDirectiveClass:
    llvm_unreachable("parallel masked taskloop directive not supported yet.");
    break;
  case Stmt::OMPParallelMasterTaskLoopSimdDirectiveClass:
    EmitOMPParallelMasterTaskLoopSimdDirective(
        cast<OMPParallelMasterTaskLoopSimdDirective>(*S));
    break;
  case Stmt::OMPParallelMaskedTaskLoopSimdDirectiveClass:
    llvm_unreachable(
        "parallel masked taskloop simd directive not supported yet.");
    break;
  case Stmt::OMPDistributeDirectiveClass:
    EmitOMPDistributeDirective(cast<OMPDistributeDirective>(*S));
    break;
  case Stmt::OMPTargetUpdateDirectiveClass:
    EmitOMPTargetUpdateDirective(cast<OMPTargetUpdateDirective>(*S));
    break;
  case Stmt::OMPDistributeParallelForDirectiveClass:
    EmitOMPDistributeParallelForDirective(
        cast<OMPDistributeParallelForDirective>(*S));
    break;
  case Stmt::OMPDistributeParallelForSimdDirectiveClass:
    EmitOMPDistributeParallelForSimdDirective(
        cast<OMPDistributeParallelForSimdDirective>(*S));
    break;
  case Stmt::OMPDistributeSimdDirectiveClass:
    EmitOMPDistributeSimdDirective(cast<OMPDistributeSimdDirective>(*S));
    break;
  case Stmt::OMPTargetParallelForSimdDirectiveClass:
    EmitOMPTargetParallelForSimdDirective(
        cast<OMPTargetParallelForSimdDirective>(*S));
    break;
  case Stmt::OMPTargetSimdDirectiveClass:
    EmitOMPTargetSimdDirective(cast<OMPTargetSimdDirective>(*S));
    break;
  case Stmt::OMPTeamsDistributeDirectiveClass:
    EmitOMPTeamsDistributeDirective(cast<OMPTeamsDistributeDirective>(*S));
    break;
  case Stmt::OMPTeamsDistributeSimdDirectiveClass:
    EmitOMPTeamsDistributeSimdDirective(
        cast<OMPTeamsDistributeSimdDirective>(*S));
    break;
  case Stmt::OMPTeamsDistributeParallelForSimdDirectiveClass:
    EmitOMPTeamsDistributeParallelForSimdDirective(
        cast<OMPTeamsDistributeParallelForSimdDirective>(*S));
    break;
  case Stmt::OMPTeamsDistributeParallelForDirectiveClass:
    EmitOMPTeamsDistributeParallelForDirective(
        cast<OMPTeamsDistributeParallelForDirective>(*S));
    break;
  case Stmt::OMPTargetTeamsDirectiveClass:
    EmitOMPTargetTeamsDirective(cast<OMPTargetTeamsDirective>(*S));
    break;
  case Stmt::OMPTargetTeamsDistributeDirectiveClass:
    EmitOMPTargetTeamsDistributeDirective(
        cast<OMPTargetTeamsDistributeDirective>(*S));
    break;
  case Stmt::OMPTargetTeamsDistributeParallelForDirectiveClass:
    EmitOMPTargetTeamsDistributeParallelForDirective(
        cast<OMPTargetTeamsDistributeParallelForDirective>(*S));
    break;
  case Stmt::OMPTargetTeamsDistributeParallelForSimdDirectiveClass:
    EmitOMPTargetTeamsDistributeParallelForSimdDirective(
        cast<OMPTargetTeamsDistributeParallelForSimdDirective>(*S));
    break;
  case Stmt::OMPTargetTeamsDistributeSimdDirectiveClass:
    EmitOMPTargetTeamsDistributeSimdDirective(
        cast<OMPTargetTeamsDistributeSimdDirective>(*S));
    break;
  case Stmt::OMPInteropDirectiveClass:
    EmitOMPInteropDirective(cast<OMPInteropDirective>(*S));
    break;
  case Stmt::OMPDispatchDirectiveClass:
    llvm_unreachable("Dispatch directive not supported yet.");
    break;
  case Stmt::OMPMaskedDirectiveClass:
    EmitOMPMaskedDirective(cast<OMPMaskedDirective>(*S));
    break;
  case Stmt::OMPGenericLoopDirectiveClass:
    EmitOMPGenericLoopDirective(cast<OMPGenericLoopDirective>(*S));
    break;
  case Stmt::OMPTeamsGenericLoopDirectiveClass:
    llvm_unreachable("teams loop directive not supported yet.");
    break;
  case Stmt::OMPTargetTeamsGenericLoopDirectiveClass:
    llvm_unreachable("target teams loop directive not supported yet.");
    break;
  case Stmt::OMPParallelGenericLoopDirectiveClass:
    llvm_unreachable("parallel loop directive not supported yet.");
    break;
  case Stmt::OMPTargetParallelGenericLoopDirectiveClass:
    llvm_unreachable("target parallel loop directive not supported yet.");
    break;
  case Stmt::OMPParallelMaskedDirectiveClass:
    llvm_unreachable("parallel masked directive not supported yet.");
    break;
  }
}
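
// For reference: "simple" statements are those that manage their own debug
// locations and control flow (null and compound statements, declarations,
// labels, gotos, break/continue, switch cases, and SEH leave); EmitStmt above
// tries EmitSimpleStmt first so they bypass the generic stop-point and
// unreachable-code handling.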

bool CodeGenFunction::EmitSimpleStmt(const Stmt *S,
                                     ArrayRef<const Attr *> Attrs) {
  switch (S->getStmtClass()) {
  default:
    return false;
  case Stmt::NullStmtClass:
    break;
  case Stmt::CompoundStmtClass:
    EmitCompoundStmt(cast<CompoundStmt>(*S));
    break;
  case Stmt::DeclStmtClass:
    EmitDeclStmt(cast<DeclStmt>(*S));
    break;
  case Stmt::LabelStmtClass:
    EmitLabelStmt(cast<LabelStmt>(*S));
    break;
  case Stmt::AttributedStmtClass:
    EmitAttributedStmt(cast<AttributedStmt>(*S));
    break;
  case Stmt::GotoStmtClass:
    EmitGotoStmt(cast<GotoStmt>(*S));
    break;
  case Stmt::BreakStmtClass:
    EmitBreakStmt(cast<BreakStmt>(*S));
    break;
  case Stmt::ContinueStmtClass:
    EmitContinueStmt(cast<ContinueStmt>(*S));
    break;
  case Stmt::DefaultStmtClass:
    EmitDefaultStmt(cast<DefaultStmt>(*S), Attrs);
    break;
  case Stmt::CaseStmtClass:
    EmitCaseStmt(cast<CaseStmt>(*S), Attrs);
    break;
  case Stmt::SEHLeaveStmtClass:
    EmitSEHLeaveStmt(cast<SEHLeaveStmt>(*S));
    break;
  }
  return true;
}

/// EmitCompoundStmt - Emit a compound statement {..} node.  If GetLast is true,
/// this captures the expression result of the last sub-statement and returns it
/// (for use by the statement expression extension).
Address CodeGenFunction::EmitCompoundStmt(const CompoundStmt &S, bool GetLast,
                                          AggValueSlot AggSlot) {
  PrettyStackTraceLoc CrashInfo(getContext().getSourceManager(),S.getLBracLoc(),
                             "LLVM IR generation of compound statement ('{}')");

  // Keep track of the current cleanup stack depth, including debug scopes.
  LexicalScope Scope(*this, S.getSourceRange());

  return EmitCompoundStmtWithoutScope(S, GetLast, AggSlot);
}
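
// For reference: the GetLast path handled below serves GNU statement
// expressions, e.g. `int x = ({ int y = compute(); y + 1; });`, where the
// value of the last sub-statement becomes the value of the whole expression.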

Address
CodeGenFunction::EmitCompoundStmtWithoutScope(const CompoundStmt &S,
                                              bool GetLast,
                                              AggValueSlot AggSlot) {

  const Stmt *ExprResult = S.getStmtExprResult();
  assert((!GetLast || (GetLast && ExprResult)) &&
         "If GetLast is true then the CompoundStmt must have a StmtExprResult");

  Address RetAlloca = Address::invalid();

  for (auto *CurStmt : S.body()) {
    if (GetLast && ExprResult == CurStmt) {
      // We have to special case labels here.  They are statements, but when put
      // at the end of a statement expression, they yield the value of their
      // subexpression.  Handle this by walking through all labels we encounter,
      // emitting them before we evaluate the subexpr.
      // Similar issues arise for attributed statements.
      while (!isa<Expr>(ExprResult)) {
        if (const auto *LS = dyn_cast<LabelStmt>(ExprResult)) {
          EmitLabel(LS->getDecl());
          ExprResult = LS->getSubStmt();
        } else if (const auto *AS = dyn_cast<AttributedStmt>(ExprResult)) {
          // FIXME: Update this if we ever have attributes that affect the
          // semantics of an expression.
          ExprResult = AS->getSubStmt();
        } else {
          llvm_unreachable("unknown value statement");
        }
      }

      EnsureInsertPoint();

      const Expr *E = cast<Expr>(ExprResult);
      QualType ExprTy = E->getType();
      if (hasAggregateEvaluationKind(ExprTy)) {
        EmitAggExpr(E, AggSlot);
      } else {
        // We can't return an RValue here because there might be cleanups at
        // the end of the StmtExpr.  Because of that, we have to emit the result
        // here into a temporary alloca.
        RetAlloca = CreateMemTemp(ExprTy);
        EmitAnyExprToMem(E, RetAlloca, Qualifiers(),
                         /*IsInit*/ false);
      }
    } else {
      EmitStmt(CurStmt);
    }
  }

  return RetAlloca;
}
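
// A "forwarding block" is an empty block whose only instruction is an
// unconditional branch; when it is safe to do so, the helper below folds the
// block away by redirecting all of its uses to the branch target.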

void CodeGenFunction::SimplifyForwardingBlocks(llvm::BasicBlock *BB) {
  llvm::BranchInst *BI = dyn_cast<llvm::BranchInst>(BB->getTerminator());

  // If there is a cleanup stack, then it isn't worth trying to
  // simplify this block (we would need to remove it from the scope map
  // and cleanup entry).
  if (!EHStack.empty())
    return;

  // Can only simplify direct branches.
  if (!BI || !BI->isUnconditional())
    return;

  // Can only simplify empty blocks.
  if (BI->getIterator() != BB->begin())
    return;

  BB->replaceAllUsesWith(BI->getSuccessor(0));
  BI->eraseFromParent();
  BB->eraseFromParent();
}

void CodeGenFunction::EmitBlock(llvm::BasicBlock *BB, bool IsFinished) {
  llvm::BasicBlock *CurBB = Builder.GetInsertBlock();

  // Fall out of the current block (if necessary).
  EmitBranch(BB);

  if (IsFinished && BB->use_empty()) {
    delete BB;
    return;
  }

  // Place the block after the current block, if possible, or else at
  // the end of the function.
  if (CurBB && CurBB->getParent())
    CurFn->getBasicBlockList().insertAfter(CurBB->getIterator(), BB);
  else
    CurFn->getBasicBlockList().push_back(BB);
  Builder.SetInsertPoint(BB);
}

void CodeGenFunction::EmitBranch(llvm::BasicBlock *Target) {
  // Emit a branch from the current block to the target one if this
  // was a real block.  If this was just a fall-through block after a
  // terminator, don't emit it.
  llvm::BasicBlock *CurBB = Builder.GetInsertBlock();

  if (!CurBB || CurBB->getTerminator()) {
    // If there is no insert point or the previous block is already
    // terminated, don't touch it.
  } else {
    // Otherwise, create a fall-through branch.
    Builder.CreateBr(Target);
  }

  Builder.ClearInsertionPoint();
}
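
// EmitBlockAfterUses places `block` in the function immediately after the
// block containing its first use as an instruction operand, falling back to
// the end of the function; this keeps lazily created blocks near the code
// that references them.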

void CodeGenFunction::EmitBlockAfterUses(llvm::BasicBlock *block) {
  bool inserted = false;
  for (llvm::User *u : block->users()) {
    if (llvm::Instruction *insn = dyn_cast<llvm::Instruction>(u)) {
      CurFn->getBasicBlockList().insertAfter(insn->getParent()->getIterator(),
                                             block);
      inserted = true;
      break;
    }
  }

  if (!inserted)
    CurFn->getBasicBlockList().push_back(block);

  Builder.SetInsertPoint(block);
}

CodeGenFunction::JumpDest
CodeGenFunction::getJumpDestForLabel(const LabelDecl *D) {
  JumpDest &Dest = LabelMap[D];
  if (Dest.isValid()) return Dest;

  // Create, but don't insert, the new block.
  Dest = JumpDest(createBasicBlock(D->getName()),
                  EHScopeStack::stable_iterator::invalid(),
                  NextCleanupDestIndex++);
  return Dest;
}
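
// Note: getJumpDestForLabel above creates a label's block lazily, with an
// invalid cleanup-scope depth; EmitLabel below fills in the real scope depth
// when the label is finally emitted and resolves any branch fixups that
// accumulated from forward gotos.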

void CodeGenFunction::EmitLabel(const LabelDecl *D) {
  // Add this label to the current lexical scope if we're within any
  // normal cleanups.  Jumps "in" to this label --- when permitted by
  // the language --- may need to be routed around such cleanups.
  if (EHStack.hasNormalCleanups() && CurLexicalScope)
    CurLexicalScope->addLabel(D);

  JumpDest &Dest = LabelMap[D];

  // If we didn't need a forward reference to this label, just go
  // ahead and create a destination at the current scope.
  if (!Dest.isValid()) {
    Dest = getJumpDestInCurrentScope(D->getName());

  // Otherwise, we need to give this label a target depth and remove
  // it from the branch-fixups list.
  } else {
    assert(!Dest.getScopeDepth().isValid() && "already emitted label!");
    Dest.setScopeDepth(EHStack.stable_begin());
    ResolveBranchFixups(Dest.getBlock());
  }

  EmitBlock(Dest.getBlock());

  // Emit debug info for labels.
  if (CGDebugInfo *DI = getDebugInfo()) {
    if (CGM.getCodeGenOpts().hasReducedDebugInfo()) {
      DI->setLocation(D->getLocation());
      DI->EmitLabel(D, Builder);
    }
  }

  incrementProfileCounter(D->getStmt());
}

/// Change the cleanup scope of the labels in this lexical scope to
/// match the scope of the enclosing context.
void CodeGenFunction::LexicalScope::rescopeLabels() {
  assert(!Labels.empty());
  EHScopeStack::stable_iterator innermostScope
    = CGF.EHStack.getInnermostNormalCleanup();

  // Change the scope depth of all the labels.
  for (SmallVectorImpl<const LabelDecl*>::const_iterator
         i = Labels.begin(), e = Labels.end(); i != e; ++i) {
    assert(CGF.LabelMap.count(*i));
    JumpDest &dest = CGF.LabelMap.find(*i)->second;
    assert(dest.getScopeDepth().isValid());
    assert(innermostScope.encloses(dest.getScopeDepth()));
    dest.setScopeDepth(innermostScope);
  }

  // Reparent the labels if the new scope also has cleanups.
  if (innermostScope != EHScopeStack::stable_end() && ParentScope) {
    ParentScope->Labels.append(Labels.begin(), Labels.end());
  }
}


void CodeGenFunction::EmitLabelStmt(const LabelStmt &S) {
  EmitLabel(S.getDecl());

  // IsEHa - emit eha.scope.begin if it's a side entry of a scope
  if (getLangOpts().EHAsynch && S.isSideEntry())
    EmitSehCppScopeBegin();

  EmitStmt(S.getSubStmt());
}

void CodeGenFunction::EmitAttributedStmt(const AttributedStmt &S) {
  bool nomerge = false;
  bool noinline = false;
  bool alwaysinline = false;
  const CallExpr *musttail = nullptr;

  for (const auto *A : S.getAttrs()) {
    switch (A->getKind()) {
    default:
      break;
    case attr::NoMerge:
      nomerge = true;
      break;
    case attr::NoInline:
      noinline = true;
      break;
    case attr::AlwaysInline:
      alwaysinline = true;
      break;
    case attr::MustTail:
      const Stmt *Sub = S.getSubStmt();
      const ReturnStmt *R = cast<ReturnStmt>(Sub);
      musttail = cast<CallExpr>(R->getRetValue()->IgnoreParens());
      break;
    }
  }
  SaveAndRestore<bool> save_nomerge(InNoMergeAttributedStmt, nomerge);
  SaveAndRestore<bool> save_noinline(InNoInlineAttributedStmt, noinline);
  SaveAndRestore<bool> save_alwaysinline(InAlwaysInlineAttributedStmt,
                                         alwaysinline);
  SaveAndRestore<const CallExpr *> save_musttail(MustTailCall, musttail);
  EmitStmt(S.getSubStmt(), S.getAttrs());
}
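
// For reference: the MustTail case above corresponds to statements like
// `[[clang::musttail]] return f(x);`. Sema guarantees the attributed
// sub-statement is a ReturnStmt whose value is a call expression, which is
// why the unchecked casts are safe.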

void CodeGenFunction::EmitGotoStmt(const GotoStmt &S) {
  // If this code is reachable then emit a stop point (if generating
  // debug info). We have to do this ourselves because we are on the
  // "simple" statement path.
  if (HaveInsertPoint())
    EmitStopPoint(&S);

  EmitBranchThroughCleanup(getJumpDestForLabel(S.getLabel()));
}


void CodeGenFunction::EmitIndirectGotoStmt(const IndirectGotoStmt &S) {
  if (const LabelDecl *Target = S.getConstantTarget()) {
    EmitBranchThroughCleanup(getJumpDestForLabel(Target));
    return;
  }

  // Ensure that we have an i8* for our PHI node.
  llvm::Value *V = Builder.CreateBitCast(EmitScalarExpr(S.getTarget()),
                                         Int8PtrTy, "addr");
  llvm::BasicBlock *CurBB = Builder.GetInsertBlock();

  // Get the basic block for the indirect goto.
  llvm::BasicBlock *IndGotoBB = GetIndirectGotoBlock();

  // The first instruction in the block has to be the PHI for the switch dest,
  // add an entry for this branch.
  cast<llvm::PHINode>(IndGotoBB->begin())->addIncoming(V, CurBB);

  EmitBranch(IndGotoBB);
}
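
// For reference: C++23 `if consteval { ... } else { ... }` takes the
// isConsteval() path below; only the branch that can execute at runtime (the
// else branch for plain `if consteval`, the then branch for the negated
// form) is ever emitted.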

void CodeGenFunction::EmitIfStmt(const IfStmt &S) {
  // The else branch of a consteval if statement is always the only branch that
  // can be runtime evaluated.
  if (S.isConsteval()) {
    const Stmt *Executed = S.isNegatedConsteval() ? S.getThen() : S.getElse();
    if (Executed) {
      RunCleanupsScope ExecutedScope(*this);
      EmitStmt(Executed);
    }
    return;
  }

  // C99 6.8.4.1: The first substatement is executed if the expression compares
  // unequal to 0.  The condition must be a scalar type.
  LexicalScope ConditionScope(*this, S.getCond()->getSourceRange());

  if (S.getInit())
    EmitStmt(S.getInit());

  if (S.getConditionVariable())
    EmitDecl(*S.getConditionVariable());

  // If the condition constant folds and can be elided, try to avoid emitting
  // the condition and the dead arm of the if/else.
  bool CondConstant;
  if (ConstantFoldsToSimpleInteger(S.getCond(), CondConstant,
                                   S.isConstexpr())) {
    // Figure out which block (then or else) is executed.
    const Stmt *Executed = S.getThen();
    const Stmt *Skipped  = S.getElse();
    if (!CondConstant)  // Condition false?
      std::swap(Executed, Skipped);

    // If the skipped block has no labels in it, just emit the executed block.
    // This avoids emitting dead code and simplifies the CFG substantially.
    if (S.isConstexpr() || !ContainsLabel(Skipped)) {
      if (CondConstant)
        incrementProfileCounter(&S);
      if (Executed) {
        RunCleanupsScope ExecutedScope(*this);
        EmitStmt(Executed);
      }
      return;
    }
  }

  // Otherwise, the condition did not fold, or we couldn't elide it.  Just emit
  // the conditional branch.
  llvm::BasicBlock *ThenBlock = createBasicBlock("if.then");
  llvm::BasicBlock *ContBlock = createBasicBlock("if.end");
  llvm::BasicBlock *ElseBlock = ContBlock;
  if (S.getElse())
    ElseBlock = createBasicBlock("if.else");

  // Prefer the PGO based weights over the likelihood attribute.
  // When the build isn't optimized the metadata isn't used, so don't generate
  // it.
  Stmt::Likelihood LH = Stmt::LH_None;
  uint64_t Count = getProfileCount(S.getThen());
  if (!Count && CGM.getCodeGenOpts().OptimizationLevel)
    LH = Stmt::getLikelihood(S.getThen(), S.getElse());
  EmitBranchOnBoolExpr(S.getCond(), ThenBlock, ElseBlock, Count, LH);

  // Emit the 'then' code.
  EmitBlock(ThenBlock);
  incrementProfileCounter(&S);
  {
    RunCleanupsScope ThenScope(*this);
    EmitStmt(S.getThen());
  }
  EmitBranch(ContBlock);

  // Emit the 'else' code if present.
  if (const Stmt *Else = S.getElse()) {
    {
      // There is no need to emit line number for an unconditional branch.
      auto NL = ApplyDebugLocation::CreateEmpty(*this);
      EmitBlock(ElseBlock);
    }
    {
      RunCleanupsScope ElseScope(*this);
      EmitStmt(Else);
    }
    {
      // There is no need to emit line number for an unconditional branch.
      auto NL = ApplyDebugLocation::CreateEmpty(*this);
      EmitBranch(ContBlock);
    }
  }

  // Emit the continuation block for code after the if.
  EmitBlock(ContBlock, true);
}
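
// For reference: Stmt::getLikelihood, used above and in the loop emitters
// below, inspects C++20 [[likely]]/[[unlikely]] annotations on the branches;
// when PGO counts are available they take precedence over these hints.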

void CodeGenFunction::EmitWhileStmt(const WhileStmt &S,
                                    ArrayRef<const Attr *> WhileAttrs) {
  // Emit the header for the loop, which will also become
  // the continue target.
  JumpDest LoopHeader = getJumpDestInCurrentScope("while.cond");
  EmitBlock(LoopHeader.getBlock());

  // Create an exit block for when the condition fails, which will
  // also become the break target.
  JumpDest LoopExit = getJumpDestInCurrentScope("while.end");

  // Store the blocks to use for break and continue.
  BreakContinueStack.push_back(BreakContinue(LoopExit, LoopHeader));

  // C++ [stmt.while]p2:
  //   When the condition of a while statement is a declaration, the
  //   scope of the variable that is declared extends from its point
  //   of declaration (3.3.2) to the end of the while statement.
  //   [...]
  //   The object created in a condition is destroyed and created
  //   with each iteration of the loop.
  RunCleanupsScope ConditionScope(*this);

  if (S.getConditionVariable())
    EmitDecl(*S.getConditionVariable());

  // Evaluate the conditional in the while header.  C99 6.8.5.1: The
  // evaluation of the controlling expression takes place before each
  // execution of the loop body.
  llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());

  // while(1) is common, avoid extra exit blocks.  Be sure
  // to correctly handle break/continue though.
  llvm::ConstantInt *C = dyn_cast<llvm::ConstantInt>(BoolCondVal);
  bool CondIsConstInt = C != nullptr;
  bool EmitBoolCondBranch = !CondIsConstInt || !C->isOne();
  const SourceRange &R = S.getSourceRange();
  LoopStack.push(LoopHeader.getBlock(), CGM.getContext(), CGM.getCodeGenOpts(),
                 WhileAttrs, SourceLocToDebugLoc(R.getBegin()),
                 SourceLocToDebugLoc(R.getEnd()),
                 checkIfLoopMustProgress(CondIsConstInt));

  // As long as the condition is true, go to the loop body.
  llvm::BasicBlock *LoopBody = createBasicBlock("while.body");
  if (EmitBoolCondBranch) {
    llvm::BasicBlock *ExitBlock = LoopExit.getBlock();
    if (ConditionScope.requiresCleanups())
      ExitBlock = createBasicBlock("while.exit");
    llvm::MDNode *Weights =
        createProfileWeightsForLoop(S.getCond(), getProfileCount(S.getBody()));
    if (!Weights && CGM.getCodeGenOpts().OptimizationLevel)
      BoolCondVal = emitCondLikelihoodViaExpectIntrinsic(
          BoolCondVal, Stmt::getLikelihood(S.getBody()));
    Builder.CreateCondBr(BoolCondVal, LoopBody, ExitBlock, Weights);

    if (ExitBlock != LoopExit.getBlock()) {
      EmitBlock(ExitBlock);
      EmitBranchThroughCleanup(LoopExit);
    }
  } else if (const Attr *A = Stmt::getLikelihoodAttr(S.getBody())) {
    CGM.getDiags().Report(A->getLocation(),
                          diag::warn_attribute_has_no_effect_on_infinite_loop)
        << A << A->getRange();
    CGM.getDiags().Report(
        S.getWhileLoc(),
        diag::note_attribute_has_no_effect_on_infinite_loop_here)
        << SourceRange(S.getWhileLoc(), S.getRParenLoc());
  }

  // Emit the loop body.  We have to emit this in a cleanup scope
  // because it might be a singleton DeclStmt.
  {
    RunCleanupsScope BodyScope(*this);
    EmitBlock(LoopBody);
    incrementProfileCounter(&S);
    EmitStmt(S.getBody());
  }

  BreakContinueStack.pop_back();

  // Immediately force cleanup.
  ConditionScope.ForceCleanup();

  EmitStopPoint(&S);
  // Branch to the loop header again.
  EmitBranch(LoopHeader.getBlock());

  LoopStack.pop();

  // Emit the exit block.
  EmitBlock(LoopExit.getBlock(), true);

  // The LoopHeader typically is just a branch if we skipped emitting
  // a branch, try to erase it.
  if (!EmitBoolCondBranch)
    SimplifyForwardingBlocks(LoopHeader.getBlock());
}

void CodeGenFunction::EmitDoStmt(const DoStmt &S,
                                 ArrayRef<const Attr *> DoAttrs) {
  JumpDest LoopExit = getJumpDestInCurrentScope("do.end");
  JumpDest LoopCond = getJumpDestInCurrentScope("do.cond");

  uint64_t ParentCount = getCurrentProfileCount();

  // Store the blocks to use for break and continue.
  BreakContinueStack.push_back(BreakContinue(LoopExit, LoopCond));

  // Emit the body of the loop.
  llvm::BasicBlock *LoopBody = createBasicBlock("do.body");

  EmitBlockWithFallThrough(LoopBody, &S);
  {
    RunCleanupsScope BodyScope(*this);
    EmitStmt(S.getBody());
  }

  EmitBlock(LoopCond.getBlock());

  // C99 6.8.5.2: "The evaluation of the controlling expression takes place
  // after each execution of the loop body."

  // Evaluate the conditional in the while header.
  // C99 6.8.5p2/p4: The first substatement is executed if the expression
  // compares unequal to 0.  The condition must be a scalar type.
  llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());

  BreakContinueStack.pop_back();

  // "do {} while (0)" is common in macros, avoid extra blocks.  Be sure
  // to correctly handle break/continue though.
  llvm::ConstantInt *C = dyn_cast<llvm::ConstantInt>(BoolCondVal);
  bool CondIsConstInt = C;
  bool EmitBoolCondBranch = !C || !C->isZero();

  const SourceRange &R = S.getSourceRange();
  LoopStack.push(LoopBody, CGM.getContext(), CGM.getCodeGenOpts(), DoAttrs,
                 SourceLocToDebugLoc(R.getBegin()),
                 SourceLocToDebugLoc(R.getEnd()),
                 checkIfLoopMustProgress(CondIsConstInt));

  // As long as the condition is true, iterate the loop.
  if (EmitBoolCondBranch) {
    uint64_t BackedgeCount = getProfileCount(S.getBody()) - ParentCount;
    Builder.CreateCondBr(
        BoolCondVal, LoopBody, LoopExit.getBlock(),
        createProfileWeightsForLoop(S.getCond(), BackedgeCount));
  }

  LoopStack.pop();

  // Emit the exit block.
  EmitBlock(LoopExit.getBlock());

  // The DoCond block typically is just a branch if we skipped
  // emitting a branch, try to erase it.
  if (!EmitBoolCondBranch)
    SimplifyForwardingBlocks(LoopCond.getBlock());
}

void CodeGenFunction::EmitForStmt(const ForStmt &S,
                                  ArrayRef<const Attr *> ForAttrs) {
  JumpDest LoopExit = getJumpDestInCurrentScope("for.end");

  LexicalScope ForScope(*this, S.getSourceRange());

  // Evaluate the first part before the loop.
  if (S.getInit())
    EmitStmt(S.getInit());

  // Start the loop with a block that tests the condition.
  // If there's an increment, the continue scope will be overwritten
  // later.
  JumpDest CondDest = getJumpDestInCurrentScope("for.cond");
  llvm::BasicBlock *CondBlock = CondDest.getBlock();
  EmitBlock(CondBlock);

  Expr::EvalResult Result;
  bool CondIsConstInt =
      !S.getCond() || S.getCond()->EvaluateAsInt(Result, getContext());

  const SourceRange &R = S.getSourceRange();
  LoopStack.push(CondBlock, CGM.getContext(), CGM.getCodeGenOpts(), ForAttrs,
                 SourceLocToDebugLoc(R.getBegin()),
                 SourceLocToDebugLoc(R.getEnd()),
                 checkIfLoopMustProgress(CondIsConstInt));

  // Create a cleanup scope for the condition variable cleanups.
  LexicalScope ConditionScope(*this, S.getSourceRange());

  // If the for loop doesn't have an increment we can just use the condition as
  // the continue block. Otherwise, if there is no condition variable, we can
  // form the continue block now. If there is a condition variable, we can't
  // form the continue block until after we've emitted the condition, because
  // the condition is in scope in the increment, but Sema's jump diagnostics
  // ensure that there are no continues from the condition variable that jump
  // to the loop increment.
  JumpDest Continue;
  if (!S.getInc())
    Continue = CondDest;
  else if (!S.getConditionVariable())
    Continue = getJumpDestInCurrentScope("for.inc");
  BreakContinueStack.push_back(BreakContinue(LoopExit, Continue));

  if (S.getCond()) {
    // If the for statement has a condition scope, emit the local variable
    // declaration.
    if (S.getConditionVariable()) {
      EmitDecl(*S.getConditionVariable());

      // We have entered the condition variable's scope, so we're now able to
      // jump to the continue block.
      Continue = S.getInc() ? getJumpDestInCurrentScope("for.inc") : CondDest;
      BreakContinueStack.back().ContinueBlock = Continue;
    }

    llvm::BasicBlock *ExitBlock = LoopExit.getBlock();
    // If there are any cleanups between here and the loop-exit scope,
    // create a block to stage a loop exit along.
    if (ForScope.requiresCleanups())
      ExitBlock = createBasicBlock("for.cond.cleanup");

    // As long as the condition is true, iterate the loop.
    llvm::BasicBlock *ForBody = createBasicBlock("for.body");

    // C99 6.8.5p2/p4: The first substatement is executed if the expression
    // compares unequal to 0.  The condition must be a scalar type.
    llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());
    llvm::MDNode *Weights =
        createProfileWeightsForLoop(S.getCond(), getProfileCount(S.getBody()));
    if (!Weights && CGM.getCodeGenOpts().OptimizationLevel)
      BoolCondVal = emitCondLikelihoodViaExpectIntrinsic(
          BoolCondVal, Stmt::getLikelihood(S.getBody()));

    Builder.CreateCondBr(BoolCondVal, ForBody, ExitBlock, Weights);

    if (ExitBlock != LoopExit.getBlock()) {
      EmitBlock(ExitBlock);
      EmitBranchThroughCleanup(LoopExit);
    }

    EmitBlock(ForBody);
  } else {
    // Treat it as a non-zero constant.  Don't even create a new block for the
    // body, just fall into it.
  }
  incrementProfileCounter(&S);

  {
    // Create a separate cleanup scope for the body, in case it is not
    // a compound statement.
    RunCleanupsScope BodyScope(*this);
    EmitStmt(S.getBody());
  }

  // If there is an increment, emit it next.
  if (S.getInc()) {
    EmitBlock(Continue.getBlock());
    EmitStmt(S.getInc());
  }

  BreakContinueStack.pop_back();

  ConditionScope.ForceCleanup();

  EmitStopPoint(&S);
  EmitBranch(CondBlock);

  ForScope.ForceCleanup();

  LoopStack.pop();

  // Emit the fall-through block.
  EmitBlock(LoopExit.getBlock(), true);
}

void
CodeGenFunction::EmitCXXForRangeStmt(const CXXForRangeStmt &S,
                                     ArrayRef<const Attr *> ForAttrs) {
  JumpDest LoopExit = getJumpDestInCurrentScope("for.end");

  LexicalScope ForScope(*this, S.getSourceRange());

  // Evaluate the first pieces before the loop.
  if (S.getInit())
    EmitStmt(S.getInit());
  EmitStmt(S.getRangeStmt());
  EmitStmt(S.getBeginStmt());
  EmitStmt(S.getEndStmt());

  // Start the loop with a block that tests the condition.
  // If there's an increment, the continue scope will be overwritten
  // later.
  llvm::BasicBlock *CondBlock = createBasicBlock("for.cond");
  EmitBlock(CondBlock);

  const SourceRange &R = S.getSourceRange();
  LoopStack.push(CondBlock, CGM.getContext(), CGM.getCodeGenOpts(), ForAttrs,
                 SourceLocToDebugLoc(R.getBegin()),
                 SourceLocToDebugLoc(R.getEnd()));

  // If there are any cleanups between here and the loop-exit scope,
  // create a block to stage a loop exit along.
  llvm::BasicBlock *ExitBlock = LoopExit.getBlock();
  if (ForScope.requiresCleanups())
    ExitBlock = createBasicBlock("for.cond.cleanup");

  // The loop body, consisting of the specified body and the loop variable.
  llvm::BasicBlock *ForBody = createBasicBlock("for.body");

  // The body is executed if the expression, contextually converted
  // to bool, is true.
  llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());
  llvm::MDNode *Weights =
      createProfileWeightsForLoop(S.getCond(), getProfileCount(S.getBody()));
  if (!Weights && CGM.getCodeGenOpts().OptimizationLevel)
    BoolCondVal = emitCondLikelihoodViaExpectIntrinsic(
        BoolCondVal, Stmt::getLikelihood(S.getBody()));
  Builder.CreateCondBr(BoolCondVal, ForBody, ExitBlock, Weights);

  if (ExitBlock != LoopExit.getBlock()) {
    EmitBlock(ExitBlock);
    EmitBranchThroughCleanup(LoopExit);
  }

  EmitBlock(ForBody);
  incrementProfileCounter(&S);

  // Create a block for the increment. In case of a 'continue', we jump there.
  JumpDest Continue = getJumpDestInCurrentScope("for.inc");

  // Store the blocks to use for break and continue.
  BreakContinueStack.push_back(BreakContinue(LoopExit, Continue));

  {
    // Create a separate cleanup scope for the loop variable and body.
    LexicalScope BodyScope(*this, S.getSourceRange());
    EmitStmt(S.getLoopVarStmt());
    EmitStmt(S.getBody());
  }

  EmitStopPoint(&S);
  // If there is an increment, emit it next.
  EmitBlock(Continue.getBlock());
  EmitStmt(S.getInc());

  BreakContinueStack.pop_back();

  EmitBranch(CondBlock);

  ForScope.ForceCleanup();

  LoopStack.pop();

  // Emit the fall-through block.
  EmitBlock(LoopExit.getBlock(), true);
}

void CodeGenFunction::EmitReturnOfRValue(RValue RV, QualType Ty) {
  if (RV.isScalar()) {
    Builder.CreateStore(RV.getScalarVal(), ReturnValue);
  } else if (RV.isAggregate()) {
    LValue Dest = MakeAddrLValue(ReturnValue, Ty);
    LValue Src = MakeAddrLValue(RV.getAggregateAddress(), Ty);
    EmitAggregateCopy(Dest, Src, Ty, getOverlapForReturnValue());
  } else {
    EmitStoreOfComplex(RV.getComplexVal(), MakeAddrLValue(ReturnValue, Ty),
                       /*init*/ true);
  }
  EmitBranchThroughCleanup(ReturnBlock);
}

namespace {
// RAII struct used to save and restore a return statement's result expression.
struct SaveRetExprRAII {
  SaveRetExprRAII(const Expr *RetExpr, CodeGenFunction &CGF)
      : OldRetExpr(CGF.RetExpr), CGF(CGF) {
    CGF.RetExpr = RetExpr;
  }
  ~SaveRetExprRAII() { CGF.RetExpr = OldRetExpr; }
  const Expr *OldRetExpr;
  CodeGenFunction &CGF;
};
} // namespace

/// If we have 'return f(...);', where both caller and callee are SwiftAsync,
/// codegen it as 'tail call ...; ret void;'.
static void makeTailCallIfSwiftAsync(const CallExpr *CE, CGBuilderTy &Builder,
                                     const CGFunctionInfo *CurFnInfo) {
  auto calleeQualType = CE->getCallee()->getType();
  const FunctionType *calleeType = nullptr;
  if (calleeQualType->isFunctionPointerType() ||
      calleeQualType->isFunctionReferenceType() ||
      calleeQualType->isBlockPointerType() ||
      calleeQualType->isMemberFunctionPointerType()) {
    calleeType = calleeQualType->getPointeeType()->castAs<FunctionType>();
  } else if (auto *ty = dyn_cast<FunctionType>(calleeQualType)) {
    calleeType = ty;
  } else if (auto CMCE = dyn_cast<CXXMemberCallExpr>(CE)) {
    if (auto methodDecl = CMCE->getMethodDecl()) {
      // getMethodDecl() doesn't handle member pointers at the moment.
      calleeType = methodDecl->getType()->castAs<FunctionType>();
    } else {
      return;
    }
  } else {
    return;
  }
  if (calleeType->getCallConv() == CallingConv::CC_SwiftAsync &&
      (CurFnInfo->getASTCallingConvention() == CallingConv::CC_SwiftAsync)) {
    auto CI = cast<llvm::CallInst>(&Builder.GetInsertBlock()->back());
    CI->setTailCallKind(llvm::CallInst::TCK_MustTail);
    Builder.CreateRetVoid();
    Builder.ClearInsertionPoint();
  }
}
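
// For reference: the NRVO path in EmitReturnStmt below covers returns like
//   std::string f() { std::string s; /* ... */ return s; }
// where `s` was already constructed directly in the return slot, so the
// return statement itself needs to emit no copy, only to flag that cleanups
// must not destroy the variable.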
1271
1272
/// EmitReturnStmt - Note that due to GCC extensions, this can have an operand
1273
/// if the function returns void, or may be missing one if the function returns
1274
/// non-void.  Fun stuff :).
1275
153k
void CodeGenFunction::EmitReturnStmt(const ReturnStmt &S) {
1276
153k
  if (requiresReturnValueCheck()) {
1277
15
    llvm::Constant *SLoc = EmitCheckSourceLocation(S.getBeginLoc());
1278
15
    auto *SLocPtr =
1279
15
        new llvm::GlobalVariable(CGM.getModule(), SLoc->getType(), false,
1280
15
                                 llvm::GlobalVariable::PrivateLinkage, SLoc);
1281
15
    SLocPtr->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
1282
15
    CGM.getSanitizerMetadata()->disableSanitizerForGlobal(SLocPtr);
1283
15
    assert(ReturnLocation.isValid() && "No valid return location");
1284
0
    Builder.CreateStore(Builder.CreateBitCast(SLocPtr, Int8PtrTy),
1285
15
                        ReturnLocation);
1286
15
  }
1287
1288
  // Returning from an outlined SEH helper is UB, and we already warn on it.
1289
153k
  if (IsOutlinedSEHHelper) {
1290
8
    Builder.CreateUnreachable();
1291
8
    Builder.ClearInsertionPoint();
1292
8
  }
1293
1294
  // Emit the result value, even if unused, to evaluate the side effects.
1295
153k
  const Expr *RV = S.getRetValue();
1296
1297
  // Record the result expression of the return statement. The recorded
1298
  // expression is used to determine whether a block capture's lifetime should
1299
  // end at the end of the full expression as opposed to the end of the scope
1300
  // enclosing the block expression.
1301
  //
1302
  // This permits a small, easily-implemented exception to our over-conservative
1303
  // rules about not jumping to statements following block literals with
1304
  // non-trivial cleanups.
1305
153k
  SaveRetExprRAII SaveRetExpr(RV, *this);
1306
1307
153k
  RunCleanupsScope cleanupScope(*this);
1308
153k
  if (const auto *EWC = dyn_cast_or_null<ExprWithCleanups>(RV))
1309
5.75k
    RV = EWC->getSubExpr();
1310
  // FIXME: Clean this up by using an LValue for ReturnTemp,
1311
  // EmitStoreThroughLValue, and EmitAnyExpr.
1312
  // Check if the NRVO candidate was not globalized in OpenMP mode.
1313
153k
  if (getLangOpts().ElideConstructors && S.getNRVOCandidate() &&
1314
153k
      S.getNRVOCandidate()->isNRVOVariable() &&
1315
153k
      (!getLangOpts().OpenMP ||
1316
1.39k
       !CGM.getOpenMPRuntime()
1317
82
            .getAddressOfLocalVariable(*this, S.getNRVOCandidate())
1318
1.39k
            .isValid())) {
1319
    // Apply the named return value optimization for this return statement,
1320
    // which means doing nothing: the appropriate result has already been
1321
    // constructed into the NRVO variable.
1322
1323
    // If there is an NRVO flag for this variable, set it to 1 to indicate
1324
    // that the cleanup code should not destroy the variable.
1325
1.39k
    if (llvm::Value *NRVOFlag = NRVOFlags[S.getNRVOCandidate()])
1326
228
      Builder.CreateFlagStore(Builder.getTrue(), NRVOFlag);
1327
152k
  } else if (!ReturnValue.isValid() || (RV && RV->getType()->isVoidType())) {
1328
    // Make sure not to return anything, but evaluate the expression
1329
    // for side effects.
1330
3.96k
    if (RV) {
1331
3.01k
      EmitAnyExpr(RV);
1332
3.01k
      if (auto *CE = dyn_cast<CallExpr>(RV))
1333
2.98k
        makeTailCallIfSwiftAsync(CE, Builder, CurFnInfo);
1334
3.01k
    }
1335
148k
  } else if (!RV) {
1336
    // Do nothing (return value is left uninitialized)
1337
148k
  } else if (FnRetTy->isReferenceType()) {
1338
    // If this function returns a reference, take the address of the expression
1339
    // rather than the value.
1340
18.5k
    RValue Result = EmitReferenceBindingToExpr(RV);
1341
18.5k
    Builder.CreateStore(Result.getScalarVal(), ReturnValue);
1342
129k
  } else {
1343
129k
    switch (getEvaluationKind(RV->getType())) {
1344
122k
    case TEK_Scalar:
1345
122k
      Builder.CreateStore(EmitScalarExpr(RV), ReturnValue);
1346
122k
      break;
1347
695
    case TEK_Complex:
1348
695
      EmitComplexExprIntoLValue(RV, MakeAddrLValue(ReturnValue, RV->getType()),
1349
695
                                /*isInit*/ true);
1350
695
      break;
1351
6.74k
    case TEK_Aggregate:
1352
6.74k
      EmitAggExpr(RV, AggValueSlot::forAddr(
1353
6.74k
                          ReturnValue, Qualifiers(),
1354
6.74k
                          AggValueSlot::IsDestructed,
1355
6.74k
                          AggValueSlot::DoesNotNeedGCBarriers,
1356
6.74k
                          AggValueSlot::IsNotAliased,
1357
6.74k
                          getOverlapForReturnValue()));
1358
6.74k
      break;
1359
129k
    }
1360
129k
  }
1361
1362
153k
  ++NumReturnExprs;
1363
153k
  if (!RV || RV->isEvaluatable(getContext()))
1364
16.3k
    ++NumSimpleReturnExprs;
1365
1366
153k
  cleanupScope.ForceCleanup();
1367
153k
  EmitBranchThroughCleanup(ReturnBlock);
1368
153k
}
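// Illustrative sketch (editor's addition): the NRVO fast path above emits no
// copy at all because the named local was already constructed in the return
// slot.  Assuming a hypothetical class type Widget:
//
//   Widget make() {
//     Widget w;     // constructed directly in the caller-provided return slot
//     w.fill();
//     return w;     // NRVO: at most a store to the NRVO flag is emitted here
//   }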
1369
1370
196k
void CodeGenFunction::EmitDeclStmt(const DeclStmt &S) {
1371
  // As long as debug info is modeled with instructions, we have to ensure we
1372
  // have a place to insert here and write the stop point here.
1373
196k
  if (HaveInsertPoint())
1374
196k
    EmitStopPoint(&S);
1375
1376
196k
  for (const auto *I : S.decls())
1377
198k
    EmitDecl(*I);
1378
196k
}
1379
1380
5.77k
void CodeGenFunction::EmitBreakStmt(const BreakStmt &S) {
1381
5.77k
  assert(!BreakContinueStack.empty() && "break stmt not in a loop or switch!");
1382
1383
  // If this code is reachable then emit a stop point (if generating
1384
  // debug info). We have to do this ourselves because we are on the
1385
  // "simple" statement path.
1386
5.77k
  if (HaveInsertPoint())
1387
5.77k
    EmitStopPoint(&S);
1388
1389
5.77k
  EmitBranchThroughCleanup(BreakContinueStack.back().BreakBlock);
1390
5.77k
}
1391
1392
10.9k
void CodeGenFunction::EmitContinueStmt(const ContinueStmt &S) {
1393
10.9k
  assert(!BreakContinueStack.empty() && "continue stmt not in a loop!");
1394
1395
  // If this code is reachable then emit a stop point (if generating
1396
  // debug info). We have to do this ourselves because we are on the
1397
  // "simple" statement path.
1398
10.9k
  if (HaveInsertPoint())
1399
10.9k
    EmitStopPoint(&S);
1400
1401
10.9k
  EmitBranchThroughCleanup(BreakContinueStack.back().ContinueBlock);
1402
10.9k
}
1403
1404
/// EmitCaseStmtRange - If case statement range is not too big then
1405
/// add multiple cases to switch instruction, one for each value within
1406
/// the range. If range is too big then emit "if" condition check.
1407
void CodeGenFunction::EmitCaseStmtRange(const CaseStmt &S,
1408
43
                                        ArrayRef<const Attr *> Attrs) {
1409
43
  assert(S.getRHS() && "Expected RHS value in CaseStmt");
1410
1411
0
  llvm::APSInt LHS = S.getLHS()->EvaluateKnownConstInt(getContext());
1412
43
  llvm::APSInt RHS = S.getRHS()->EvaluateKnownConstInt(getContext());
1413
1414
  // Emit the code for this case. We do this first to make sure it is
1415
  // properly chained from our predecessor before generating the
1416
  // switch machinery to enter this block.
1417
43
  llvm::BasicBlock *CaseDest = createBasicBlock("sw.bb");
1418
43
  EmitBlockWithFallThrough(CaseDest, &S);
1419
43
  EmitStmt(S.getSubStmt());
1420
1421
  // If range is empty, do nothing.
1422
43
  if (LHS.isSigned() ? RHS.slt(LHS) : RHS.ult(LHS))
1423
4
    return;
1424
1425
39
  Stmt::Likelihood LH = Stmt::getLikelihood(Attrs);
1426
39
  llvm::APInt Range = RHS - LHS;
1427
  // FIXME: parameters such as this should not be hardcoded.
1428
39
  if (Range.ult(llvm::APInt(Range.getBitWidth(), 64))) {
1429
    // Range is small enough to add multiple switch instruction cases.
1430
22
    uint64_t Total = getProfileCount(&S);
1431
22
    unsigned NCases = Range.getZExtValue() + 1;
1432
    // We only have one region counter for the entire set of cases here, so we
1433
    // need to divide the weights evenly between the generated cases, ensuring
1434
    // that the total weight is preserved. E.g., a weight of 5 over three cases
1435
    // will be distributed as weights of 2, 2, and 1.
1436
22
    uint64_t Weight = Total / NCases, Rem = Total % NCases;
1437
103
    for (unsigned I = 0; I != NCases; ++I) {
1438
81
      if (SwitchWeights)
1439
24
        SwitchWeights->push_back(Weight + (Rem ? 1 : 0));
1440
57
      else if (SwitchLikelihood)
1441
35
        SwitchLikelihood->push_back(LH);
1442
1443
81
      if (Rem)
1444
4
        Rem--;
1445
81
      SwitchInsn->addCase(Builder.getInt(LHS), CaseDest);
1446
81
      ++LHS;
1447
81
    }
1448
22
    return;
1449
22
  }
1450
1451
  // The range is too big. Emit "if" condition into a new block,
1452
  // making sure to save and restore the current insertion point.
1453
17
  llvm::BasicBlock *RestoreBB = Builder.GetInsertBlock();
1454
1455
  // Push this test onto the chain of range checks (which terminates
1456
  // in the default basic block). The switch's default will be changed
1457
  // to the top of this chain after switch emission is complete.
1458
17
  llvm::BasicBlock *FalseDest = CaseRangeBlock;
1459
17
  CaseRangeBlock = createBasicBlock("sw.caserange");
1460
1461
17
  CurFn->getBasicBlockList().push_back(CaseRangeBlock);
1462
17
  Builder.SetInsertPoint(CaseRangeBlock);
1463
1464
  // Emit range check.
1465
17
  llvm::Value *Diff =
1466
17
    Builder.CreateSub(SwitchInsn->getCondition(), Builder.getInt(LHS));
1467
17
  llvm::Value *Cond =
1468
17
    Builder.CreateICmpULE(Diff, Builder.getInt(Range), "inbounds");
1469
1470
17
  llvm::MDNode *Weights = nullptr;
1471
17
  if (SwitchWeights) {
1472
8
    uint64_t ThisCount = getProfileCount(&S);
1473
8
    uint64_t DefaultCount = (*SwitchWeights)[0];
1474
8
    Weights = createProfileWeights(ThisCount, DefaultCount);
1475
1476
    // Since we're chaining the switch default through each large case range, we
1477
    // need to update the weight for the default, i.e., the first case, to include
1478
    // this case.
1479
8
    (*SwitchWeights)[0] += ThisCount;
1480
9
  } else if (SwitchLikelihood)
1481
7
    Cond = emitCondLikelihoodViaExpectIntrinsic(Cond, LH);
1482
1483
17
  Builder.CreateCondBr(Cond, CaseDest, FalseDest, Weights);
1484
1485
  // Restore the appropriate insertion point.
1486
17
  if (RestoreBB)
1487
13
    Builder.SetInsertPoint(RestoreBB);
1488
4
  else
1489
4
    Builder.ClearInsertionPoint();
1490
17
}
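// Illustrative sketch (editor's addition): EmitCaseStmtRange handles the GNU
// case-range extension.  Ranges of fewer than 64 values are expanded into
// individual switch cases; larger ranges become the chained unsigned compare
// built above:
//
//   switch (x) {
//   case 0 ... 15:    return 1;  // 16 separate cases added to the SwitchInst
//   case 100 ... 999: return 2;  // emitted as (x - 100) ule 899 ("inbounds")
//   }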
1491
1492
void CodeGenFunction::EmitCaseStmt(const CaseStmt &S,
1493
1.05k
                                   ArrayRef<const Attr *> Attrs) {
1494
  // If there is no enclosing switch instance that we're aware of, then this
1495
  // case statement and its block can be elided.  This situation only happens
1496
  // when we've constant-folded the switch, are emitting the constant case,
1497
  // and part of the constant case includes another case statement.  For
1498
  // instance: switch (4) { case 4: do { case 5: } while (1); }
1499
1.05k
  if (!SwitchInsn) {
1500
2
    EmitStmt(S.getSubStmt());
1501
2
    return;
1502
2
  }
1503
1504
  // Handle case ranges.
1505
1.04k
  if (S.getRHS()) {
1506
43
    EmitCaseStmtRange(S, Attrs);
1507
43
    return;
1508
43
  }
1509
1510
1.00k
  llvm::ConstantInt *CaseVal =
1511
1.00k
    Builder.getInt(S.getLHS()->EvaluateKnownConstInt(getContext()));
1512
1.00k
  if (SwitchLikelihood)
1513
114
    SwitchLikelihood->push_back(Stmt::getLikelihood(Attrs));
1514
1515
  // If the body of the case is just a 'break', try to not emit an empty block.
1516
  // If we're profiling or we're not optimizing, leave the block in for better
1517
  // debug and coverage analysis.
1518
1.00k
  if (!CGM.getCodeGenOpts().hasProfileClangInstr() &&
1519
1.00k
      CGM.getCodeGenOpts().OptimizationLevel > 0 &&
1520
1.00k
      isa<BreakStmt>(S.getSubStmt())) {
1521
15
    JumpDest Block = BreakContinueStack.back().BreakBlock;
1522
1523
    // Only do this optimization if there are no cleanups that need emitting.
1524
15
    if (isObviouslyBranchWithoutCleanups(Block)) {
1525
15
      if (SwitchWeights)
1526
0
        SwitchWeights->push_back(getProfileCount(&S));
1527
15
      SwitchInsn->addCase(CaseVal, Block.getBlock());
1528
1529
      // If there was a fallthrough into this case, make sure to redirect it to
1530
      // the end of the switch as well.
1531
15
      if (Builder.GetInsertBlock()) {
1532
0
        Builder.CreateBr(Block.getBlock());
1533
0
        Builder.ClearInsertionPoint();
1534
0
      }
1535
15
      return;
1536
15
    }
1537
15
  }
1538
1539
991
  llvm::BasicBlock *CaseDest = createBasicBlock("sw.bb");
1540
991
  EmitBlockWithFallThrough(CaseDest, &S);
1541
991
  if (SwitchWeights)
1542
54
    SwitchWeights->push_back(getProfileCount(&S));
1543
991
  SwitchInsn->addCase(CaseVal, CaseDest);
1544
1545
  // Recursively emitting the statement is acceptable, but is not wonderful for
1546
  // code where we have many case statements nested together, i.e.:
1547
  //  case 1:
1548
  //    case 2:
1549
  //      case 3: etc.
1550
  // Handling this recursively will create a new block for each case statement
1551
  // that falls through to the next case which is IR intensive.  It also causes
1552
  // deep recursion which can run into stack depth limitations.  Handle
1553
  // sequential non-range case statements specially.
1554
  //
1555
  // TODO When the next case has a likelihood attribute the code returns to the
1556
  // recursive algorithm. Maybe improve this case if it becomes common practice
1557
  // to use a lot of attributes.
1558
991
  const CaseStmt *CurCase = &S;
1559
991
  const CaseStmt *NextCase = dyn_cast<CaseStmt>(S.getSubStmt());
1560
1561
  // Otherwise, iteratively add consecutive cases to this switch stmt.
1562
1.13k
  while (NextCase && NextCase->getRHS() == nullptr) {
1563
143
    CurCase = NextCase;
1564
143
    llvm::ConstantInt *CaseVal =
1565
143
      Builder.getInt(CurCase->getLHS()->EvaluateKnownConstInt(getContext()));
1566
1567
143
    if (SwitchWeights)
1568
7
      SwitchWeights->push_back(getProfileCount(NextCase));
1569
143
    if (CGM.getCodeGenOpts().hasProfileClangInstr()) {
1570
3
      CaseDest = createBasicBlock("sw.bb");
1571
3
      EmitBlockWithFallThrough(CaseDest, CurCase);
1572
3
    }
1573
    // Since this loop is only executed when the CaseStmt has no attributes
1574
    // use a hard-coded value.
1575
143
    if (SwitchLikelihood)
1576
14
      SwitchLikelihood->push_back(Stmt::LH_None);
1577
1578
143
    SwitchInsn->addCase(CaseVal, CaseDest);
1579
143
    NextCase = dyn_cast<CaseStmt>(CurCase->getSubStmt());
1580
143
  }
1581
1582
  // Generate a stop point for debug info if the case statement is
1583
  // followed by a default statement. A fallthrough case before a
1584
  // default case gets its own branch target.
1585
991
  if (CurCase->getSubStmt()->getStmtClass() == Stmt::DefaultStmtClass)
1586
2
    EmitStopPoint(CurCase);
1587
1588
  // Normal default recursion for non-cases.
1589
991
  EmitStmt(CurCase->getSubStmt());
1590
991
}
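// Illustrative sketch (editor's addition): the iterative loop above means a
// run of labels such as
//
//   case 1: case 2: case 3: return f();
//
// adds three case values pointing at one shared block, instead of recursing
// once per label and creating a fall-through basic block for each.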
1591
1592
void CodeGenFunction::EmitDefaultStmt(const DefaultStmt &S,
1593
197
                                      ArrayRef<const Attr *> Attrs) {
1594
  // If there is no enclosing switch instance that we're aware of, then this
1595
  // default statement can be elided. This situation only happens when we've
1596
  // constant-folded the switch.
1597
197
  if (!SwitchInsn) {
1598
1
    EmitStmt(S.getSubStmt());
1599
1
    return;
1600
1
  }
1601
1602
196
  llvm::BasicBlock *DefaultBlock = SwitchInsn->getDefaultDest();
1603
196
  assert(DefaultBlock->empty() &&
1604
196
         "EmitDefaultStmt: Default block already defined?");
1605
1606
196
  if (SwitchLikelihood)
1607
34
    SwitchLikelihood->front() = Stmt::getLikelihood(Attrs);
1608
1609
196
  EmitBlockWithFallThrough(DefaultBlock, &S);
1610
1611
196
  EmitStmt(S.getSubStmt());
1612
196
}
1613
1614
/// CollectStatementsForCase - Given the body of a 'switch' statement and a
1615
/// constant value that is being switched on, see if we can dead code eliminate
1616
/// the body of the switch to a simple series of statements to emit.  Basically,
1617
/// on a switch (5) we want to find these statements:
1618
///    case 5:
1619
///      printf(...);    <--
1620
///      ++i;            <--
1621
///      break;
1622
///
1623
/// and add them to the ResultStmts vector.  If it is unsafe to do this
1624
/// transformation (for example, one of the elided statements contains a label
1625
/// that might be jumped to), return CSFC_Failure.  If we handled it and 'S'
1626
/// should include statements after it (e.g. the printf() line is a substmt of
1627
/// the case) then return CSFC_FallThrough.  If we handled it and found a break
1628
/// statement, then return CSFC_Success.
1629
///
1630
/// If Case is non-null, then we are looking for the specified case, checking
1631
/// that nothing we jump over contains labels.  If Case is null, then we found
1632
/// the case and are looking for the break.
1633
///
1634
/// If the recursive walk actually finds our Case, then we set FoundCase to
1635
/// true.
1636
///
1637
enum CSFC_Result { CSFC_Failure, CSFC_FallThrough, CSFC_Success };
1638
static CSFC_Result CollectStatementsForCase(const Stmt *S,
1639
                                            const SwitchCase *Case,
1640
                                            bool &FoundCase,
1641
137
                              SmallVectorImpl<const Stmt*> &ResultStmts) {
1642
  // If this is a null statement, just succeed.
1643
137
  if (!S)
1644
0
    return Case ? CSFC_Success : CSFC_FallThrough;
1645
1646
  // If this is the switchcase (case 4: or default) that we're looking for, then
1647
  // we're in business.  Just add the substatement.
1648
137
  if (const SwitchCase *SC = dyn_cast<SwitchCase>(S)) {
1649
42
    if (S == Case) {
1650
30
      FoundCase = true;
1651
30
      return CollectStatementsForCase(SC->getSubStmt(), nullptr, FoundCase,
1652
30
                                      ResultStmts);
1653
30
    }
1654
1655
    // Otherwise, this is some other case or default statement, just ignore it.
1656
12
    return CollectStatementsForCase(SC->getSubStmt(), Case, FoundCase,
1657
12
                                    ResultStmts);
1658
42
  }
1659
1660
  // If we are in the live part of the code and we found our break statement,
1661
  // return a success!
1662
95
  if (!Case && isa<BreakStmt>(S))
1663
12
    return CSFC_Success;
1664
1665
  // If this is a switch statement, then it might contain the SwitchCase, the
1666
  // break, or neither.
1667
83
  if (const CompoundStmt *CS = dyn_cast<CompoundStmt>(S)) {
1668
    // Handle this as two cases: we might be looking for the SwitchCase (if so
1669
    // the skipped statements must be skippable) or we might already have it.
1670
35
    CompoundStmt::const_body_iterator I = CS->body_begin(), E = CS->body_end();
1671
35
    bool StartedInLiveCode = FoundCase;
1672
35
    unsigned StartSize = ResultStmts.size();
1673
1674
    // If we've not found the case yet, scan through looking for it.
1675
35
    if (Case) {
1676
      // Keep track of whether we see a skipped declaration.  The code could be
1677
      // using the declaration even if it is skipped, so we can't optimize out
1678
      // the decl if the kept statements might refer to it.
1679
28
      bool HadSkippedDecl = false;
1680
1681
      // If we're looking for the case, just see if we can skip each of the
1682
      // substatements.
1683
60
      for (; Case && I != E; ++I) {
1684
45
        HadSkippedDecl |= CodeGenFunction::mightAddDeclToScope(*I);
1685
1686
45
        switch (CollectStatementsForCase(*I, Case, FoundCase, ResultStmts)) {
1687
2
        case CSFC_Failure: return CSFC_Failure;
1688
26
        case CSFC_Success:
1689
          // A successful result means that either 1) the statement doesn't
1690
          // have the case and is skippable, or 2) it does contain the case value
1691
          // and also contains the break to exit the switch.  In the latter case,
1692
          // we just verify the rest of the statements are elidable.
1693
26
          if (FoundCase) {
1694
            // If we found the case and skipped declarations, we can't do the
1695
            // optimization.
1696
8
            if (HadSkippedDecl)
1697
0
              return CSFC_Failure;
1698
1699
18
            for (++I; I != E; ++I)
1700
10
              if (CodeGenFunction::ContainsLabel(*I, true))
1701
0
                return CSFC_Failure;
1702
8
            return CSFC_Success;
1703
8
          }
1704
18
          break;
1705
18
        case CSFC_FallThrough:
1706
          // If we have a fallthrough condition, then we must have found the
1707
          // case and started to include statements.  Consider the rest of the
1708
          // statements in the compound statement as candidates for inclusion.
1709
17
          assert(FoundCase && "Didn't find case but returned fallthrough?");
1710
          // We recursively found Case, so we're not looking for it anymore.
1711
0
          Case = nullptr;
1712
1713
          // If we found the case and skipped declarations, we can't do the
1714
          // optimization.
1715
17
          if (HadSkippedDecl)
1716
3
            return CSFC_Failure;
1717
14
          break;
1718
45
        }
1719
45
      }
1720
1721
15
      if (!FoundCase)
1722
1
        return CSFC_Success;
1723
1724
14
      assert(!HadSkippedDecl && "fallthrough after skipping decl");
1725
14
    }
1726
1727
    // If we have statements in our range, then we know that the statements are
1728
    // live and need to be added to the set of statements we're tracking.
1729
21
    bool AnyDecls = false;
1730
36
    for (; I != E; 
++I15
) {
1731
20
      AnyDecls |= CodeGenFunction::mightAddDeclToScope(*I);
1732
1733
20
      switch (CollectStatementsForCase(*I, nullptr, FoundCase, ResultStmts)) {
1734
0
      case CSFC_Failure: return CSFC_Failure;
1735
15
      case CSFC_FallThrough:
1736
        // A fallthrough result means that the statement was simple and just
1737
        // included in ResultStmt, keep adding them afterwards.
1738
15
        break;
1739
5
      case CSFC_Success:
1740
        // A successful result means that we found the break statement and
1741
        // stopped statement inclusion.  We just ensure that any leftover stmts
1742
        // are skippable and return success ourselves.
1743
5
        for (++I; I != E; ++I)
1744
0
          if (CodeGenFunction::ContainsLabel(*I, true))
1745
0
            return CSFC_Failure;
1746
5
        return CSFC_Success;
1747
20
      }
1748
20
    }
1749
1750
    // If we're about to fall out of a scope without hitting a 'break;', we
1751
    // can't perform the optimization if there were any decls in that scope
1752
    // (we'd lose their end-of-lifetime).
1753
16
    if (AnyDecls) {
1754
      // If the entire compound statement was live, there's one more thing we
1755
      // can try before giving up: emit the whole thing as a single statement.
1756
      // We can do that unless the statement contains a 'break;'.
1757
      // FIXME: Such a break must be at the end of a construct within this one.
1758
      // We could emit this by just ignoring the BreakStmts entirely.
1759
3
      if (StartedInLiveCode && !CodeGenFunction::containsBreak(S)) {
1760
3
        ResultStmts.resize(StartSize);
1761
3
        ResultStmts.push_back(S);
1762
3
      } else {
1763
0
        return CSFC_Failure;
1764
0
      }
1765
3
    }
1766
1767
16
    return CSFC_FallThrough;
1768
16
  }
1769
1770
  // Okay, this is some other statement that we don't handle explicitly, like a
1771
  // for statement or increment etc.  If we are skipping over this statement,
1772
  // just verify it doesn't have labels, which would make it invalid to elide.
1773
48
  if (Case) {
1774
17
    if (CodeGenFunction::ContainsLabel(S, true))
1775
0
      return CSFC_Failure;
1776
17
    return CSFC_Success;
1777
17
  }
1778
1779
  // Otherwise, we want to include this statement.  Everything is cool with that
1780
  // so long as it doesn't contain a break out of the switch we're in.
1781
31
  if (CodeGenFunction::containsBreak(S)) return CSFC_Failure;
1782
1783
  // Otherwise, everything is great.  Include the statement and tell the caller
1784
  // that we fall through and include the next statement as well.
1785
30
  ResultStmts.push_back(S);
1786
30
  return CSFC_FallThrough;
1787
31
}
1788
1789
/// FindCaseStatementsForValue - Find the case statement being jumped to and
1790
/// then invoke CollectStatementsForCase to find the list of statements to emit
1791
/// for a switch on constant.  See the comment above CollectStatementsForCase
1792
/// for more details.
1793
static bool FindCaseStatementsForValue(const SwitchStmt &S,
1794
                                       const llvm::APSInt &ConstantCondValue,
1795
                                SmallVectorImpl<const Stmt*> &ResultStmts,
1796
                                       ASTContext &C,
1797
46
                                       const SwitchCase *&ResultCase) {
1798
  // First step, find the switch case that is being branched to.  We can do this
1799
  // efficiently by scanning the SwitchCase list.
1800
46
  const SwitchCase *Case = S.getSwitchCaseList();
1801
46
  const DefaultStmt *DefaultCase = nullptr;
1802
1803
82
  for (; Case; Case = Case->getNextSwitchCase()) {
1804
    // It's either a default or case.  Just remember the default statement in
1805
    // case we're not jumping to any numbered cases.
1806
62
    if (const DefaultStmt *DS = dyn_cast<DefaultStmt>(Case)) {
1807
18
      DefaultCase = DS;
1808
18
      continue;
1809
18
    }
1810
1811
    // Check to see if this case is the one we're looking for.
1812
44
    const CaseStmt *CS = cast<CaseStmt>(Case);
1813
    // Don't handle case ranges yet.
1814
44
    if (CS->getRHS()) return false;
1815
1816
    // If we found our case, remember it as 'case'.
1817
37
    if (CS->getLHS()->EvaluateKnownConstInt(C) == ConstantCondValue)
1818
19
      break;
1819
37
  }
1820
1821
  // If we didn't find a matching case, we use a default if it exists, or we
1822
  // elide the whole switch body!
1823
39
  if (!Case) {
1824
    // It is safe to elide the body of the switch if it doesn't contain labels
1825
    // etc.  If it is safe, return successfully with an empty ResultStmts list.
1826
20
    if (!DefaultCase)
1827
9
      return !CodeGenFunction::ContainsLabel(&S);
1828
11
    Case = DefaultCase;
1829
11
  }
1830
1831
  // Ok, we know which case is being jumped to, try to collect all the
1832
  // statements that follow it.  This can fail for a variety of reasons.  Also,
1833
  // check to see that the recursive walk actually found our case statement.
1834
  // Insane cases like this can fail to find it in the recursive walk since we
1835
  // don't handle every stmt kind:
1836
  // switch (4) {
1837
  //   while (1) {
1838
  //     case 4: ...
1839
30
  bool FoundCase = false;
1840
30
  ResultCase = Case;
1841
30
  return CollectStatementsForCase(S.getBody(), Case, FoundCase,
1842
30
                                  ResultStmts) != CSFC_Failure &&
1843
30
         FoundCase;
1844
39
}
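// Illustrative sketch (editor's addition): for a constant-folded switch like
//
//   switch (4) { case 4: puts("hit"); break; default: abort(); }
//
// this finds 'case 4' and collects { puts("hit"); } so only the live case is
// emitted; case ranges, labels, or a failed recursive walk make it bail out.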
1845
1846
static Optional<SmallVector<uint64_t, 16>>
1847
61
getLikelihoodWeights(ArrayRef<Stmt::Likelihood> Likelihoods) {
1848
  // Are there enough branches to weight them?
1849
61
  if (Likelihoods.size() <= 1)
1850
9
    return None;
1851
1852
52
  uint64_t NumUnlikely = 0;
1853
52
  uint64_t NumNone = 0;
1854
52
  uint64_t NumLikely = 0;
1855
215
  for (const auto LH : Likelihoods) {
1856
215
    switch (LH) {
1857
8
    case Stmt::LH_Unlikely:
1858
8
      ++NumUnlikely;
1859
8
      break;
1860
196
    case Stmt::LH_None:
1861
196
      ++NumNone;
1862
196
      break;
1863
11
    case Stmt::LH_Likely:
1864
11
      ++NumLikely;
1865
11
      break;
1866
215
    }
1867
215
  }
1868
1869
  // Is there a likelihood attribute used?
1870
52
  if (NumUnlikely == 0 && NumLikely == 0)
1871
36
    return None;
1872
1873
  // When multiple cases share the same code they can be combined during
1874
  // optimization. In that case the weights of the branch will be the sum of
1875
  // the individual weights. Make sure the combined sum of all neutral cases
1876
  // doesn't exceed the value of a single likely attribute.
1877
  // The additions both avoid divisions by 0 and make sure the weights of None
1878
  // don't exceed the weight of Likely.
1879
16
  const uint64_t Likely = INT32_MAX / (NumLikely + 2);
1880
16
  const uint64_t None = Likely / (NumNone + 1);
1881
16
  const uint64_t Unlikely = 0;
1882
1883
16
  SmallVector<uint64_t, 16> Result;
1884
16
  Result.reserve(Likelihoods.size());
1885
60
  for (const auto LH : Likelihoods) {
1886
60
    switch (LH) {
1887
8
    case Stmt::LH_Unlikely:
1888
8
      Result.push_back(Unlikely);
1889
8
      break;
1890
41
    case Stmt::LH_None:
1891
41
      Result.push_back(None);
1892
41
      break;
1893
11
    case Stmt::LH_Likely:
1894
11
      Result.push_back(Likely);
1895
11
      break;
1896
60
    }
1897
60
  }
1898
1899
16
  return Result;
1900
16
}
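// Worked example (editor's addition): with one [[likely]] case and three
// plain cases, Likely = INT32_MAX / 3 and None = Likely / 4, so even if the
// optimizer merges all three neutral cases into a single branch target their
// combined weight (3 * None) still stays below the one Likely weight.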
1901
1902
450
void CodeGenFunction::EmitSwitchStmt(const SwitchStmt &S) {
1903
  // Handle nested switch statements.
1904
450
  llvm::SwitchInst *SavedSwitchInsn = SwitchInsn;
1905
450
  SmallVector<uint64_t, 16> *SavedSwitchWeights = SwitchWeights;
1906
450
  SmallVector<Stmt::Likelihood, 16> *SavedSwitchLikelihood = SwitchLikelihood;
1907
450
  llvm::BasicBlock *SavedCRBlock = CaseRangeBlock;
1908
1909
  // See if we can constant fold the condition of the switch and therefore only
1910
  // emit the live case statement (if any) of the switch.
1911
450
  llvm::APSInt ConstantCondValue;
1912
450
  if (ConstantFoldsToSimpleInteger(S.getCond(), ConstantCondValue)) {
1913
46
    SmallVector<const Stmt*, 4> CaseStmts;
1914
46
    const SwitchCase *Case = nullptr;
1915
46
    if (FindCaseStatementsForValue(S, ConstantCondValue, CaseStmts,
1916
46
                                   getContext(), Case)) {
1917
35
      if (Case)
1918
26
        incrementProfileCounter(Case);
1919
35
      RunCleanupsScope ExecutedScope(*this);
1920
1921
35
      if (S.getInit())
1922
0
        EmitStmt(S.getInit());
1923
1924
      // Emit the condition variable if needed inside the entire cleanup scope
1925
      // used by this special case for constant folded switches.
1926
35
      if (S.getConditionVariable())
1927
0
        EmitDecl(*S.getConditionVariable());
1928
1929
      // At this point, we are no longer "within" a switch instance, so
1930
      // we can temporarily enforce this to ensure that any embedded case
1931
      // statements are not emitted.
1932
35
      SwitchInsn = nullptr;
1933
1934
      // Okay, we can dead code eliminate everything except this case.  Emit the
1935
      // specified series of statements and we're good.
1936
59
      for (unsigned i = 0, e = CaseStmts.size(); i != e; ++i)
1937
24
        EmitStmt(CaseStmts[i]);
1938
35
      incrementProfileCounter(&S);
1939
1940
      // Now we want to restore the saved switch instance so that nested
1941
      // switches continue to function properly
1942
35
      SwitchInsn = SavedSwitchInsn;
1943
1944
35
      return;
1945
35
    }
1946
46
  }
1947
1948
415
  JumpDest SwitchExit = getJumpDestInCurrentScope("sw.epilog");
1949
1950
415
  RunCleanupsScope ConditionScope(*this);
1951
1952
415
  if (S.getInit())
1953
7
    EmitStmt(S.getInit());
1954
1955
415
  if (S.getConditionVariable())
1956
4
    EmitDecl(*S.getConditionVariable());
1957
415
  llvm::Value *CondV = EmitScalarExpr(S.getCond());
1958
1959
  // Create basic block to hold stuff that comes after switch
1960
  // statement. We also need to create a default block now so that
1961
  // explicit case ranges tests can have a place to jump to on
1962
  // failure.
1963
415
  llvm::BasicBlock *DefaultBlock = createBasicBlock("sw.default");
1964
415
  SwitchInsn = Builder.CreateSwitch(CondV, DefaultBlock);
1965
415
  if (PGO.haveRegionCounts()) {
1966
    // Walk the SwitchCase list to find how many there are.
1967
27
    uint64_t DefaultCount = 0;
1968
27
    unsigned NumCases = 0;
1969
27
    for (const SwitchCase *Case = S.getSwitchCaseList();
1970
123
         Case;
1971
96
         Case = Case->getNextSwitchCase()) {
1972
96
      if (isa<DefaultStmt>(Case))
1973
19
        DefaultCount = getProfileCount(Case);
1974
96
      NumCases += 1;
1975
96
    }
1976
27
    SwitchWeights = new SmallVector<uint64_t, 16>();
1977
27
    SwitchWeights->reserve(NumCases);
1978
    // The default needs to be first. We store the edge count, so we already
1979
    // know the right weight.
1980
27
    SwitchWeights->push_back(DefaultCount);
1981
388
  } else if (CGM.getCodeGenOpts().OptimizationLevel) {
1982
61
    SwitchLikelihood = new SmallVector<Stmt::Likelihood, 16>();
1983
    // Initialize the default case.
1984
61
    SwitchLikelihood->push_back(Stmt::LH_None);
1985
61
  }
1986
1987
415
  CaseRangeBlock = DefaultBlock;
1988
1989
  // Clear the insertion point to indicate we are in unreachable code.
1990
415
  Builder.ClearInsertionPoint();
1991
1992
  // All break statements jump to NextBlock. If BreakContinueStack is non-empty
1993
  // then reuse last ContinueBlock.
1994
415
  JumpDest OuterContinue;
1995
415
  if (!BreakContinueStack.empty())
1996
48
    OuterContinue = BreakContinueStack.back().ContinueBlock;
1997
1998
415
  BreakContinueStack.push_back(BreakContinue(SwitchExit, OuterContinue));
1999
2000
  // Emit switch body.
2001
415
  EmitStmt(S.getBody());
2002
2003
415
  BreakContinueStack.pop_back();
2004
2005
  // Update the default block in case explicit case range tests have
2006
  // been chained on top.
2007
415
  SwitchInsn->setDefaultDest(CaseRangeBlock);
2008
2009
  // If a default was never emitted:
2010
415
  if (!DefaultBlock->getParent()) {
2011
    // If we have cleanups, emit the default block so that there's a
2012
    // place to jump through the cleanups from.
2013
219
    if (ConditionScope.requiresCleanups()) {
2014
0
      EmitBlock(DefaultBlock);
2015
2016
    // Otherwise, just forward the default block to the switch end.
2017
219
    } else {
2018
219
      DefaultBlock->replaceAllUsesWith(SwitchExit.getBlock());
2019
219
      delete DefaultBlock;
2020
219
    }
2021
219
  }
2022
2023
415
  ConditionScope.ForceCleanup();
2024
2025
  // Emit continuation.
2026
415
  EmitBlock(SwitchExit.getBlock(), true);
2027
415
  incrementProfileCounter(&S);
2028
2029
  // If the switch has a condition wrapped by __builtin_unpredictable,
2030
  // create metadata that specifies that the switch is unpredictable.
2031
  // Don't bother if not optimizing because that metadata would not be used.
2032
415
  auto *Call = dyn_cast<CallExpr>(S.getCond());
2033
415
  if (Call && CGM.getCodeGenOpts().OptimizationLevel != 0) {
2034
10
    auto *FD = dyn_cast_or_null<FunctionDecl>(Call->getCalleeDecl());
2035
10
    if (FD && FD->getBuiltinID() == Builtin::BI__builtin_unpredictable) {
2036
2
      llvm::MDBuilder MDHelper(getLLVMContext());
2037
2
      SwitchInsn->setMetadata(llvm::LLVMContext::MD_unpredictable,
2038
2
                              MDHelper.createUnpredictable());
2039
2
    }
2040
10
  }
2041
2042
415
  if (SwitchWeights) {
2043
27
    assert(SwitchWeights->size() == 1 + SwitchInsn->getNumCases() &&
2044
27
           "switch weights do not match switch cases");
2045
    // If there's only one jump destination there's no sense weighting it.
2046
27
    if (SwitchWeights->size() > 1)
2047
22
      SwitchInsn->setMetadata(llvm::LLVMContext::MD_prof,
2048
22
                              createProfileWeights(*SwitchWeights));
2049
27
    delete SwitchWeights;
2050
388
  } else if (SwitchLikelihood) {
2051
61
    assert(SwitchLikelihood->size() == 1 + SwitchInsn->getNumCases() &&
2052
61
           "switch likelihoods do not match switch cases");
2053
0
    Optional<SmallVector<uint64_t, 16>> LHW =
2054
61
        getLikelihoodWeights(*SwitchLikelihood);
2055
61
    if (LHW) {
2056
16
      llvm::MDBuilder MDHelper(CGM.getLLVMContext());
2057
16
      SwitchInsn->setMetadata(llvm::LLVMContext::MD_prof,
2058
16
                              createProfileWeights(*LHW));
2059
16
    }
2060
61
    delete SwitchLikelihood;
2061
61
  }
2062
0
  SwitchInsn = SavedSwitchInsn;
2063
415
  SwitchWeights = SavedSwitchWeights;
2064
415
  SwitchLikelihood = SavedSwitchLikelihood;
2065
415
  CaseRangeBlock = SavedCRBlock;
2066
415
}
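// Illustrative sketch (editor's addition): the unpredictable-switch handling
// above corresponds to source such as
//
//   switch (__builtin_unpredictable(tag)) { ... }
//
// which, at -O1 and higher, attaches !unpredictable metadata to the emitted
// switch instruction so the backend avoids prediction-friendly lowerings.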
2067
2068
static std::string
2069
SimplifyConstraint(const char *Constraint, const TargetInfo &Target,
2070
2.96k
                 SmallVectorImpl<TargetInfo::ConstraintInfo> *OutCons=nullptr) {
2071
2.96k
  std::string Result;
2072
2073
7.30k
  while (*Constraint) {
2074
4.33k
    switch (*Constraint) {
2075
3.54k
    default:
2076
3.54k
      Result += Target.convertConstraint(Constraint);
2077
3.54k
      break;
2078
    // Ignore these
2079
1
    case '*':
2080
1
    case '?':
2081
1
    case '!':
2082
3
    case '=': // Will see this and the following in multi-alternative constraints.
2083
3
    case '+':
2084
3
      break;
2085
1
    case '#': // Ignore the rest of the constraint alternative.
2086
3
      while (Constraint[1] && Constraint[1] != ',')
2087
2
        Constraint++;
2088
1
      break;
2089
14
    case '&':
2090
16
    case '%':
2091
16
      Result += *Constraint;
2092
18
      while (Constraint[1] && Constraint[1] == *Constraint)
2093
2
        Constraint++;
2094
16
      break;
2095
622
    case ',':
2096
622
      Result += "|";
2097
622
      break;
2098
143
    case 'g':
2099
143
      Result += "imr";
2100
143
      break;
2101
6
    case '[': {
2102
6
      assert(OutCons &&
2103
6
             "Must pass output names to constraints with a symbolic name");
2104
0
      unsigned Index;
2105
6
      bool result = Target.resolveSymbolicName(Constraint, *OutCons, Index);
2106
6
      assert(result && "Could not resolve symbolic name"); (void)result;
2107
6
      Result += llvm::utostr(Index);
2108
6
      break;
2109
14
    }
2110
4.33k
    }
2111
2112
4.33k
    Constraint++;
2113
4.33k
  }
2114
2115
2.96k
  return Result;
2116
2.96k
}
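// Illustrative sketch (editor's addition): given the GCC multi-alternative
// constraint "g,r", the loop above produces "imr|r" -- 'g' expands to "imr",
// ',' becomes LLVM's alternative separator '|', and modifiers such as '='
// and '+' are dropped here because they are handled elsewhere.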
2117
2118
/// AddVariableConstraints - Look at AsmExpr and if it is a variable declared
2119
/// as using a particular register add that as a constraint that will be used
2120
/// in this asm stmt.
2121
static std::string
2122
AddVariableConstraints(const std::string &Constraint, const Expr &AsmExpr,
2123
                       const TargetInfo &Target, CodeGenModule &CGM,
2124
                       const AsmStmt &Stmt, const bool EarlyClobber,
2125
2.96k
                       std::string *GCCReg = nullptr) {
2126
2.96k
  const DeclRefExpr *AsmDeclRef = dyn_cast<DeclRefExpr>(&AsmExpr);
2127
2.96k
  if (!AsmDeclRef)
2128
921
    return Constraint;
2129
2.04k
  const ValueDecl &Value = *AsmDeclRef->getDecl();
2130
2.04k
  const VarDecl *Variable = dyn_cast<VarDecl>(&Value);
2131
2.04k
  if (!Variable)
2132
11
    return Constraint;
2133
2.03k
  if (Variable->getStorageClass() != SC_Register)
2134
1.39k
    return Constraint;
2135
639
  AsmLabelAttr *Attr = Variable->getAttr<AsmLabelAttr>();
2136
639
  if (!Attr)
2137
574
    return Constraint;
2138
65
  StringRef Register = Attr->getLabel();
2139
65
  assert(Target.isValidGCCRegisterName(Register));
2140
  // We're using validateOutputConstraint here because we only care if
2141
  // this is a register constraint.
2142
0
  TargetInfo::ConstraintInfo Info(Constraint, "");
2143
65
  if (Target.validateOutputConstraint(Info) &&
2144
65
      !Info.allowsRegister()) {
2145
0
    CGM.ErrorUnsupported(&Stmt, "__asm__");
2146
0
    return Constraint;
2147
0
  }
2148
  // Canonicalize the register here before returning it.
2149
65
  Register = Target.getNormalizedGCCRegisterName(Register);
2150
65
  if (GCCReg != nullptr)
2151
12
    *GCCReg = Register.str();
2152
65
  return (EarlyClobber ? "&{" : "{") + Register.str() + "}";
2153
65
}
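// Illustrative sketch (editor's addition): for an asm operand backed by a
// register variable, e.g.
//
//   register unsigned v asm("eax");
//   asm("..." : "=r"(v));
//
// the "r" constraint is rewritten to "{eax}" ("&{eax}" for an earlyclobber
// output), pinning the operand to the named physical register.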
2154
2155
std::pair<llvm::Value*, llvm::Type *> CodeGenFunction::EmitAsmInputLValue(
2156
    const TargetInfo::ConstraintInfo &Info, LValue InputValue,
2157
347
    QualType InputType, std::string &ConstraintStr, SourceLocation Loc) {
2158
347
  if (Info.allowsRegister() || !Info.allowsMemory()) {
2159
167
    if (CodeGenFunction::hasScalarEvaluationKind(InputType))
2160
162
      return {EmitLoadOfLValue(InputValue, Loc).getScalarVal(), nullptr};
2161
2162
5
    llvm::Type *Ty = ConvertType(InputType);
2163
5
    uint64_t Size = CGM.getDataLayout().getTypeSizeInBits(Ty);
2164
5
    if ((Size <= 64 && llvm::isPowerOf2_64(Size)) ||
2165
5
        getTargetHooks().isScalarizableAsmOperand(*this, Ty)) {
2166
5
      Ty = llvm::IntegerType::get(getLLVMContext(), Size);
2167
2168
5
      return {Builder.CreateLoad(Builder.CreateElementBitCast(
2169
5
                  InputValue.getAddress(*this), Ty)),
2170
5
              nullptr};
2171
5
    }
2172
5
  }
2173
2174
180
  Address Addr = InputValue.getAddress(*this);
2175
180
  ConstraintStr += '*';
2176
180
  return {Addr.getPointer(), Addr.getElementType()};
2177
347
}
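// Illustrative sketch (editor's addition): when an operand has to stay in
// memory, the code above appends '*' to the constraint string and passes the
// address instead of a loaded value, so an "m"(x) input reaches LLVM as "*m"
// with &x as the argument (plus an elementtype attribute attached later).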
2178
2179
std::pair<llvm::Value *, llvm::Type *>
2180
CodeGenFunction::EmitAsmInput(const TargetInfo::ConstraintInfo &Info,
2181
                              const Expr *InputExpr,
2182
1.71k
                              std::string &ConstraintStr) {
2183
  // If this can't be a register or memory, i.e., has to be a constant
2184
  // (immediate or symbolic), try to emit it as such.
2185
1.71k
  if (!Info.allowsRegister() && !Info.allowsMemory()) {
2186
274
    if (Info.requiresImmediateConstant()) {
2187
61
      Expr::EvalResult EVResult;
2188
61
      InputExpr->EvaluateAsRValue(EVResult, getContext(), true);
2189
2190
61
      llvm::APSInt IntResult;
2191
61
      if (EVResult.Val.toIntegralConstant(IntResult, InputExpr->getType(),
2192
61
                                          getContext()))
2193
59
        return {llvm::ConstantInt::get(getLLVMContext(), IntResult), nullptr};
2194
61
    }
2195
2196
215
    Expr::EvalResult Result;
2197
215
    if (InputExpr->EvaluateAsInt(Result, getContext()))
2198
164
      return {llvm::ConstantInt::get(getLLVMContext(), Result.Val.getInt()),
2199
164
              nullptr};
2200
215
  }
2201
2202
1.48k
  if (Info.allowsRegister() || !Info.allowsMemory())
2203
1.33k
    if (CodeGenFunction::hasScalarEvaluationKind(InputExpr->getType()))
2204
1.32k
      return {EmitScalarExpr(InputExpr), nullptr};
2205
161
  if (InputExpr->getStmtClass() == Expr::CXXThisExprClass)
2206
3
    return {EmitScalarExpr(InputExpr), nullptr};
2207
158
  InputExpr = InputExpr->IgnoreParenNoopCasts(getContext());
2208
158
  LValue Dest = EmitLValue(InputExpr);
2209
158
  return EmitAsmInputLValue(Info, Dest, InputExpr->getType(), ConstraintStr,
2210
158
                            InputExpr->getExprLoc());
2211
161
}
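// Illustrative sketch (editor's addition): a constraint that allows neither
// a register nor memory must be a constant, e.g.
//
//   asm("" :: "i"(sizeof(long)));   // "i" admits only an immediate
//
// so the expression is constant-folded to a ConstantInt above rather than
// being emitted as a load.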
2212
2213
/// getAsmSrcLocInfo - Return the !srcloc metadata node to attach to an inline
2214
/// asm call instruction.  The !srcloc MDNode contains a list of constant
2215
/// integers which are the source locations of the start of each line in the
2216
/// asm.
2217
static llvm::MDNode *getAsmSrcLocInfo(const StringLiteral *Str,
2218
1.70k
                                      CodeGenFunction &CGF) {
2219
1.70k
  SmallVector<llvm::Metadata *, 8> Locs;
2220
  // Add the location of the first line to the MDNode.
2221
1.70k
  Locs.push_back(llvm::ConstantAsMetadata::get(llvm::ConstantInt::get(
2222
1.70k
      CGF.Int64Ty, Str->getBeginLoc().getRawEncoding())));
2223
1.70k
  StringRef StrVal = Str->getString();
2224
1.70k
  if (!StrVal.empty()) {
2225
1.65k
    const SourceManager &SM = CGF.CGM.getContext().getSourceManager();
2226
1.65k
    const LangOptions &LangOpts = CGF.CGM.getLangOpts();
2227
1.65k
    unsigned StartToken = 0;
2228
1.65k
    unsigned ByteOffset = 0;
2229
2230
    // Add the location of the start of each subsequent line of the asm to the
2231
    // MDNode.
2232
29.2k
    for (unsigned i = 0, e = StrVal.size() - 1; i != e; ++i) {
2233
27.6k
      if (StrVal[i] != '\n') continue;
2234
632
      SourceLocation LineLoc = Str->getLocationOfByte(
2235
632
          i + 1, SM, LangOpts, CGF.getTarget(), &StartToken, &ByteOffset);
2236
632
      Locs.push_back(llvm::ConstantAsMetadata::get(
2237
632
          llvm::ConstantInt::get(CGF.Int64Ty, LineLoc.getRawEncoding())));
2238
632
    }
2239
1.65k
  }
2240
2241
1.70k
  return llvm::MDNode::get(CGF.getLLVMContext(), Locs);
2242
1.70k
}
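// Illustrative sketch (editor's addition): for a two-line asm string such as
//
//   asm("movl %0, %%eax\n\t"
//       "addl $1, %%eax" : ... );
//
// the !srcloc node carries two integers -- the raw encodings of the start of
// each line -- letting backend diagnostics point at the offending asm line.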
2243
2244
static void UpdateAsmCallInst(llvm::CallBase &Result, bool HasSideEffect,
2245
                              bool HasUnwindClobber, bool ReadOnly,
2246
                              bool ReadNone, bool NoMerge, const AsmStmt &S,
2247
                              const std::vector<llvm::Type *> &ResultRegTypes,
2248
                              const std::vector<llvm::Type *> &ArgElemTypes,
2249
                              CodeGenFunction &CGF,
2250
1.87k
                              std::vector<llvm::Value *> &RegResults) {
2251
1.87k
  if (!HasUnwindClobber)
2252
1.87k
    Result.addFnAttr(llvm::Attribute::NoUnwind);
2253
2254
1.87k
  if (NoMerge)
2255
1
    Result.addFnAttr(llvm::Attribute::NoMerge);
2256
  // Attach readnone and readonly attributes.
2257
1.87k
  if (!HasSideEffect) {
2258
761
    if (ReadNone)
2259
305
      Result.addFnAttr(llvm::Attribute::ReadNone);
2260
456
    else if (ReadOnly)
2261
315
      Result.addFnAttr(llvm::Attribute::ReadOnly);
2262
761
  }
2263
2264
  // Add elementtype attribute for indirect constraints.
2265
2.09k
  for (auto Pair : llvm::enumerate(ArgElemTypes)) {
2266
2.09k
    if (Pair.value()) {
2267
378
      auto Attr = llvm::Attribute::get(
2268
378
          CGF.getLLVMContext(), llvm::Attribute::ElementType, Pair.value());
2269
378
      Result.addParamAttr(Pair.index(), Attr);
2270
378
    }
2271
2.09k
  }
2272
2273
  // Slap the source location of the inline asm into a !srcloc metadata on the
2274
  // call.
2275
1.87k
  if (const auto *gccAsmStmt = dyn_cast<GCCAsmStmt>(&S))
2276
1.70k
    Result.setMetadata("srcloc",
2277
1.70k
                       getAsmSrcLocInfo(gccAsmStmt->getAsmString(), CGF));
2278
165
  else {
2279
    // At least put the line number on MS inline asm blobs.
2280
165
    llvm::Constant *Loc =
2281
165
        llvm::ConstantInt::get(CGF.Int64Ty, S.getAsmLoc().getRawEncoding());
2282
165
    Result.setMetadata("srcloc",
2283
165
                       llvm::MDNode::get(CGF.getLLVMContext(),
2284
165
                                         llvm::ConstantAsMetadata::get(Loc)));
2285
165
  }
2286
2287
1.87k
  if (CGF.getLangOpts().assumeFunctionsAreConvergent())
2288
    // Conservatively, mark all inline asm blocks in CUDA or OpenCL as
2289
    // convergent (meaning, they may call an intrinsically convergent op, such
2290
    // as bar.sync, and so can't have certain optimizations applied around
2291
    // them).
2292
13
    Result.addFnAttr(llvm::Attribute::Convergent);
2293
  // Extract all of the register value results from the asm.
2294
1.87k
  if (ResultRegTypes.size() == 1) {
2295
788
    RegResults.push_back(&Result);
2296
1.08k
  } else {
2297
1.38k
    for (unsigned i = 0, e = ResultRegTypes.size(); i != e; ++i) {
2298
297
      llvm::Value *Tmp = CGF.Builder.CreateExtractValue(&Result, i, "asmresult");
2299
297
      RegResults.push_back(Tmp);
2300
297
    }
2301
1.08k
  }
2302
1.87k
}
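// Illustrative sketch (editor's addition): with two register outputs, e.g.
// "=r"(a) and "=r"(b), the inline-asm call returns a two-member struct and
// the loop above splits it with extractvalue; a single register output is
// used directly without the aggregate wrapper.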
2303
2304
1.87k
void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
2305
  // Pop all cleanup blocks at the end of the asm statement.
2306
1.87k
  CodeGenFunction::RunCleanupsScope Cleanups(*this);
2307
2308
  // Assemble the final asm string.
2309
1.87k
  std::string AsmString = S.generateAsmString(getContext());
2310
2311
  // Get all the output and input constraints together.
2312
1.87k
  SmallVector<TargetInfo::ConstraintInfo, 4> OutputConstraintInfos;
2313
1.87k
  SmallVector<TargetInfo::ConstraintInfo, 4> InputConstraintInfos;
2314
2315
3.12k
  for (unsigned i = 0, e = S.getNumOutputs(); i != e; i++) {
2316
1.25k
    StringRef Name;
2317
1.25k
    if (const GCCAsmStmt *GAS = dyn_cast<GCCAsmStmt>(&S))
2318
1.22k
      Name = GAS->getOutputName(i);
2319
1.25k
    TargetInfo::ConstraintInfo Info(S.getOutputConstraint(i), Name);
2320
1.25k
    bool IsValid = getTarget().validateOutputConstraint(Info); (void)IsValid;
2321
1.25k
    assert(IsValid && "Failed to parse output constraint");
2322
0
    OutputConstraintInfos.push_back(Info);
2323
1.25k
  }
2324
2325
3.58k
  for (unsigned i = 0, e = S.getNumInputs(); i != e; i++) {
2326
1.71k
    StringRef Name;
2327
1.71k
    if (const GCCAsmStmt *GAS = dyn_cast<GCCAsmStmt>(&S))
2328
1.59k
      Name = GAS->getInputName(i);
2329
1.71k
    TargetInfo::ConstraintInfo Info(S.getInputConstraint(i), Name);
2330
1.71k
    bool IsValid =
2331
1.71k
      getTarget().validateInputConstraint(OutputConstraintInfos, Info);
2332
1.71k
    assert(IsValid && "Failed to parse input constraint"); (void)IsValid;
2333
1.71k
    InputConstraintInfos.push_back(Info);
2334
1.71k
  }
2335
2336
1.87k
  std::string Constraints;
2337
2338
1.87k
  std::vector<LValue> ResultRegDests;
2339
1.87k
  std::vector<QualType> ResultRegQualTys;
2340
1.87k
  std::vector<llvm::Type *> ResultRegTypes;
2341
1.87k
  std::vector<llvm::Type *> ResultTruncRegTypes;
2342
1.87k
  std::vector<llvm::Type *> ArgTypes;
2343
1.87k
  std::vector<llvm::Type *> ArgElemTypes;
2344
1.87k
  std::vector<llvm::Value*> Args;
2345
1.87k
  llvm::BitVector ResultTypeRequiresCast;
2346
2347
  // Keep track of inout constraints.
2348
1.87k
  std::string InOutConstraints;
2349
1.87k
  std::vector<llvm::Value*> InOutArgs;
2350
1.87k
  std::vector<llvm::Type*> InOutArgTypes;
2351
1.87k
  std::vector<llvm::Type*> InOutArgElemTypes;
2352
2353
  // Keep track of out constraints for tied input operand.
2354
1.87k
  std::vector<std::string> OutputConstraints;
2355
2356
  // Keep track of defined physregs.
2357
1.87k
  llvm::SmallSet<std::string, 8> PhysRegOutputs;
2358
2359
  // An inline asm can be marked readonly if it meets the following conditions:
2360
  //  - it doesn't have any sideeffects
2361
  //  - it doesn't clobber memory
2362
  //  - it doesn't return a value by-reference
2363
  // It can be marked readnone if it doesn't have any input memory constraints
2364
  // in addition to meeting the conditions listed above.
2365
1.87k
  bool ReadOnly = true, ReadNone = true;
2366
2367
3.12k
  for (unsigned i = 0, e = S.getNumOutputs(); i != e; i++) {
2368
1.25k
    TargetInfo::ConstraintInfo &Info = OutputConstraintInfos[i];
2369
2370
    // Simplify the output constraint.
2371
1.25k
    std::string OutputConstraint(S.getOutputConstraint(i));
2372
1.25k
    OutputConstraint = SimplifyConstraint(OutputConstraint.c_str() + 1,
2373
1.25k
                                          getTarget(), &OutputConstraintInfos);
2374
2375
1.25k
    const Expr *OutExpr = S.getOutputExpr(i);
2376
1.25k
    OutExpr = OutExpr->IgnoreParenNoopCasts(getContext());
2377
2378
1.25k
    std::string GCCReg;
2379
1.25k
    OutputConstraint = AddVariableConstraints(OutputConstraint, *OutExpr,
2380
1.25k
                                              getTarget(), CGM, S,
2381
1.25k
                                              Info.earlyClobber(),
2382
1.25k
                                              &GCCReg);
2383
    // Give an error on multiple outputs to same physreg.
2384
1.25k
    if (!GCCReg.empty() && !PhysRegOutputs.insert(GCCReg).second)
2385
0
      CGM.Error(S.getAsmLoc(), "multiple outputs to hard register: " + GCCReg);
2386
2387
1.25k
    OutputConstraints.push_back(OutputConstraint);
2388
1.25k
    LValue Dest = EmitLValue(OutExpr);
2389
1.25k
    if (!Constraints.empty())
2390
265
      Constraints += ',';
2391
2392
    // If this is a register output, then make the inline asm return it
2393
    // by-value.  If this is a memory result, return the value by-reference.
2394
1.25k
    QualType QTy = OutExpr->getType();
2395
1.25k
    const bool IsScalarOrAggregate = hasScalarEvaluationKind(QTy) ||
2396
1.25k
                                     hasAggregateEvaluationKind(QTy);
2397
1.25k
    if (!Info.allowsMemory() && IsScalarOrAggregate) {
2398
2399
1.05k
      Constraints += "=" + OutputConstraint;
2400
1.05k
      ResultRegQualTys.push_back(QTy);
2401
1.05k
      ResultRegDests.push_back(Dest);
2402
2403
1.05k
      llvm::Type *Ty = ConvertTypeForMem(QTy);
2404
1.05k
      const bool RequiresCast = Info.allowsRegister() &&
2405
1.05k
          (getTargetHooks().isScalarizableAsmOperand(*this, Ty) ||
2406
1.05k
           Ty->isAggregateType());
2407
2408
1.05k
      ResultTruncRegTypes.push_back(Ty);
2409
1.05k
      ResultTypeRequiresCast.push_back(RequiresCast);
2410
2411
1.05k
      if (RequiresCast) {
2412
19
        unsigned Size = getContext().getTypeSize(QTy);
2413
19
        Ty = llvm::IntegerType::get(getLLVMContext(), Size);
2414
19
      }
2415
1.05k
      ResultRegTypes.push_back(Ty);
2416
      // If this output is tied to an input, and if the input is larger, then
2417
      // we need to set the actual result type of the inline asm node to be the
2418
      // same as the input type.
2419
1.05k
      if (Info.hasMatchingInput()) {
2420
36
        unsigned InputNo;
2421
39
        for (InputNo = 0; InputNo != S.getNumInputs(); 
++InputNo3
) {
2422
39
          TargetInfo::ConstraintInfo &Input = InputConstraintInfos[InputNo];
2423
39
          if (Input.hasTiedOperand() && Input.getTiedOperand() == i)
2424
36
            break;
2425
39
        }
2426
36
        assert(InputNo != S.getNumInputs() && "Didn't find matching input!");
2427
2428
0
        QualType InputTy = S.getInputExpr(InputNo)->getType();
2429
36
        QualType OutputType = OutExpr->getType();
2430
2431
36
        uint64_t InputSize = getContext().getTypeSize(InputTy);
2432
36
        if (getContext().getTypeSize(OutputType) < InputSize) {
2433
          // Form the asm to return the value as a larger integer or fp type.
2434
4
          ResultRegTypes.back() = ConvertType(InputTy);
2435
4
        }
2436
36
      }
2437
1.05k
      if (llvm::Type* AdjTy =
2438
1.05k
            getTargetHooks().adjustInlineAsmType(*this, OutputConstraint,
2439
1.05k
                                                 ResultRegTypes.back()))
2440
1.05k
        ResultRegTypes.back() = AdjTy;
2441
0
      else {
2442
0
        CGM.getDiags().Report(S.getAsmLoc(),
2443
0
                              diag::err_asm_invalid_type_in_input)
2444
0
            << OutExpr->getType() << OutputConstraint;
2445
0
      }
2446
2447
      // Update largest vector width for any vector types.
2448
1.05k
      if (auto *VT = dyn_cast<llvm::VectorType>(ResultRegTypes.back()))
2449
115
        LargestVectorWidth =
2450
115
            std::max((uint64_t)LargestVectorWidth,
2451
115
                     VT->getPrimitiveSizeInBits().getKnownMinSize());
2452
1.05k
    } else {
2453
198
      Address DestAddr = Dest.getAddress(*this);
2454
      // Matrix types in memory are represented by arrays, but accessed through
2455
      // vector pointers, with the alignment specified on the access operation.
2456
      // For inline assembly, update pointer arguments to use vector pointers.
2457
      // Otherwise there will be a mis-match if the matrix is also an
2458
      // input-argument which is represented as vector.
2459
198
      if (isa<MatrixType>(OutExpr->getType().getCanonicalType()))
2460
1
        DestAddr = Builder.CreateElementBitCast(
2461
1
            DestAddr, ConvertType(OutExpr->getType()));
2462
2463
198
      ArgTypes.push_back(DestAddr.getType());
2464
198
      ArgElemTypes.push_back(DestAddr.getElementType());
2465
198
      Args.push_back(DestAddr.getPointer());
2466
198
      Constraints += "=*";
2467
198
      Constraints += OutputConstraint;
2468
198
      ReadOnly = ReadNone = false;
2469
198
    }
2470
2471
1.25k
    if (Info.isReadWrite()) {
2472
189
      InOutConstraints += ',';
2473
2474
189
      const Expr *InputExpr = S.getOutputExpr(i);
2475
189
      llvm::Value *Arg;
2476
189
      llvm::Type *ArgElemType;
2477
189
      std::tie(Arg, ArgElemType) = EmitAsmInputLValue(
2478
189
          Info, Dest, InputExpr->getType(), InOutConstraints,
2479
189
          InputExpr->getExprLoc());
2480
2481
189
      if (llvm::Type* AdjTy =
2482
189
          getTargetHooks().adjustInlineAsmType(*this, OutputConstraint,
2483
189
                                               Arg->getType()))
2484
189
        Arg = Builder.CreateBitCast(Arg, AdjTy);
2485
2486
      // Update largest vector width for any vector types.
2487
189
      if (auto *VT = dyn_cast<llvm::VectorType>(Arg->getType()))
2488
2
        LargestVectorWidth =
2489
2
            std::max((uint64_t)LargestVectorWidth,
2490
2
                     VT->getPrimitiveSizeInBits().getKnownMinSize());
2491
      // Only tie earlyclobber physregs.
2492
189
      if (Info.allowsRegister() && (GCCReg.empty() || Info.earlyClobber()))
2493
160
        InOutConstraints += llvm::utostr(i);
2494
29
      else
2495
29
        InOutConstraints += OutputConstraint;
2496
2497
189
      InOutArgTypes.push_back(Arg->getType());
2498
189
      InOutArgElemTypes.push_back(ArgElemType);
2499
189
      InOutArgs.push_back(Arg);
2500
189
    }
2501
1.25k
  }
2502
2503
  // If this is a Microsoft-style asm blob, store the return registers (EAX:EDX)
2504
  // to the return value slot. Only do this when returning in registers.
2505
1.87k
  if (isa<MSAsmStmt>(&S)) {
2506
165
    const ABIArgInfo &RetAI = CurFnInfo->getReturnInfo();
2507
165
    if (RetAI.isDirect() || RetAI.isExtend()) {
2508
      // Make a fake lvalue for the return value slot.
2509
30
      LValue ReturnSlot = MakeAddrLValueWithoutTBAA(ReturnValue, FnRetTy);
2510
30
      CGM.getTargetCodeGenInfo().addReturnRegisterOutputs(
2511
30
          *this, ReturnSlot, Constraints, ResultRegTypes, ResultTruncRegTypes,
2512
30
          ResultRegDests, AsmString, S.getNumOutputs());
2513
30
      SawAsmBlock = true;
2514
30
    }
2515
165
  }
2516
2517
3.58k
  for (unsigned i = 0, e = S.getNumInputs(); i != e; i++) {
2518
1.71k
    const Expr *InputExpr = S.getInputExpr(i);
2519
2520
1.71k
    TargetInfo::ConstraintInfo &Info = InputConstraintInfos[i];
2521
2522
1.71k
    if (Info.allowsMemory())
2523
583
      ReadNone = false;
2524
2525
1.71k
    if (!Constraints.empty())
2526
1.45k
      Constraints += ',';
2527
2528
    // Simplify the input constraint.
2529
1.71k
    std::string InputConstraint(S.getInputConstraint(i));
2530
1.71k
    InputConstraint = SimplifyConstraint(InputConstraint.c_str(), getTarget(),
2531
1.71k
                                         &OutputConstraintInfos);
2532
2533
1.71k
    InputConstraint = AddVariableConstraints(
2534
1.71k
        InputConstraint, *InputExpr->IgnoreParenNoopCasts(getContext()),
2535
1.71k
        getTarget(), CGM, S, false /* No EarlyClobber */);
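
AddVariableConstraints narrows a generic constraint to a specific physical register when the operand is an explicit register variable. Sketch using the GNU register-asm extension (the register keyword predates C++17):

void post_delay(int v) {
  register int in asm("eax") = v;   // explicit register variable
  asm volatile("outl %0, $0x80" : : "r"(in));
  // The generic "r" is rewritten to "{eax}" in the constraint string.
}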
2536
2537
1.71k
    std::string ReplaceConstraint(InputConstraint);
2538
1.71k
    llvm::Value *Arg;
2539
1.71k
    llvm::Type *ArgElemType;
2540
1.71k
    std::tie(Arg, ArgElemType) = EmitAsmInput(Info, InputExpr, Constraints);
2541
2542
    // If this input argument is tied to a larger output result, extend the
2543
    // input to be the same size as the output.  The LLVM backend wants to see
2544
    // the input and output of a matching constraint be the same size.  Note
2545
    // that GCC does not define what the top bits are here.  We use zext because
2546
    // that is usually cheaper, but LLVM IR should really get an anyext someday.
2547
1.71k
    if (Info.hasTiedOperand()) {
2548
36
      unsigned Output = Info.getTiedOperand();
2549
36
      QualType OutputType = S.getOutputExpr(Output)->getType();
2550
36
      QualType InputTy = InputExpr->getType();
2551
2552
36
      if (getContext().getTypeSize(OutputType) >
2553
36
          getContext().getTypeSize(InputTy)) {
2554
        // Use ptrtoint as appropriate so that we can do our extension.
2555
4
        if (isa<llvm::PointerType>(Arg->getType()))
2556
1
          Arg = Builder.CreatePtrToInt(Arg, IntPtrTy);
2557
4
        llvm::Type *OutputTy = ConvertType(OutputType);
2558
4
        if (isa<llvm::IntegerType>(OutputTy))
2559
1
          Arg = Builder.CreateZExt(Arg, OutputTy);
2560
3
        else if (isa<llvm::PointerType>(OutputTy))
2561
1
          Arg = Builder.CreateZExt(Arg, IntPtrTy);
2562
2
        else if (OutputTy->isFloatingPointTy())
2563
1
          Arg = Builder.CreateFPExt(Arg, OutputTy);
2564
4
      }
2565
      // Deal with the tied operands' constraint code in adjustInlineAsmType.
2566
36
      ReplaceConstraint = OutputConstraints[Output];
2567
36
    }
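
Concretely, a narrow input tied to a wider output gets widened right here, before the call is built. Sketch, assuming an LP64 target:

void widen() {
  long long out;
  int in = 5;
  asm("" : "=r"(out) : "0"(in));
  // getTypeSize(long long) > getTypeSize(int), so the i32 input is
  // zero-extended to i64 to match its tied output register.
}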
2568
1.71k
    if (llvm::Type* AdjTy =
2569
1.71k
          getTargetHooks().adjustInlineAsmType(*this, ReplaceConstraint,
2570
1.71k
                                                   Arg->getType()))
2571
1.71k
      Arg = Builder.CreateBitCast(Arg, AdjTy);
2572
0
    else
2573
0
      CGM.getDiags().Report(S.getAsmLoc(), diag::err_asm_invalid_type_in_input)
2574
0
          << InputExpr->getType() << InputConstraint;
2575
2576
    // Update largest vector width for any vector types.
2577
1.71k
    if (auto *VT = dyn_cast<llvm::VectorType>(Arg->getType()))
2578
125
      LargestVectorWidth =
2579
125
          std::max((uint64_t)LargestVectorWidth,
2580
125
                   VT->getPrimitiveSizeInBits().getKnownMinSize());
2581
2582
1.71k
    ArgTypes.push_back(Arg->getType());
2583
1.71k
    ArgElemTypes.push_back(ArgElemType);
2584
1.71k
    Args.push_back(Arg);
2585
1.71k
    Constraints += InputConstraint;
2586
1.71k
  }
2587
2588
  // Append the "input" part of inout constraints.
2589
2.06k
  for (unsigned i = 0, e = InOutArgs.size(); i != e; i++) {
2590
189
    ArgTypes.push_back(InOutArgTypes[i]);
2591
189
    ArgElemTypes.push_back(InOutArgElemTypes[i]);
2592
189
    Args.push_back(InOutArgs[i]);
2593
189
  }
2594
1.87k
  Constraints += InOutConstraints;
2595
2596
  // Labels
2597
1.87k
  SmallVector<llvm::BasicBlock *, 16> Transfer;
2598
1.87k
  llvm::BasicBlock *Fallthrough = nullptr;
2599
1.87k
  bool IsGCCAsmGoto = false;
2600
1.87k
  if (const auto *GS =  dyn_cast<GCCAsmStmt>(&S)) {
2601
1.70k
    IsGCCAsmGoto = GS->isAsmGoto();
2602
1.70k
    if (IsGCCAsmGoto) {
2603
50
      for (const auto *E : GS->labels()) {
2604
50
        JumpDest Dest = getJumpDestForLabel(E->getLabel());
2605
50
        Transfer.push_back(Dest.getBlock());
2606
50
        if (!Constraints.empty())
2607
45
          Constraints += ',';
2608
50
        Constraints += "!i";
2609
50
      }
2610
31
      Fallthrough = createBasicBlock("asm.fallthrough");
2611
31
    }
2612
1.70k
  }
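
Each label on an asm goto contributes a "!i" constraint and an indirect destination, and the statement is later emitted as a callbr whose normal destination is the asm.fallthrough block created here. Sketch:

int is_nonzero(int x) {
  asm goto("testl %0, %0; jnz %l1" : : "r"(x) : : nonzero);
  return 0;
nonzero:
  return 1;
}
// IR is roughly:
//   callbr void asm sideeffect "...", "r,!i,~{dirflag},~{fpsr},~{flags}"(i32 %x)
//       to label %asm.fallthrough [label %nonzero]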
2613
2614
1.87k
  bool HasUnwindClobber = false;
2615
2616
  // Clobbers
2617
3.20k
  for (unsigned i = 0, e = S.getNumClobbers(); i != e; i++) {
2618
1.33k
    StringRef Clobber = S.getClobber(i);
2619
2620
1.33k
    if (Clobber == "memory")
2621
150
      ReadOnly = ReadNone = false;
2622
1.18k
    else if (Clobber == "unwind") {
2623
1
      HasUnwindClobber = true;
2624
1
      continue;
2625
1.18k
    } else if (Clobber != "cc") {
2626
1.12k
      Clobber = getTarget().getNormalizedGCCRegisterName(Clobber);
2627
1.12k
      if (CGM.getCodeGenOpts().StackClashProtector &&
2628
1.12k
          getTarget().isSPRegName(Clobber)) {
2629
3
        CGM.getDiags().Report(S.getAsmLoc(),
2630
3
                              diag::warn_stack_clash_protection_inline_asm);
2631
3
      }
2632
1.12k
    }
2633
2634
1.33k
    if (isa<MSAsmStmt>(&S)) {
2635
194
      if (Clobber == "eax" || 
Clobber == "edx"93
) {
2636
117
        if (Constraints.find("=&A") != std::string::npos)
2637
3
          continue;
2638
114
        std::string::size_type position1 =
2639
114
            Constraints.find("={" + Clobber.str() + "}");
2640
114
        if (position1 != std::string::npos) {
2641
13
          Constraints.insert(position1 + 1, "&");
2642
13
          continue;
2643
13
        }
2644
101
        std::string::size_type position2 = Constraints.find("=A");
2645
101
        if (position2 != std::string::npos) {
2646
3
          Constraints.insert(position2 + 1, "&");
2647
3
          continue;
2648
3
        }
2649
101
      }
2650
194
    }
2651
1.31k
    if (!Constraints.empty())
2652
935
      Constraints += ',';
2653
2654
1.31k
    Constraints += "~{";
2655
1.31k
    Constraints += Clobber;
2656
1.31k
    Constraints += '}';
2657
1.31k
  }
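
Every surviving clobber ends up as a "~{name}" constraint; "memory" additionally forces ReadOnly/ReadNone off, and "unwind" was consumed above without being appended. Sketch, assuming x86:

void serialize() {
  asm volatile("cpuid" : : : "eax", "ebx", "ecx", "edx", "memory");
  // The constraint string gains roughly
  //   "~{eax},~{ebx},~{ecx},~{edx},~{memory}"
  // before the machine clobbers ("~{dirflag},~{fpsr},~{flags}") are appended.
}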
2658
2659
1.87k
  assert(!(HasUnwindClobber && IsGCCAsmGoto) &&
2660
1.87k
         "unwind clobber can't be used with asm goto");
2661
2662
  // Add machine-specific clobbers
2663
0
  std::string MachineClobbers = getTarget().getClobbers();
2664
1.87k
  if (!MachineClobbers.empty()) {
2665
1.17k
    if (!Constraints.empty())
2666
961
      Constraints += ',';
2667
1.17k
    Constraints += MachineClobbers;
2668
1.17k
  }
2669
2670
1.87k
  llvm::Type *ResultType;
2671
1.87k
  if (ResultRegTypes.empty())
2672
988
    ResultType = VoidTy;
2673
885
  else if (ResultRegTypes.size() == 1)
2674
788
    ResultType = ResultRegTypes[0];
2675
97
  else
2676
97
    ResultType = llvm::StructType::get(getLLVMContext(), ResultRegTypes);
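
When there is more than one direct register output, the asm call returns an anonymous struct whose members are unpacked into RegResults further down. Sketch, assuming x86-64:

void read_tsc(unsigned &lo, unsigned &hi) {
  asm volatile("rdtsc" : "=a"(lo), "=d"(hi));
  // Roughly:
  //   %pair = call { i32, i32 } asm sideeffect "rdtsc", "={ax},={dx},..."()
  // followed by one extractvalue per output.
}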
2677
2678
1.87k
  llvm::FunctionType *FTy =
2679
1.87k
    llvm::FunctionType::get(ResultType, ArgTypes, false);
2680
2681
1.87k
  bool HasSideEffect = S.isVolatile() || S.getNumOutputs() == 0;
2682
2683
1.87k
  llvm::InlineAsm::AsmDialect GnuAsmDialect =
2684
1.87k
      CGM.getCodeGenOpts().getInlineAsmDialect() == CodeGenOptions::IAD_ATT
2685
1.87k
          ? llvm::InlineAsm::AD_ATT
2686
1.87k
          : llvm::InlineAsm::AD_Intel;
2687
1.87k
  llvm::InlineAsm::AsmDialect AsmDialect = isa<MSAsmStmt>(&S) ?
2688
1.70k
    llvm::InlineAsm::AD_Intel : GnuAsmDialect;
2689
2690
1.87k
  llvm::InlineAsm *IA = llvm::InlineAsm::get(
2691
1.87k
      FTy, AsmString, Constraints, HasSideEffect,
2692
1.87k
      /* IsAlignStack */ false, AsmDialect, HasUnwindClobber);
2693
1.87k
  std::vector<llvm::Value*> RegResults;
2694
1.87k
  if (IsGCCAsmGoto) {
2695
31
    llvm::CallBrInst *Result =
2696
31
        Builder.CreateCallBr(IA, Fallthrough, Transfer, Args);
2697
31
    EmitBlock(Fallthrough);
2698
31
    UpdateAsmCallInst(cast<llvm::CallBase>(*Result), HasSideEffect, false,
2699
31
                      ReadOnly, ReadNone, InNoMergeAttributedStmt, S,
2700
31
                      ResultRegTypes, ArgElemTypes, *this, RegResults);
2701
1.84k
  } else if (HasUnwindClobber) {
2702
1
    llvm::CallBase *Result = EmitCallOrInvoke(IA, Args, "");
2703
1
    UpdateAsmCallInst(*Result, HasSideEffect, true, ReadOnly, ReadNone,
2704
1
                      InNoMergeAttributedStmt, S, ResultRegTypes, ArgElemTypes,
2705
1
                      *this, RegResults);
2706
1.84k
  } else {
2707
1.84k
    llvm::CallInst *Result =
2708
1.84k
        Builder.CreateCall(IA, Args, getBundlesForFunclet(IA));
2709
1.84k
    UpdateAsmCallInst(cast<llvm::CallBase>(*Result), HasSideEffect, false,
2710
1.84k
                      ReadOnly, ReadNone, InNoMergeAttributedStmt, S,
2711
1.84k
                      ResultRegTypes, ArgElemTypes, *this, RegResults);
2712
1.84k
  }
2713
2714
1.87k
  assert(RegResults.size() == ResultRegTypes.size());
2715
0
  assert(RegResults.size() == ResultTruncRegTypes.size());
2716
0
  assert(RegResults.size() == ResultRegDests.size());
2717
  // ResultRegDests can be also populated by addReturnRegisterOutputs() above,
2718
  // in which case its size may grow.
2719
0
  assert(ResultTypeRequiresCast.size() <= ResultRegDests.size());
2720
2.95k
  for (unsigned i = 0, e = RegResults.size(); i != e; ++i) {
2721
1.08k
    llvm::Value *Tmp = RegResults[i];
2722
1.08k
    llvm::Type *TruncTy = ResultTruncRegTypes[i];
2723
2724
    // If the result type of the LLVM IR asm doesn't match the result type of
2725
    // the expression, do the conversion.
2726
1.08k
    if (ResultRegTypes[i] != ResultTruncRegTypes[i]) {
2727
2728
      // Truncate the integer result to the right size; note that TruncTy can be
2729
      // a pointer.
2730
37
      if (TruncTy->isFloatingPointTy())
2731
1
        Tmp = Builder.CreateFPTrunc(Tmp, TruncTy);
2732
36
      else if (TruncTy->isPointerTy() && Tmp->getType()->isIntegerTy()) {
2733
0
        uint64_t ResSize = CGM.getDataLayout().getTypeSizeInBits(TruncTy);
2734
0
        Tmp = Builder.CreateTrunc(Tmp,
2735
0
                   llvm::IntegerType::get(getLLVMContext(), (unsigned)ResSize));
2736
0
        Tmp = Builder.CreateIntToPtr(Tmp, TruncTy);
2737
36
      } else if (Tmp->getType()->isPointerTy() && TruncTy->isIntegerTy()) {
2738
0
        uint64_t TmpSize = CGM.getDataLayout().getTypeSizeInBits(Tmp->getType());
2739
0
        Tmp = Builder.CreatePtrToInt(Tmp,
2740
0
                   llvm::IntegerType::get(getLLVMContext(), (unsigned)TmpSize));
2741
0
        Tmp = Builder.CreateTrunc(Tmp, TruncTy);
2742
36
      } else if (TruncTy->isIntegerTy()) {
2743
7
        Tmp = Builder.CreateZExtOrTrunc(Tmp, TruncTy);
2744
29
      } else if (TruncTy->isVectorTy()) {
2745
10
        Tmp = Builder.CreateBitCast(Tmp, TruncTy);
2746
10
      }
2747
37
    }
2748
2749
1.08k
    LValue Dest = ResultRegDests[i];
2750
    // ResultTypeRequiresCast elements correspond to the first
2751
    // ResultTypeRequiresCast.size() elements of RegResults.
2752
1.08k
    if ((i < ResultTypeRequiresCast.size()) && ResultTypeRequiresCast[i]) {
2753
19
      unsigned Size = getContext().getTypeSize(ResultRegQualTys[i]);
2754
19
      Address A = Builder.CreateElementBitCast(Dest.getAddress(*this),
2755
19
                                               ResultRegTypes[i]);
2756
19
      if (getTargetHooks().isScalarizableAsmOperand(*this, TruncTy)) {
2757
1
        Builder.CreateStore(Tmp, A);
2758
1
        continue;
2759
1
      }
2760
2761
18
      QualType Ty = getContext().getIntTypeForBitwidth(Size, /*Signed*/ false);
2762
18
      if (Ty.isNull()) {
2763
4
        const Expr *OutExpr = S.getOutputExpr(i);
2764
4
        CGM.getDiags().Report(OutExpr->getExprLoc(),
2765
4
                              diag::err_store_value_to_reg);
2766
4
        return;
2767
4
      }
2768
14
      Dest = MakeAddrLValue(A, Ty);
2769
14
    }
2770
1.08k
    EmitStoreThroughLValue(RValue::get(Tmp), Dest);
2771
1.08k
  }
2772
1.87k
}
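
One common way the register/expression type mismatch handled in the loop above arises is a small output tied to a wider input: the output register type was widened earlier to match, and the result is truncated back before the store. Sketch:

void narrow_out(int i) {
  char c;
  asm("" : "=r"(c) : "0"(i));
  // ResultRegTypes[i] is i32 (widened to the tied input) while
  // ResultTruncRegTypes[i] is i8, so CreateZExtOrTrunc truncates the
  // result before EmitStoreThroughLValue writes it to c.
}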
2773
2774
1.04k
LValue CodeGenFunction::InitCapturedStruct(const CapturedStmt &S) {
2775
1.04k
  const RecordDecl *RD = S.getCapturedRecordDecl();
2776
1.04k
  QualType RecordTy = getContext().getRecordType(RD);
2777
2778
  // Initialize the captured struct.
2779
1.04k
  LValue SlotLV =
2780
1.04k
    MakeAddrLValue(CreateMemTemp(RecordTy, "agg.captured"), RecordTy);
2781
2782
1.04k
  RecordDecl::field_iterator CurField = RD->field_begin();
2783
1.04k
  for (CapturedStmt::const_capture_init_iterator I = S.capture_init_begin(),
2784
1.04k
                                                 E = S.capture_init_end();
2785
2.43k
       I != E; 
++I, ++CurField1.39k
) {
2786
1.39k
    LValue LV = EmitLValueForFieldInitialization(SlotLV, *CurField);
2787
1.39k
    if (CurField->hasCapturedVLAType()) {
2788
49
      EmitLambdaVLACapture(CurField->getCapturedVLAType(), LV);
2789
1.34k
    } else {
2790
1.34k
      EmitInitializerForField(*CurField, LV, *I);
2791
1.34k
    }
2792
1.39k
  }
2793
2794
1.04k
  return SlotLV;
2795
1.04k
}
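
The capture record built here mirrors the region's captures one field at a time, with any VLA bound stored as an explicit size value via EmitLambdaVLACapture. A hand-written analog with hypothetical names (the exact field types depend on the capture kind):

struct CapturedFrame {       // the RecordDecl behind "agg.captured"
  int *n;                    // a by-reference capture, lowered as a pointer
  unsigned long vla_bound;   // value of a captured VLA size expression
};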
2796
2797
/// Generate an outlined function for the body of a CapturedStmt, store any
2798
/// captured variables into the captured struct, and call the outlined function.
2799
llvm::Function *
2800
27
CodeGenFunction::EmitCapturedStmt(const CapturedStmt &S, CapturedRegionKind K) {
2801
27
  LValue CapStruct = InitCapturedStruct(S);
2802
2803
  // Emit the CapturedDecl
2804
27
  CodeGenFunction CGF(CGM, true);
2805
27
  CGCapturedStmtRAII CapInfoRAII(CGF, new CGCapturedStmtInfo(S, K));
2806
27
  llvm::Function *F = CGF.GenerateCapturedStmtFunction(S);
2807
27
  delete CGF.CapturedStmtInfo;
2808
2809
  // Emit call to the helper function.
2810
27
  EmitCallOrInvoke(F, CapStruct.getPointer(*this));
2811
2812
27
  return F;
2813
27
}
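
End to end, the pattern is: materialize the capture struct, then call the outlined helper with its address. A hand-written analog (names hypothetical):

struct Frame { int *n; };
static void captured_body(Frame *ctx) { ++*ctx->n; }   // the outlined body
void analog(int n) {
  Frame agg{&n};          // InitCapturedStruct: "agg.captured"
  captured_body(&agg);    // EmitCallOrInvoke(F, CapStruct.getPointer(*this))
}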
2814
2815
930
Address CodeGenFunction::GenerateCapturedStmtArgument(const CapturedStmt &S) {
2816
930
  LValue CapStruct = InitCapturedStruct(S);
2817
930
  return CapStruct.getAddress(*this);
2818
930
}
2819
2820
/// Creates the outlined function for a CapturedStmt.
2821
llvm::Function *
2822
1.04k
CodeGenFunction::GenerateCapturedStmtFunction(const CapturedStmt &S) {
2823
1.04k
  assert(CapturedStmtInfo &&
2824
1.04k
    "CapturedStmtInfo should be set when generating the captured function");
2825
0
  const CapturedDecl *CD = S.getCapturedDecl();
2826
1.04k
  const RecordDecl *RD = S.getCapturedRecordDecl();
2827
1.04k
  SourceLocation Loc = S.getBeginLoc();
2828
1.04k
  assert(CD->hasBody() && "missing CapturedDecl body");
2829
2830
  // Build the argument list.
2831
0
  ASTContext &Ctx = CGM.getContext();
2832
1.04k
  FunctionArgList Args;
2833
1.04k
  Args.append(CD->param_begin(), CD->param_end());
2834
2835
  // Create the function declaration.
2836
1.04k
  const CGFunctionInfo &FuncInfo =
2837
1.04k
    CGM.getTypes().arrangeBuiltinFunctionDeclaration(Ctx.VoidTy, Args);
2838
1.04k
  llvm::FunctionType *FuncLLVMTy = CGM.getTypes().GetFunctionType(FuncInfo);
2839
2840
1.04k
  llvm::Function *F =
2841
1.04k
    llvm::Function::Create(FuncLLVMTy, llvm::GlobalValue::InternalLinkage,
2842
1.04k
                           CapturedStmtInfo->getHelperName(), &CGM.getModule());
2843
1.04k
  CGM.SetInternalFunctionAttributes(CD, F, FuncInfo);
2844
1.04k
  if (CD->isNothrow())
2845
776
    F->addFnAttr(llvm::Attribute::NoUnwind);
2846
2847
  // Generate the function.
2848
1.04k
  StartFunction(CD, Ctx.VoidTy, F, FuncInfo, Args, CD->getLocation(),
2849
1.04k
                CD->getBody()->getBeginLoc());
2850
  // Set the context parameter in CapturedStmtInfo.
2851
1.04k
  Address DeclPtr = GetAddrOfLocalVar(CD->getContextParam());
2852
1.04k
  CapturedStmtInfo->setContextValue(Builder.CreateLoad(DeclPtr));
2853
2854
  // Initialize variable-length arrays.
2855
1.04k
  LValue Base = MakeNaturalAlignAddrLValue(CapturedStmtInfo->getContextValue(),
2856
1.04k
                                           Ctx.getTagDeclType(RD));
2857
1.39k
  for (auto *FD : RD->fields()) {
2858
1.39k
    if (FD->hasCapturedVLAType()) {
2859
49
      auto *ExprArg =
2860
49
          EmitLoadOfLValue(EmitLValueForField(Base, FD), S.getBeginLoc())
2861
49
              .getScalarVal();
2862
49
      auto VAT = FD->getCapturedVLAType();
2863
49
      VLASizeMap[VAT->getSizeExpr()] = ExprArg;
2864
49
    }
2865
1.39k
  }
2866
2867
  // If 'this' is captured, load it into CXXThisValue.
2868
1.04k
  if (CapturedStmtInfo->isCXXThisExprCaptured()) {
2869
27
    FieldDecl *FD = CapturedStmtInfo->getThisFieldDecl();
2870
27
    LValue ThisLValue = EmitLValueForField(Base, FD);
2871
27
    CXXThisValue = EmitLoadOfLValue(ThisLValue, Loc).getScalarVal();
2872
27
  }
2873
2874
1.04k
  PGO.assignRegionCounters(GlobalDecl(CD), F);
2875
1.04k
  CapturedStmtInfo->EmitBody(*this, CD->getBody());
2876
1.04k
  FinishFunction(CD->getBodyRBrace());
2877
2878
1.04k
  return F;
2879
1.04k
}