Coverage Report

Created: 2021-09-21 08:58

/Users/buildslave/jenkins/workspace/coverage/llvm-project/clang/lib/CodeGen/CGStmt.cpp
 Line| Count|Source
    1|      |//===--- CGStmt.cpp - Emit LLVM Code from Statements ----------------------===//
    2|      |//
    3|      |// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
    4|      |// See https://llvm.org/LICENSE.txt for license information.
    5|      |// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
    6|      |//
    7|      |//===----------------------------------------------------------------------===//
    8|      |//
    9|      |// This contains code to emit Stmt nodes as LLVM code.
   10|      |//
   11|      |//===----------------------------------------------------------------------===//
   12|      |
   13|      |#include "CGDebugInfo.h"
   14|      |#include "CGOpenMPRuntime.h"
   15|      |#include "CodeGenFunction.h"
   16|      |#include "CodeGenModule.h"
   17|      |#include "TargetInfo.h"
   18|      |#include "clang/AST/Attr.h"
   19|      |#include "clang/AST/Expr.h"
   20|      |#include "clang/AST/Stmt.h"
   21|      |#include "clang/AST/StmtVisitor.h"
   22|      |#include "clang/Basic/Builtins.h"
   23|      |#include "clang/Basic/DiagnosticSema.h"
   24|      |#include "clang/Basic/PrettyStackTrace.h"
   25|      |#include "clang/Basic/SourceManager.h"
   26|      |#include "clang/Basic/TargetInfo.h"
   27|      |#include "llvm/ADT/SmallSet.h"
   28|      |#include "llvm/ADT/StringExtras.h"
   29|      |#include "llvm/IR/DataLayout.h"
   30|      |#include "llvm/IR/InlineAsm.h"
   31|      |#include "llvm/IR/Intrinsics.h"
   32|      |#include "llvm/IR/MDBuilder.h"
   33|      |#include "llvm/Support/SaveAndRestore.h"
   34|      |
   35|      |using namespace clang;
   36|      |using namespace CodeGen;
   37|      |
   38|      |//===----------------------------------------------------------------------===//
   39|      |//                              Statement Emission
   40|      |//===----------------------------------------------------------------------===//
   41|      |
   42|  948k|void CodeGenFunction::EmitStopPoint(const Stmt *S) {
   43|  948k|  if (CGDebugInfo *DI = getDebugInfo()) {
   44|  510k|    SourceLocation Loc;
   45|  510k|    Loc = S->getBeginLoc();
   46|  510k|    DI->EmitLocation(Builder, Loc);
   47|      |
   48|  510k|    LastStopPoint = Loc;
   49|  510k|  }
   50|  948k|}
   51|      |
   52| 1.03M|void CodeGenFunction::EmitStmt(const Stmt *S, ArrayRef<const Attr *> Attrs) {
   53| 1.03M|  assert(S && "Null statement?");
   54|     0|  PGO.setCurrentStmt(S);
   55|      |
   56|      |  // These statements have their own debug info handling.
   57| 1.03M|  if (EmitSimpleStmt(S, Attrs))
   58|  342k|    return;
   59|      |
   60|      |  // Check if we are generating unreachable code.
   61|  696k|  if (!HaveInsertPoint()) {
   62|      |    // If so, and the statement doesn't contain a label, then we do not need to
   63|      |    // generate actual code. This is safe because (1) the current point is
   64|      |    // unreachable, so we don't need to execute the code, and (2) we've already
   65|      |    // handled the statements which update internal data structures (like the
   66|      |    // local variable map) which could be used by subsequent statements.
   67|   157|    if (!ContainsLabel(S)) {
   68|      |      // Verify that any decl statements were handled as simple, they may be in
   69|      |      // scope of subsequent reachable statements.
   70|   152|      assert(!isa<DeclStmt>(*S) && "Unexpected DeclStmt!");
   71|     0|      return;
   72|   152|    }
   73|      |
   74|      |    // Otherwise, make a new block to hold the code.
   75|     5|    EnsureInsertPoint();
   76|     5|  }
   77|      |
   78|      |  // Generate a stoppoint if we are emitting debug info.
   79|  696k|  EmitStopPoint(S);
   80|      |
   81|      |  // Ignore all OpenMP directives except for simd if OpenMP with Simd is
   82|      |  // enabled.
   83|  696k|  if (getLangOpts().OpenMP && getLangOpts().OpenMPSimd) {
   84| 48.0k|    if (const auto *D = dyn_cast<OMPExecutableDirective>(S)) {
   85| 14.3k|      EmitSimpleOMPExecutableDirective(*D);
   86| 14.3k|      return;
   87| 14.3k|    }
   88| 48.0k|  }
   89|      |
   90|  681k|  switch (S->getStmtClass()) {
   91|     0|  case Stmt::NoStmtClass:
   92|     0|  case Stmt::CXXCatchStmtClass:
   93|     0|  case Stmt::SEHExceptStmtClass:
   94|     0|  case Stmt::SEHFinallyStmtClass:
   95|     0|  case Stmt::MSDependentExistsStmtClass:
   96|     0|    llvm_unreachable("invalid statement class to emit generically");
   97|     0|  case Stmt::NullStmtClass:
   98|     0|  case Stmt::CompoundStmtClass:
   99|     0|  case Stmt::DeclStmtClass:
  100|     0|  case Stmt::LabelStmtClass:
  101|     0|  case Stmt::AttributedStmtClass:
  102|     0|  case Stmt::GotoStmtClass:
  103|     0|  case Stmt::BreakStmtClass:
  104|     0|  case Stmt::ContinueStmtClass:
  105|     0|  case Stmt::DefaultStmtClass:
  106|     0|  case Stmt::CaseStmtClass:
  107|     0|  case Stmt::SEHLeaveStmtClass:
  108|     0|    llvm_unreachable("should have emitted these statements as simple");
  109|      |
  110|     0|#define STMT(Type, Base)
  111|     0|#define ABSTRACT_STMT(Op)
  112|     0|#define EXPR(Type, Base) \
  113| 28.2M|  case Stmt::Type##Class:
  114|  309k|#include "clang/AST/StmtNodes.inc"
  115|  309k|  {
  116|      |    // Remember the block we came in on.
  117|  309k|    llvm::BasicBlock *incoming = Builder.GetInsertBlock();
  118|  309k|    assert(incoming && "expression emission must have an insertion point");
  119|      |
  120|     0|    EmitIgnoredExpr(cast<Expr>(S));
  121|      |
  122|  309k|    llvm::BasicBlock *outgoing = Builder.GetInsertBlock();
  123|  309k|    assert(outgoing && "expression emission cleared block!");
  124|      |
  125|      |    // The expression emitters assume (reasonably!) that the insertion
  126|      |    // point is always set.  To maintain that, the call-emission code
  127|      |    // for noreturn functions has to enter a new block with no
  128|      |    // predecessors.  We want to kill that block and mark the current
  129|      |    // insertion point unreachable in the common case of a call like
  130|      |    // "exit();".  Since expression emission doesn't otherwise create
  131|      |    // blocks with no predecessors, we can just test for that.
  132|      |    // However, we must be careful not to do this to our incoming
  133|      |    // block, because *statement* emission does sometimes create
  134|      |    // reachable blocks which will have no predecessors until later in
  135|      |    // the function.  This occurs with, e.g., labels that are not
  136|      |    // reachable by fallthrough.
  137|  309k|    if (incoming != outgoing && outgoing->use_empty()) {
  138| 2.75k|      outgoing->eraseFromParent();
  139| 2.75k|      Builder.ClearInsertionPoint();
  140| 2.75k|    }
  141|  309k|    break;
  142| 27.9M|  }
  143|      |
  144|    22|  case Stmt::IndirectGotoStmtClass:
  145|    22|    EmitIndirectGotoStmt(cast<IndirectGotoStmt>(*S)); break;
  146|      |
  147|  116k|  case Stmt::IfStmtClass:      EmitIfStmt(cast<IfStmt>(*S));              break;
  148| 1.89k|  case Stmt::WhileStmtClass:   EmitWhileStmt(cast<WhileStmt>(*S), Attrs); break;
  149|   752|  case Stmt::DoStmtClass:      EmitDoStmt(cast<DoStmt>(*S), Attrs);       break;
  150| 17.1k|  case Stmt::ForStmtClass:     EmitForStmt(cast<ForStmt>(*S), Attrs);     break;
  151|      |
  152|  215k|  case Stmt::ReturnStmtClass:  EmitReturnStmt(cast<ReturnStmt>(*S));      break;
  153|      |
  154|   492|  case Stmt::SwitchStmtClass:  EmitSwitchStmt(cast<SwitchStmt>(*S));      break;
  155| 1.66k|  case Stmt::GCCAsmStmtClass:  // Intentional fall-through.
  156| 1.82k|  case Stmt::MSAsmStmtClass:   EmitAsmStmt(cast<AsmStmt>(*S));            break;
  157|    55|  case Stmt::CoroutineBodyStmtClass:
  158|    55|    EmitCoroutineBody(cast<CoroutineBodyStmt>(*S));
  159|    55|    break;
  160|    54|  case Stmt::CoreturnStmtClass:
  161|    54|    EmitCoreturnStmt(cast<CoreturnStmt>(*S));
  162|    54|    break;
  163|    27|  case Stmt::CapturedStmtClass: {
  164|    27|    const CapturedStmt *CS = cast<CapturedStmt>(S);
  165|    27|    EmitCapturedStmt(*CS, CS->getCapturedRegionKind());
  166|    27|    }
  167|    27|    break;
  168|   220|  case Stmt::ObjCAtTryStmtClass:
  169|   220|    EmitObjCAtTryStmt(cast<ObjCAtTryStmt>(*S));
  170|   220|    break;
  171|     0|  case Stmt::ObjCAtCatchStmtClass:
  172|     0|    llvm_unreachable(
  173|     0|                    "@catch statements should be handled by EmitObjCAtTryStmt");
  174|     0|  case Stmt::ObjCAtFinallyStmtClass:
  175|     0|    llvm_unreachable(
  176|     0|                  "@finally statements should be handled by EmitObjCAtTryStmt");
  177|    50|  case Stmt::ObjCAtThrowStmtClass:
  178|    50|    EmitObjCAtThrowStmt(cast<ObjCAtThrowStmt>(*S));
  179|    50|    break;
  180|    13|  case Stmt::ObjCAtSynchronizedStmtClass:
  181|    13|    EmitObjCAtSynchronizedStmt(cast<ObjCAtSynchronizedStmt>(*S));
  182|    13|    break;
  183|    70|  case Stmt::ObjCForCollectionStmtClass:
  184|    70|    EmitObjCForCollectionStmt(cast<ObjCForCollectionStmt>(*S));
  185|    70|    break;
  186|   113|  case Stmt::ObjCAutoreleasePoolStmtClass:
  187|   113|    EmitObjCAutoreleasePoolStmt(cast<ObjCAutoreleasePoolStmt>(*S));
  188|   113|    break;
  189|      |
  190|   338|  case Stmt::CXXTryStmtClass:
  191|   338|    EmitCXXTryStmt(cast<CXXTryStmt>(*S));
  192|   338|    break;
  193|   128|  case Stmt::CXXForRangeStmtClass:
  194|   128|    EmitCXXForRangeStmt(cast<CXXForRangeStmt>(*S), Attrs);
  195|   128|    break;
  196|   137|  case Stmt::SEHTryStmtClass:
  197|   137|    EmitSEHTryStmt(cast<SEHTryStmt>(*S));
  198|   137|    break;
  199|     0|  case Stmt::OMPMetaDirectiveClass:
  200|     0|    EmitOMPMetaDirective(cast<OMPMetaDirective>(*S));
  201|     0|    break;
  202|    32|  case Stmt::OMPCanonicalLoopClass:
  203|    32|    EmitOMPCanonicalLoop(cast<OMPCanonicalLoop>(S));
  204|    32|    break;
  205| 1.04k|  case Stmt::OMPParallelDirectiveClass:
  206| 1.04k|    EmitOMPParallelDirective(cast<OMPParallelDirective>(*S));
  207| 1.04k|    break;
  208|   181|  case Stmt::OMPSimdDirectiveClass:
  209|   181|    EmitOMPSimdDirective(cast<OMPSimdDirective>(*S));
  210|   181|    break;
  211|     8|  case Stmt::OMPTileDirectiveClass:
  212|     8|    EmitOMPTileDirective(cast<OMPTileDirective>(*S));
  213|     8|    break;
  214|    18|  case Stmt::OMPUnrollDirectiveClass:
  215|    18|    EmitOMPUnrollDirective(cast<OMPUnrollDirective>(*S));
  216|    18|    break;
  217|   423|  case Stmt::OMPForDirectiveClass:
  218|   423|    EmitOMPForDirective(cast<OMPForDirective>(*S));
  219|   423|    break;
  220|   253|  case Stmt::OMPForSimdDirectiveClass:
  221|   253|    EmitOMPForSimdDirective(cast<OMPForSimdDirective>(*S));
  222|   253|    break;
  223|    62|  case Stmt::OMPSectionsDirectiveClass:
  224|    62|    EmitOMPSectionsDirective(cast<OMPSectionsDirective>(*S));
  225|    62|    break;
  226|    54|  case Stmt::OMPSectionDirectiveClass:
  227|    54|    EmitOMPSectionDirective(cast<OMPSectionDirective>(*S));
  228|    54|    break;
  229|    57|  case Stmt::OMPSingleDirectiveClass:
  230|    57|    EmitOMPSingleDirective(cast<OMPSingleDirective>(*S));
  231|    57|    break;
  232|    25|  case Stmt::OMPMasterDirectiveClass:
  233|    25|    EmitOMPMasterDirective(cast<OMPMasterDirective>(*S));
  234|    25|    break;
  235|   102|  case Stmt::OMPCriticalDirectiveClass:
  236|   102|    EmitOMPCriticalDirective(cast<OMPCriticalDirective>(*S));
  237|   102|    break;
  238|   266|  case Stmt::OMPParallelForDirectiveClass:
  239|   266|    EmitOMPParallelForDirective(cast<OMPParallelForDirective>(*S));
  240|   266|    break;
  241|   105|  case Stmt::OMPParallelForSimdDirectiveClass:
  242|   105|    EmitOMPParallelForSimdDirective(cast<OMPParallelForSimdDirective>(*S));
  243|   105|    break;
  244|    22|  case Stmt::OMPParallelMasterDirectiveClass:
  245|    22|    EmitOMPParallelMasterDirective(cast<OMPParallelMasterDirective>(*S));
  246|    22|    break;
  247|    26|  case Stmt::OMPParallelSectionsDirectiveClass:
  248|    26|    EmitOMPParallelSectionsDirective(cast<OMPParallelSectionsDirective>(*S));
  249|    26|    break;
  250|   183|  case Stmt::OMPTaskDirectiveClass:
  251|   183|    EmitOMPTaskDirective(cast<OMPTaskDirective>(*S));
  252|   183|    break;
  253|    16|  case Stmt::OMPTaskyieldDirectiveClass:
  254|    16|    EmitOMPTaskyieldDirective(cast<OMPTaskyieldDirective>(*S));
  255|    16|    break;
  256|    30|  case Stmt::OMPBarrierDirectiveClass:
  257|    30|    EmitOMPBarrierDirective(cast<OMPBarrierDirective>(*S));
  258|    30|    break;
  259|    12|  case Stmt::OMPTaskwaitDirectiveClass:
  260|    12|    EmitOMPTaskwaitDirective(cast<OMPTaskwaitDirective>(*S));
  261|    12|    break;
  262|    39|  case Stmt::OMPTaskgroupDirectiveClass:
  263|    39|    EmitOMPTaskgroupDirective(cast<OMPTaskgroupDirective>(*S));
  264|    39|    break;
  265|    40|  case Stmt::OMPFlushDirectiveClass:
  266|    40|    EmitOMPFlushDirective(cast<OMPFlushDirective>(*S));
  267|    40|    break;
  268|    14|  case Stmt::OMPDepobjDirectiveClass:
  269|    14|    EmitOMPDepobjDirective(cast<OMPDepobjDirective>(*S));
  270|    14|    break;
  271|    36|  case Stmt::OMPScanDirectiveClass:
  272|    36|    EmitOMPScanDirective(cast<OMPScanDirective>(*S));
  273|    36|    break;
  274|    80|  case Stmt::OMPOrderedDirectiveClass:
  275|    80|    EmitOMPOrderedDirective(cast<OMPOrderedDirective>(*S));
  276|    80|    break;
  277|   555|  case Stmt::OMPAtomicDirectiveClass:
  278|   555|    EmitOMPAtomicDirective(cast<OMPAtomicDirective>(*S));
  279|   555|    break;
  280| 5.06k|  case Stmt::OMPTargetDirectiveClass:
  281| 5.06k|    EmitOMPTargetDirective(cast<OMPTargetDirective>(*S));
  282| 5.06k|    break;
  283| 1.00k|  case Stmt::OMPTeamsDirectiveClass:
  284| 1.00k|    EmitOMPTeamsDirective(cast<OMPTeamsDirective>(*S));
  285| 1.00k|    break;
  286|    48|  case Stmt::OMPCancellationPointDirectiveClass:
  287|    48|    EmitOMPCancellationPointDirective(cast<OMPCancellationPointDirective>(*S));
  288|    48|    break;
  289|   163|  case Stmt::OMPCancelDirectiveClass:
  290|   163|    EmitOMPCancelDirective(cast<OMPCancelDirective>(*S));
  291|   163|    break;
  292|   163|  case Stmt::OMPTargetDataDirectiveClass:
  293|   163|    EmitOMPTargetDataDirective(cast<OMPTargetDataDirective>(*S));
  294|   163|    break;
  295|   107|  case Stmt::OMPTargetEnterDataDirectiveClass:
  296|   107|    EmitOMPTargetEnterDataDirective(cast<OMPTargetEnterDataDirective>(*S));
  297|   107|    break;
  298|    77|  case Stmt::OMPTargetExitDataDirectiveClass:
  299|    77|    EmitOMPTargetExitDataDirective(cast<OMPTargetExitDataDirective>(*S));
  300|    77|    break;
  301|   547|  case Stmt::OMPTargetParallelDirectiveClass:
  302|   547|    EmitOMPTargetParallelDirective(cast<OMPTargetParallelDirective>(*S));
  303|   547|    break;
  304|   344|  case Stmt::OMPTargetParallelForDirectiveClass:
  305|   344|    EmitOMPTargetParallelForDirective(cast<OMPTargetParallelForDirective>(*S));
  306|   344|    break;
  307|    39|  case Stmt::OMPTaskLoopDirectiveClass:
  308|    39|    EmitOMPTaskLoopDirective(cast<OMPTaskLoopDirective>(*S));
  309|    39|    break;
  310|    40|  case Stmt::OMPTaskLoopSimdDirectiveClass:
  311|    40|    EmitOMPTaskLoopSimdDirective(cast<OMPTaskLoopSimdDirective>(*S));
  312|    40|    break;
  313|    35|  case Stmt::OMPMasterTaskLoopDirectiveClass:
  314|    35|    EmitOMPMasterTaskLoopDirective(cast<OMPMasterTaskLoopDirective>(*S));
  315|    35|    break;
  316|    40|  case Stmt::OMPMasterTaskLoopSimdDirectiveClass:
  317|    40|    EmitOMPMasterTaskLoopSimdDirective(
  318|    40|        cast<OMPMasterTaskLoopSimdDirective>(*S));
  319|    40|    break;
  320|    33|  case Stmt::OMPParallelMasterTaskLoopDirectiveClass:
  321|    33|    EmitOMPParallelMasterTaskLoopDirective(
  322|    33|        cast<OMPParallelMasterTaskLoopDirective>(*S));
  323|    33|    break;
  324|    39|  case Stmt::OMPParallelMasterTaskLoopSimdDirectiveClass:
  325|    39|    EmitOMPParallelMasterTaskLoopSimdDirective(
  326|    39|        cast<OMPParallelMasterTaskLoopSimdDirective>(*S));
  327|    39|    break;
  328|   110|  case Stmt::OMPDistributeDirectiveClass:
  329|   110|    EmitOMPDistributeDirective(cast<OMPDistributeDirective>(*S));
  330|   110|    break;
  331|   233|  case Stmt::OMPTargetUpdateDirectiveClass:
  332|   233|    EmitOMPTargetUpdateDirective(cast<OMPTargetUpdateDirective>(*S));
  333|   233|    break;
  334|   401|  case Stmt::OMPDistributeParallelForDirectiveClass:
  335|   401|    EmitOMPDistributeParallelForDirective(
  336|   401|        cast<OMPDistributeParallelForDirective>(*S));
  337|   401|    break;
  338|   313|  case Stmt::OMPDistributeParallelForSimdDirectiveClass:
  339|   313|    EmitOMPDistributeParallelForSimdDirective(
  340|   313|        cast<OMPDistributeParallelForSimdDirective>(*S));
  341|   313|    break;
  342|   150|  case Stmt::OMPDistributeSimdDirectiveClass:
  343|   150|    EmitOMPDistributeSimdDirective(cast<OMPDistributeSimdDirective>(*S));
  344|   150|    break;
  345|   310|  case Stmt::OMPTargetParallelForSimdDirectiveClass:
  346|   310|    EmitOMPTargetParallelForSimdDirective(
  347|   310|        cast<OMPTargetParallelForSimdDirective>(*S));
  348|   310|    break;
  349|   355|  case Stmt::OMPTargetSimdDirectiveClass:
  350|   355|    EmitOMPTargetSimdDirective(cast<OMPTargetSimdDirective>(*S));
  351|   355|    break;
  352|   114|  case Stmt::OMPTeamsDistributeDirectiveClass:
  353|   114|    EmitOMPTeamsDistributeDirective(cast<OMPTeamsDistributeDirective>(*S));
  354|   114|    break;
  355|   126|  case Stmt::OMPTeamsDistributeSimdDirectiveClass:
  356|   126|    EmitOMPTeamsDistributeSimdDirective(
  357|   126|        cast<OMPTeamsDistributeSimdDirective>(*S));
  358|   126|    break;
  359|   328|  case Stmt::OMPTeamsDistributeParallelForSimdDirectiveClass:
  360|   328|    EmitOMPTeamsDistributeParallelForSimdDirective(
  361|   328|        cast<OMPTeamsDistributeParallelForSimdDirective>(*S));
  362|   328|    break;
  363|   340|  case Stmt::OMPTeamsDistributeParallelForDirectiveClass:
  364|   340|    EmitOMPTeamsDistributeParallelForDirective(
  365|   340|        cast<OMPTeamsDistributeParallelForDirective>(*S));
  366|   340|    break;
  367|   757|  case Stmt::OMPTargetTeamsDirectiveClass:
  368|   757|    EmitOMPTargetTeamsDirective(cast<OMPTargetTeamsDirective>(*S));
  369|   757|    break;
  370|   462|  case Stmt::OMPTargetTeamsDistributeDirectiveClass:
  371|   462|    EmitOMPTargetTeamsDistributeDirective(
  372|   462|        cast<OMPTargetTeamsDistributeDirective>(*S));
  373|   462|    break;
  374|   496|  case Stmt::OMPTargetTeamsDistributeParallelForDirectiveClass:
  375|   496|    EmitOMPTargetTeamsDistributeParallelForDirective(
  376|   496|        cast<OMPTargetTeamsDistributeParallelForDirective>(*S));
  377|   496|    break;
  378|   557|  case Stmt::OMPTargetTeamsDistributeParallelForSimdDirectiveClass:
  379|   557|    EmitOMPTargetTeamsDistributeParallelForSimdDirective(
  380|   557|        cast<OMPTargetTeamsDistributeParallelForSimdDirective>(*S));
  381|   557|    break;
  382|   428|  case Stmt::OMPTargetTeamsDistributeSimdDirectiveClass:
  383|   428|    EmitOMPTargetTeamsDistributeSimdDirective(
  384|   428|        cast<OMPTargetTeamsDistributeSimdDirective>(*S));
  385|   428|    break;
  386|     0|  case Stmt::OMPInteropDirectiveClass:
  387|     0|    llvm_unreachable("Interop directive not supported yet.");
  388|     0|    break;
  389|     0|  case Stmt::OMPDispatchDirectiveClass:
  390|     0|    llvm_unreachable("Dispatch directive not supported yet.");
  391|     0|    break;
  392|    40|  case Stmt::OMPMaskedDirectiveClass:
  393|    40|    EmitOMPMaskedDirective(cast<OMPMaskedDirective>(*S));
  394|    40|    break;
  395|  681k|  }
  396|  681k|}
  397|      |
  398|      |bool CodeGenFunction::EmitSimpleStmt(const Stmt *S,
  399| 1.03M|                                     ArrayRef<const Attr *> Attrs) {
  400| 1.03M|  switch (S->getStmtClass()) {
  401|  696k|  default:
  402|  696k|    return false;
  403| 12.9k|  case Stmt::NullStmtClass:
  404| 12.9k|    break;
  405|  106k|  case Stmt::CompoundStmtClass:
  406|  106k|    EmitCompoundStmt(cast<CompoundStmt>(*S));
  407|  106k|    break;
  408|  201k|  case Stmt::DeclStmtClass:
  409|  201k|    EmitDeclStmt(cast<DeclStmt>(*S));
  410|  201k|    break;
  411|   202|  case Stmt::LabelStmtClass:
  412|   202|    EmitLabelStmt(cast<LabelStmt>(*S));
  413|   202|    break;
  414|   274|  case Stmt::AttributedStmtClass:
  415|   274|    EmitAttributedStmt(cast<AttributedStmt>(*S));
  416|   274|    break;
  417| 3.12k|  case Stmt::GotoStmtClass:
  418| 3.12k|    EmitGotoStmt(cast<GotoStmt>(*S));
  419| 3.12k|    break;
  420| 5.70k|  case Stmt::BreakStmtClass:
  421| 5.70k|    EmitBreakStmt(cast<BreakStmt>(*S));
  422| 5.70k|    break;
  423| 10.8k|  case Stmt::ContinueStmtClass:
  424| 10.8k|    EmitContinueStmt(cast<ContinueStmt>(*S));
  425| 10.8k|    break;
  426|   186|  case Stmt::DefaultStmtClass:
  427|   186|    EmitDefaultStmt(cast<DefaultStmt>(*S), Attrs);
  428|   186|    break;
  429|   995|  case Stmt::CaseStmtClass:
  430|   995|    EmitCaseStmt(cast<CaseStmt>(*S), Attrs);
  431|   995|    break;
  432|    10|  case Stmt::SEHLeaveStmtClass:
  433|    10|    EmitSEHLeaveStmt(cast<SEHLeaveStmt>(*S));
  434|    10|    break;
  435| 1.03M|  }
  436|  342k|  return true;
  437| 1.03M|}
  438|      |
  439|      |/// EmitCompoundStmt - Emit a compound statement {..} node.  If GetLast is true,
  440|      |/// this captures the expression result of the last sub-statement and returns it
  441|      |/// (for use by the statement expression extension).
  442|      |Address CodeGenFunction::EmitCompoundStmt(const CompoundStmt &S, bool GetLast,
  443|  111k|                                          AggValueSlot AggSlot) {
  444|  111k|  PrettyStackTraceLoc CrashInfo(getContext().getSourceManager(),S.getLBracLoc(),
  445|  111k|                             "LLVM IR generation of compound statement ('{}')");
  446|      |
  447|      |  // Keep track of the current cleanup stack depth, including debug scopes.
  448|  111k|  LexicalScope Scope(*this, S.getSourceRange());
  449|      |
  450|  111k|  return EmitCompoundStmtWithoutScope(S, GetLast, AggSlot);
  451|  111k|}
  452|      |
  453|      |Address
  454|      |CodeGenFunction::EmitCompoundStmtWithoutScope(const CompoundStmt &S,
  455|      |                                              bool GetLast,
  456|  387k|                                              AggValueSlot AggSlot) {
  457|      |
  458|  387k|  const Stmt *ExprResult = S.getStmtExprResult();
  459|  387k|  assert((!GetLast || (GetLast && ExprResult)) &&
  460|  387k|         "If GetLast is true then the CompoundStmt must have a StmtExprResult");
  461|      |
  462|     0|  Address RetAlloca = Address::invalid();
  463|      |
  464|  784k|  for (auto *CurStmt : S.body()) {
  465|  784k|    if (GetLast && ExprResult == CurStmt) {
  466|      |      // We have to special case labels here.  They are statements, but when put
  467|      |      // at the end of a statement expression, they yield the value of their
  468|      |      // subexpression.  Handle this by walking through all labels we encounter,
  469|      |      // emitting them before we evaluate the subexpr.
  470|      |      // Similar issues arise for attributed statements.
  471| 3.71k|      while (!isa<Expr>(ExprResult)) {
  472|     5|        if (const auto *LS = dyn_cast<LabelStmt>(ExprResult)) {
  473|     3|          EmitLabel(LS->getDecl());
  474|     3|          ExprResult = LS->getSubStmt();
  475|     3|        } else if (const auto *AS = dyn_cast<AttributedStmt>(ExprResult)) {
  476|      |          // FIXME: Update this if we ever have attributes that affect the
  477|      |          // semantics of an expression.
  478|     2|          ExprResult = AS->getSubStmt();
  479|     2|        } else {
  480|     0|          llvm_unreachable("unknown value statement");
  481|     0|        }
  482|     5|      }
  483|      |
  484| 3.70k|      EnsureInsertPoint();
  485|      |
  486| 3.70k|      const Expr *E = cast<Expr>(ExprResult);
  487| 3.70k|      QualType ExprTy = E->getType();
  488| 3.70k|      if (hasAggregateEvaluationKind(ExprTy)) {
  489|   647|        EmitAggExpr(E, AggSlot);
  490| 3.06k|      } else {
  491|      |        // We can't return an RValue here because there might be cleanups at
  492|      |        // the end of the StmtExpr.  Because of that, we have to emit the result
  493|      |        // here into a temporary alloca.
  494| 3.06k|        RetAlloca = CreateMemTemp(ExprTy);
  495| 3.06k|        EmitAnyExprToMem(E, RetAlloca, Qualifiers(),
  496| 3.06k|                         /*IsInit*/ false);
  497| 3.06k|      }
  498|  781k|    } else {
  499|  781k|      EmitStmt(CurStmt);
  500|  781k|    }
  501|  784k|  }
  502|      |
  503|  387k|  return RetAlloca;
  504|  387k|}
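
The GetLast path above implements the GNU statement-expression extension. For reference, a minimal example of the construct (an illustrative sketch, not part of CGStmt.cpp; requires Clang/GCC extensions):

  #include <cstdio>

  int main() {
    // A statement expression: the value of the last sub-statement
    // becomes the value of the whole ({ ... }) expression.
    int y = ({
      int x = 20;
      x + 22;   // the ExprResult; emitted into a temporary alloca
    });
    std::printf("%d\n", y);   // prints 42
    return 0;
  }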
  505|      |
  506|   912|void CodeGenFunction::SimplifyForwardingBlocks(llvm::BasicBlock *BB) {
  507|   912|  llvm::BranchInst *BI = dyn_cast<llvm::BranchInst>(BB->getTerminator());
  508|      |
  509|      |  // If there is a cleanup stack, then it isn't worth trying to
  510|      |  // simplify this block (we would need to remove it from the scope map
  511|      |  // and cleanup entry).
  512|   912|  if (!EHStack.empty())
  513|   135|    return;
  514|      |
  515|      |  // Can only simplify direct branches.
  516|   777|  if (!BI || !BI->isUnconditional())
  517|     0|    return;
  518|      |
  519|      |  // Can only simplify empty blocks.
  520|   777|  if (BI->getIterator() != BB->begin())
  521|     1|    return;
  522|      |
  523|   776|  BB->replaceAllUsesWith(BI->getSuccessor(0));
  524|   776|  BI->eraseFromParent();
  525|   776|  BB->eraseFromParent();
  526|   776|}
  527|      |
  528|  590k|void CodeGenFunction::EmitBlock(llvm::BasicBlock *BB, bool IsFinished) {
  529|  590k|  llvm::BasicBlock *CurBB = Builder.GetInsertBlock();
  530|      |
  531|      |  // Fall out of the current block (if necessary).
  532|  590k|  EmitBranch(BB);
  533|      |
  534|  590k|  if (IsFinished && BB->use_empty()) {
  535|   690|    delete BB;
  536|   690|    return;
  537|   690|  }
  538|      |
  539|      |  // Place the block after the current block, if possible, or else at
  540|      |  // the end of the function.
  541|  590k|  if (CurBB && CurBB->getParent())
  542|  377k|    CurFn->getBasicBlockList().insertAfter(CurBB->getIterator(), BB);
  543|  212k|  else
  544|  212k|    CurFn->getBasicBlockList().push_back(BB);
  545|  590k|  Builder.SetInsertPoint(BB);
  546|  590k|}
  547|      |
  548|  770k|void CodeGenFunction::EmitBranch(llvm::BasicBlock *Target) {
  549|      |  // Emit a branch from the current block to the target one if this
  550|      |  // was a real block.  If this was just a fall-through block after a
  551|      |  // terminator, don't emit it.
  552|  770k|  llvm::BasicBlock *CurBB = Builder.GetInsertBlock();
  553|      |
  554|  770k|  if (!CurBB || CurBB->getTerminator()) {
  555|      |    // If there is no insert point or the previous block is already
  556|      |    // terminated, don't touch it.
  557|  493k|  } else {
  558|      |    // Otherwise, create a fall-through branch.
  559|  277k|    Builder.CreateBr(Target);
  560|  277k|  }
  561|      |
  562|  770k|  Builder.ClearInsertionPoint();
  563|  770k|}
  564|      |
  565|   864|void CodeGenFunction::EmitBlockAfterUses(llvm::BasicBlock *block) {
  566|   864|  bool inserted = false;
  567|   864|  for (llvm::User *u : block->users()) {
  568|   864|    if (llvm::Instruction *insn = dyn_cast<llvm::Instruction>(u)) {
  569|   864|      CurFn->getBasicBlockList().insertAfter(insn->getParent()->getIterator(),
  570|   864|                                             block);
  571|   864|      inserted = true;
  572|   864|      break;
  573|   864|    }
  574|   864|  }
  575|      |
  576|   864|  if (!inserted)
  577|     0|    CurFn->getBasicBlockList().push_back(block);
  578|      |
  579|   864|  Builder.SetInsertPoint(block);
  580|   864|}
  581|      |
  582|      |CodeGenFunction::JumpDest
  583| 3.22k|CodeGenFunction::getJumpDestForLabel(const LabelDecl *D) {
  584| 3.22k|  JumpDest &Dest = LabelMap[D];
  585| 3.22k|  if (Dest.isValid()) return Dest;
  586|      |
  587|      |  // Create, but don't insert, the new block.
  588|   142|  Dest = JumpDest(createBasicBlock(D->getName()),
  589|   142|                  EHScopeStack::stable_iterator::invalid(),
  590|   142|                  NextCleanupDestIndex++);
  591|   142|  return Dest;
  592| 3.22k|}
  593|      |
  594|   205|void CodeGenFunction::EmitLabel(const LabelDecl *D) {
  595|      |  // Add this label to the current lexical scope if we're within any
  596|      |  // normal cleanups.  Jumps "in" to this label --- when permitted by
  597|      |  // the language --- may need to be routed around such cleanups.
  598|   205|  if (EHStack.hasNormalCleanups() && CurLexicalScope)
  599|     0|    CurLexicalScope->addLabel(D);
  600|      |
  601|   205|  JumpDest &Dest = LabelMap[D];
  602|      |
  603|      |  // If we didn't need a forward reference to this label, just go
  604|      |  // ahead and create a destination at the current scope.
  605|   205|  if (!Dest.isValid()) {
  606|    63|    Dest = getJumpDestInCurrentScope(D->getName());
  607|      |
  608|      |  // Otherwise, we need to give this label a target depth and remove
  609|      |  // it from the branch-fixups list.
  610|   142|  } else {
  611|   142|    assert(!Dest.getScopeDepth().isValid() && "already emitted label!");
  612|     0|    Dest.setScopeDepth(EHStack.stable_begin());
  613|   142|    ResolveBranchFixups(Dest.getBlock());
  614|   142|  }
  615|      |
  616|     0|  EmitBlock(Dest.getBlock());
  617|      |
  618|      |  // Emit debug info for labels.
  619|   205|  if (CGDebugInfo *DI = getDebugInfo()) {
  620|    19|    if (CGM.getCodeGenOpts().hasReducedDebugInfo()) {
  621|    19|      DI->setLocation(D->getLocation());
  622|    19|      DI->EmitLabel(D, Builder);
  623|    19|    }
  624|    19|  }
  625|      |
  626|   205|  incrementProfileCounter(D->getStmt());
  627|   205|}
  628|      |
  629|      |/// Change the cleanup scope of the labels in this lexical scope to
  630|      |/// match the scope of the enclosing context.
  631|     0|void CodeGenFunction::LexicalScope::rescopeLabels() {
  632|     0|  assert(!Labels.empty());
  633|     0|  EHScopeStack::stable_iterator innermostScope
  634|     0|    = CGF.EHStack.getInnermostNormalCleanup();
  635|      |
  636|      |  // Change the scope depth of all the labels.
  637|     0|  for (SmallVectorImpl<const LabelDecl*>::const_iterator
  638|     0|         i = Labels.begin(), e = Labels.end(); i != e; ++i) {
  639|     0|    assert(CGF.LabelMap.count(*i));
  640|     0|    JumpDest &dest = CGF.LabelMap.find(*i)->second;
  641|     0|    assert(dest.getScopeDepth().isValid());
  642|     0|    assert(innermostScope.encloses(dest.getScopeDepth()));
  643|     0|    dest.setScopeDepth(innermostScope);
  644|     0|  }
  645|      |
  646|      |  // Reparent the labels if the new scope also has cleanups.
  647|     0|  if (innermostScope != EHScopeStack::stable_end() && ParentScope) {
  648|     0|    ParentScope->Labels.append(Labels.begin(), Labels.end());
  649|     0|  }
  650|     0|}
  651|      |
  652|      |
  653|   202|void CodeGenFunction::EmitLabelStmt(const LabelStmt &S) {
  654|   202|  EmitLabel(S.getDecl());
  655|      |
  656|      |  // IsEHa - emit eha.scope.begin if it's a side entry of a scope
  657|   202|  if (getLangOpts().EHAsynch && S.isSideEntry())
  658|     0|    EmitSehCppScopeBegin();
  659|      |
  660|   202|  EmitStmt(S.getSubStmt());
  661|   202|}
  662|      |
  663|   274|void CodeGenFunction::EmitAttributedStmt(const AttributedStmt &S) {
  664|   274|  bool nomerge = false;
  665|   274|  const CallExpr *musttail = nullptr;
  666|      |
  667|   346|  for (const auto *A : S.getAttrs()) {
  668|   346|    if (A->getKind() == attr::NoMerge) {
  669|     7|      nomerge = true;
  670|     7|    }
  671|   346|    if (A->getKind() == attr::MustTail) {
  672|    50|      const Stmt *Sub = S.getSubStmt();
  673|    50|      const ReturnStmt *R = cast<ReturnStmt>(Sub);
  674|    50|      musttail = cast<CallExpr>(R->getRetValue()->IgnoreParens());
  675|    50|    }
  676|   346|  }
  677|   274|  SaveAndRestore<bool> save_nomerge(InNoMergeAttributedStmt, nomerge);
  678|   274|  SaveAndRestore<const CallExpr *> save_musttail(MustTailCall, musttail);
  679|   274|  EmitStmt(S.getSubStmt(), S.getAttrs());
  680|   274|}
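
For reference, a source-level form that reaches the MustTail path above (an illustrative sketch using Clang's documented attribute spelling; not part of the report):

  int odd(unsigned n);

  int even(unsigned n) {
    if (n == 0)
      return 1;
    // MustTail: the return operand must be a call whose signature
    // matches the caller's; CodeGen records it in MustTailCall.
    [[clang::musttail]] return odd(n - 1);
  }

  int odd(unsigned n) {
    if (n == 0)
      return 0;
    [[clang::musttail]] return even(n - 1);
  }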
  681|      |
  682| 3.12k|void CodeGenFunction::EmitGotoStmt(const GotoStmt &S) {
  683|      |  // If this code is reachable then emit a stop point (if generating
  684|      |  // debug info). We have to do this ourselves because we are on the
  685|      |  // "simple" statement path.
  686| 3.12k|  if (HaveInsertPoint())
  687| 3.11k|    EmitStopPoint(&S);
  688|      |
  689| 3.12k|  EmitBranchThroughCleanup(getJumpDestForLabel(S.getLabel()));
  690| 3.12k|}
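
A forward goto is the case where getJumpDestForLabel creates the label's block before EmitLabel has seen it (an illustrative sketch, not from the report):

  int clamp_negative(int n) {
    if (n < 0)
      goto done;   // forward reference: JumpDest created, block not yet placed
    n = 0;
  done:            // EmitLabel resolves the pending JumpDest here
    return n;
  }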
  691|      |
  692|      |
  693|    22|void CodeGenFunction::EmitIndirectGotoStmt(const IndirectGotoStmt &S) {
  694|    22|  if (const LabelDecl *Target = S.getConstantTarget()) {
  695|     0|    EmitBranchThroughCleanup(getJumpDestForLabel(Target));
  696|     0|    return;
  697|     0|  }
  698|      |
  699|      |  // Ensure that we have an i8* for our PHI node.
  700|    22|  llvm::Value *V = Builder.CreateBitCast(EmitScalarExpr(S.getTarget()),
  701|    22|                                         Int8PtrTy, "addr");
  702|    22|  llvm::BasicBlock *CurBB = Builder.GetInsertBlock();
  703|      |
  704|      |  // Get the basic block for the indirect goto.
  705|    22|  llvm::BasicBlock *IndGotoBB = GetIndirectGotoBlock();
  706|      |
  707|      |  // The first instruction in the block has to be the PHI for the switch dest,
  708|      |  // add an entry for this branch.
  709|    22|  cast<llvm::PHINode>(IndGotoBB->begin())->addIncoming(V, CurBB);
  710|      |
  711|    22|  EmitBranch(IndGotoBB);
  712|    22|}
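
The construct lowered here is GNU computed goto (an illustrative sketch; GNU extension, not part of the report):

  int dispatch(int op) {
    // '&&label' takes a label's address; 'goto *expr' is the
    // IndirectGotoStmt whose target value feeds the PHI node above.
    static void *table[] = { &&inc, &&dec };
    int acc = 10;
    goto *table[op & 1];
  inc:
    return acc + 1;
  dec:
    return acc - 1;
  }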
  713|      |
  714|  116k|void CodeGenFunction::EmitIfStmt(const IfStmt &S) {
  715|      |  // C99 6.8.4.1: The first substatement is executed if the expression compares
  716|      |  // unequal to 0.  The condition must be a scalar type.
  717|  116k|  LexicalScope ConditionScope(*this, S.getCond()->getSourceRange());
  718|      |
  719|  116k|  if (S.getInit())
  720|    14|    EmitStmt(S.getInit());
  721|      |
  722|  116k|  if (S.getConditionVariable())
  723|    28|    EmitDecl(*S.getConditionVariable());
  724|      |
  725|      |  // If the condition constant folds and can be elided, try to avoid emitting
  726|      |  // the condition and the dead arm of the if/else.
  727|  116k|  bool CondConstant;
  728|  116k|  if (ConstantFoldsToSimpleInteger(S.getCond(), CondConstant,
  729|  116k|                                   S.isConstexpr())) {
  730|      |    // Figure out which block (then or else) is executed.
  731| 1.47k|    const Stmt *Executed = S.getThen();
  732| 1.47k|    const Stmt *Skipped  = S.getElse();
  733| 1.47k|    if (!CondConstant)  // Condition false?
  734| 1.35k|      std::swap(Executed, Skipped);
  735|      |
  736|      |    // If the skipped block has no labels in it, just emit the executed block.
  737|      |    // This avoids emitting dead code and simplifies the CFG substantially.
  738| 1.47k|    if (S.isConstexpr() || !ContainsLabel(Skipped)) {
  739| 1.47k|      if (CondConstant)
  740|   120|        incrementProfileCounter(&S);
  741| 1.47k|      if (Executed) {
  742| 1.35k|        RunCleanupsScope ExecutedScope(*this);
  743| 1.35k|        EmitStmt(Executed);
  744| 1.35k|      }
  745| 1.47k|      return;
  746| 1.47k|    }
  747| 1.47k|  }
  748|      |
  749|      |  // Otherwise, the condition did not fold, or we couldn't elide it.  Just emit
  750|      |  // the conditional branch.
  751|  115k|  llvm::BasicBlock *ThenBlock = createBasicBlock("if.then");
  752|  115k|  llvm::BasicBlock *ContBlock = createBasicBlock("if.end");
  753|  115k|  llvm::BasicBlock *ElseBlock = ContBlock;
  754|  115k|  if (S.getElse())
  755| 14.2k|    ElseBlock = createBasicBlock("if.else");
  756|      |
  757|      |  // Prefer the PGO based weights over the likelihood attribute.
  758|      |  // When the build isn't optimized the metadata isn't used, so don't generate
  759|      |  // it.
  760|  115k|  Stmt::Likelihood LH = Stmt::LH_None;
  761|  115k|  uint64_t Count = getProfileCount(S.getThen());
  762|  115k|  if (!Count && CGM.getCodeGenOpts().OptimizationLevel)
  763| 3.54k|    LH = Stmt::getLikelihood(S.getThen(), S.getElse());
  764|  115k|  EmitBranchOnBoolExpr(S.getCond(), ThenBlock, ElseBlock, Count, LH);
  765|      |
  766|      |  // Emit the 'then' code.
  767|  115k|  EmitBlock(ThenBlock);
  768|  115k|  incrementProfileCounter(&S);
  769|  115k|  {
  770|  115k|    RunCleanupsScope ThenScope(*this);
  771|  115k|    EmitStmt(S.getThen());
  772|  115k|  }
  773|  115k|  EmitBranch(ContBlock);
  774|      |
  775|      |  // Emit the 'else' code if present.
  776|  115k|  if (const Stmt *Else = S.getElse()) {
  777| 14.2k|    {
  778|      |      // There is no need to emit line number for an unconditional branch.
  779| 14.2k|      auto NL = ApplyDebugLocation::CreateEmpty(*this);
  780| 14.2k|      EmitBlock(ElseBlock);
  781| 14.2k|    }
  782| 14.2k|    {
  783| 14.2k|      RunCleanupsScope ElseScope(*this);
  784| 14.2k|      EmitStmt(Else);
  785| 14.2k|    }
  786| 14.2k|    {
  787|      |      // There is no need to emit line number for an unconditional branch.
  788| 14.2k|      auto NL = ApplyDebugLocation::CreateEmpty(*this);
  789| 14.2k|      EmitBranch(ContBlock);
  790| 14.2k|    }
  791| 14.2k|  }
  792|      |
  793|      |  // Emit the continuation block for code after the if.
  794|  115k|  EmitBlock(ContBlock, true);
  795|  115k|}
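
A condition that ConstantFoldsToSimpleInteger can evaluate takes the early-return path above, so the dead arm is never emitted (an illustrative sketch, not from the report):

  int pick() {
    if (sizeof(long) >= sizeof(int))  // folds to a constant at compile time
      return 1;                       // Executed: the only arm emitted
    else
      return 2;                       // Skipped: dropped unless it has a label
  }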
  796|      |
  797|      |void CodeGenFunction::EmitWhileStmt(const WhileStmt &S,
  798| 1.89k|                                    ArrayRef<const Attr *> WhileAttrs) {
  799|      |  // Emit the header for the loop, which will also become
  800|      |  // the continue target.
  801| 1.89k|  JumpDest LoopHeader = getJumpDestInCurrentScope("while.cond");
  802| 1.89k|  EmitBlock(LoopHeader.getBlock());
  803|      |
  804|      |  // Create an exit block for when the condition fails, which will
  805|      |  // also become the break target.
  806| 1.89k|  JumpDest LoopExit = getJumpDestInCurrentScope("while.end");
  807|      |
  808|      |  // Store the blocks to use for break and continue.
  809| 1.89k|  BreakContinueStack.push_back(BreakContinue(LoopExit, LoopHeader));
  810|      |
  811|      |  // C++ [stmt.while]p2:
  812|      |  //   When the condition of a while statement is a declaration, the
  813|      |  //   scope of the variable that is declared extends from its point
  814|      |  //   of declaration (3.3.2) to the end of the while statement.
  815|      |  //   [...]
  816|      |  //   The object created in a condition is destroyed and created
  817|      |  //   with each iteration of the loop.
  818| 1.89k|  RunCleanupsScope ConditionScope(*this);
  819|      |
  820| 1.89k|  if (S.getConditionVariable())
  821|     5|    EmitDecl(*S.getConditionVariable());
  822|      |
  823|      |  // Evaluate the conditional in the while header.  C99 6.8.5.1: The
  824|      |  // evaluation of the controlling expression takes place before each
  825|      |  // execution of the loop body.
  826| 1.89k|  llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());
  827|      |
  828|      |  // while(1) is common, avoid extra exit blocks.  Be sure
  829|      |  // to correctly handle break/continue though.
  830| 1.89k|  llvm::ConstantInt *C = dyn_cast<llvm::ConstantInt>(BoolCondVal);
  831| 1.89k|  bool CondIsConstInt = C != nullptr;
  832| 1.89k|  bool EmitBoolCondBranch = !CondIsConstInt || !C->isOne();
  833| 1.89k|  const SourceRange &R = S.getSourceRange();
  834| 1.89k|  LoopStack.push(LoopHeader.getBlock(), CGM.getContext(), CGM.getCodeGenOpts(),
  835| 1.89k|                 WhileAttrs, SourceLocToDebugLoc(R.getBegin()),
  836| 1.89k|                 SourceLocToDebugLoc(R.getEnd()),
  837| 1.89k|                 checkIfLoopMustProgress(CondIsConstInt));
  838|      |
  839|      |  // As long as the condition is true, go to the loop body.
  840| 1.89k|  llvm::BasicBlock *LoopBody = createBasicBlock("while.body");
  841| 1.89k|  if (EmitBoolCondBranch) {
  842| 1.44k|    llvm::BasicBlock *ExitBlock = LoopExit.getBlock();
  843| 1.44k|    if (ConditionScope.requiresCleanups())
  844|     3|      ExitBlock = createBasicBlock("while.exit");
  845| 1.44k|    llvm::MDNode *Weights =
  846| 1.44k|        createProfileWeightsForLoop(S.getCond(), getProfileCount(S.getBody()));
  847| 1.44k|    if (!Weights && CGM.getCodeGenOpts().OptimizationLevel)
  848|    49|      BoolCondVal = emitCondLikelihoodViaExpectIntrinsic(
  849|    49|          BoolCondVal, Stmt::getLikelihood(S.getBody()));
  850| 1.44k|    Builder.CreateCondBr(BoolCondVal, LoopBody, ExitBlock, Weights);
  851|      |
  852| 1.44k|    if (ExitBlock != LoopExit.getBlock()) {
  853|     3|      EmitBlock(ExitBlock);
  854|     3|      EmitBranchThroughCleanup(LoopExit);
  855|     3|    }
  856| 1.44k|  } else if (const Attr *A = Stmt::getLikelihoodAttr(S.getBody())) {
  857|     2|    CGM.getDiags().Report(A->getLocation(),
  858|     2|                          diag::warn_attribute_has_no_effect_on_infinite_loop)
  859|     2|        << A << A->getRange();
  860|     2|    CGM.getDiags().Report(
  861|     2|        S.getWhileLoc(),
  862|     2|        diag::note_attribute_has_no_effect_on_infinite_loop_here)
  863|     2|        << SourceRange(S.getWhileLoc(), S.getRParenLoc());
  864|     2|  }
  865|      |
  866|      |  // Emit the loop body.  We have to emit this in a cleanup scope
  867|      |  // because it might be a singleton DeclStmt.
  868| 1.89k|  {
  869| 1.89k|    RunCleanupsScope BodyScope(*this);
  870| 1.89k|    EmitBlock(LoopBody);
  871| 1.89k|    incrementProfileCounter(&S);
  872| 1.89k|    EmitStmt(S.getBody());
  873| 1.89k|  }
  874|      |
  875| 1.89k|  BreakContinueStack.pop_back();
  876|      |
  877|      |  // Immediately force cleanup.
  878| 1.89k|  ConditionScope.ForceCleanup();
  879|      |
  880| 1.89k|  EmitStopPoint(&S);
  881|      |  // Branch to the loop header again.
  882| 1.89k|  EmitBranch(LoopHeader.getBlock());
  883|      |
  884| 1.89k|  LoopStack.pop();
  885|      |
  886|      |  // Emit the exit block.
  887| 1.89k|  EmitBlock(LoopExit.getBlock(), true);
  888|      |
  889|      |  // The LoopHeader typically is just a branch if we skipped emitting
  890|      |  // a branch, try to erase it.
  891| 1.89k|  if (!EmitBoolCondBranch)
  892|   449|    SimplifyForwardingBlocks(LoopHeader.getBlock());
  893| 1.89k|}
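
'while (1)' is the EmitBoolCondBranch == false case above: no conditional branch is emitted, and the header block is later erased by SimplifyForwardingBlocks (an illustrative sketch, not from the report):

  int wait_for(volatile int *flag) {
    while (1) {          // constant-true condition: no exit branch emitted
      if (*flag)
        break;           // still routed through BreakContinueStack
    }
    return *flag;
  }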
  894|      |
  895|      |void CodeGenFunction::EmitDoStmt(const DoStmt &S,
  896|   752|                                 ArrayRef<const Attr *> DoAttrs) {
  897|   752|  JumpDest LoopExit = getJumpDestInCurrentScope("do.end");
  898|   752|  JumpDest LoopCond = getJumpDestInCurrentScope("do.cond");
  899|      |
  900|   752|  uint64_t ParentCount = getCurrentProfileCount();
  901|      |
  902|      |  // Store the blocks to use for break and continue.
  903|   752|  BreakContinueStack.push_back(BreakContinue(LoopExit, LoopCond));
  904|      |
  905|      |  // Emit the body of the loop.
  906|   752|  llvm::BasicBlock *LoopBody = createBasicBlock("do.body");
  907|      |
  908|   752|  EmitBlockWithFallThrough(LoopBody, &S);
  909|   752|  {
  910|   752|    RunCleanupsScope BodyScope(*this);
  911|   752|    EmitStmt(S.getBody());
  912|   752|  }
  913|      |
  914|   752|  EmitBlock(LoopCond.getBlock());
  915|      |
  916|      |  // C99 6.8.5.2: "The evaluation of the controlling expression takes place
  917|      |  // after each execution of the loop body."
  918|      |
  919|      |  // Evaluate the conditional in the while header.
  920|      |  // C99 6.8.5p2/p4: The first substatement is executed if the expression
  921|      |  // compares unequal to 0.  The condition must be a scalar type.
  922|   752|  llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());
  923|      |
  924|   752|  BreakContinueStack.pop_back();
  925|      |
  926|      |  // "do {} while (0)" is common in macros, avoid extra blocks.  Be sure
  927|      |  // to correctly handle break/continue though.
  928|   752|  llvm::ConstantInt *C = dyn_cast<llvm::ConstantInt>(BoolCondVal);
  929|   752|  bool CondIsConstInt = C;
  930|   752|  bool EmitBoolCondBranch = !C || !C->isZero();
  931|      |
  932|   752|  const SourceRange &R = S.getSourceRange();
  933|   752|  LoopStack.push(LoopBody, CGM.getContext(), CGM.getCodeGenOpts(), DoAttrs,
  934|   752|                 SourceLocToDebugLoc(R.getBegin()),
  935|   752|                 SourceLocToDebugLoc(R.getEnd()),
  936|   752|                 checkIfLoopMustProgress(CondIsConstInt));
  937|      |
  938|      |  // As long as the condition is true, iterate the loop.
  939|   752|  if (EmitBoolCondBranch) {
  940|   289|    uint64_t BackedgeCount = getProfileCount(S.getBody()) - ParentCount;
  941|   289|    Builder.CreateCondBr(
  942|   289|        BoolCondVal, LoopBody, LoopExit.getBlock(),
  943|   289|        createProfileWeightsForLoop(S.getCond(), BackedgeCount));
  944|   289|  }
  945|      |
  946|   752|  LoopStack.pop();
  947|      |
  948|      |  // Emit the exit block.
  949|   752|  EmitBlock(LoopExit.getBlock());
  950|      |
  951|      |  // The DoCond block typically is just a branch if we skipped
  952|      |  // emitting a branch, try to erase it.
  953|   752|  if (!EmitBoolCondBranch)
  954|   463|    SimplifyForwardingBlocks(LoopCond.getBlock());
  955|   752|}
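
The "do {} while (0)" idiom called out above usually comes from macros (an illustrative sketch, not from the report):

  #define SWAP(a, b)   \
    do {               \
      int tmp = (a);   \
      (a) = (b);       \
      (b) = tmp;       \
    } while (0)        /* constant-false: no backedge branch emitted */

  void order(int &x, int &y) {
    if (x > y)
      SWAP(x, y);      // expands to one statement, so the ';' parses cleanly
  }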
  956|      |
  957|      |void CodeGenFunction::EmitForStmt(const ForStmt &S,
  958| 17.1k|                                  ArrayRef<const Attr *> ForAttrs) {
  959| 17.1k|  JumpDest LoopExit = getJumpDestInCurrentScope("for.end");
  960|      |
  961| 17.1k|  LexicalScope ForScope(*this, S.getSourceRange());
  962|      |
  963|      |  // Evaluate the first part before the loop.
  964| 17.1k|  if (S.getInit())
  965| 16.5k|    EmitStmt(S.getInit());
  966|      |
  967|      |  // Start the loop with a block that tests the condition.
  968|      |  // If there's an increment, the continue scope will be overwritten
  969|      |  // later.
  970| 17.1k|  JumpDest CondDest = getJumpDestInCurrentScope("for.cond");
  971| 17.1k|  llvm::BasicBlock *CondBlock = CondDest.getBlock();
  972| 17.1k|  EmitBlock(CondBlock);
  973|      |
  974| 17.1k|  Expr::EvalResult Result;
  975| 17.1k|  bool CondIsConstInt =
  976| 17.1k|      !S.getCond() || S.getCond()->EvaluateAsInt(Result, getContext());
  977|      |
  978| 17.1k|  const SourceRange &R = S.getSourceRange();
  979| 17.1k|  LoopStack.push(CondBlock, CGM.getContext(), CGM.getCodeGenOpts(), ForAttrs,
  980| 17.1k|                 SourceLocToDebugLoc(R.getBegin()),
  981| 17.1k|                 SourceLocToDebugLoc(R.getEnd()),
  982| 17.1k|                 checkIfLoopMustProgress(CondIsConstInt));
  983|      |
  984|      |  // Create a cleanup scope for the condition variable cleanups.
  985| 17.1k|  LexicalScope ConditionScope(*this, S.getSourceRange());
  986|      |
  987|      |  // If the for loop doesn't have an increment we can just use the condition as
  988|      |  // the continue block. Otherwise, if there is no condition variable, we can
  989|      |  // form the continue block now. If there is a condition variable, we can't
  990|      |  // form the continue block until after we've emitted the condition, because
  991|      |  // the condition is in scope in the increment, but Sema's jump diagnostics
  992|      |  // ensure that there are no continues from the condition variable that jump
  993|      |  // to the loop increment.
  994| 17.1k|  JumpDest Continue;
  995| 17.1k|  if (!S.getInc())
  996|   275|    Continue = CondDest;
  997| 16.8k|  else if (!S.getConditionVariable())
  998| 16.8k|    Continue = getJumpDestInCurrentScope("for.inc");
  999| 17.1k|  BreakContinueStack.push_back(BreakContinue(LoopExit, Continue));
 1000|      |
 1001| 17.1k|  if (S.getCond()) {
 1002|      |    // If the for statement has a condition scope, emit the local variable
 1003|      |    // declaration.
 1004| 17.0k|    if (S.getConditionVariable()) {
 1005|     7|      EmitDecl(*S.getConditionVariable());
 1006|      |
 1007|      |      // We have entered the condition variable's scope, so we're now able to
 1008|      |      // jump to the continue block.
 1009|     7|      Continue = S.getInc() ? getJumpDestInCurrentScope("for.inc") : CondDest;
 1010|     7|      BreakContinueStack.back().ContinueBlock = Continue;
 1011|     7|    }
 1012|      |
 1013| 17.0k|    llvm::BasicBlock *ExitBlock = LoopExit.getBlock();
 1014|      |    // If there are any cleanups between here and the loop-exit scope,
 1015|      |    // create a block to stage a loop exit along.
 1016| 17.0k|    if (ForScope.requiresCleanups())
 1017|   139|      ExitBlock = createBasicBlock("for.cond.cleanup");
 1018|      |
 1019|      |    // As long as the condition is true, iterate the loop.
 1020| 17.0k|    llvm::BasicBlock *ForBody = createBasicBlock("for.body");
 1021|      |
 1022|      |    // C99 6.8.5p2/p4: The first substatement is executed if the expression
 1023|      |    // compares unequal to 0.  The condition must be a scalar type.
 1024| 17.0k|    llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());
 1025| 17.0k|    llvm::MDNode *Weights =
 1026| 17.0k|        createProfileWeightsForLoop(S.getCond(), getProfileCount(S.getBody()));
 1027| 17.0k|    if (!Weights && CGM.getCodeGenOpts().OptimizationLevel)
 1028|   146|      BoolCondVal = emitCondLikelihoodViaExpectIntrinsic(
 1029|   146|          BoolCondVal, Stmt::getLikelihood(S.getBody()));
 1030|      |
 1031| 17.0k|    Builder.CreateCondBr(BoolCondVal, ForBody, ExitBlock, Weights);
 1032|      |
 1033| 17.0k|    if (ExitBlock != LoopExit.getBlock()) {
 1034|   139|      EmitBlock(ExitBlock);
 1035|   139|      EmitBranchThroughCleanup(LoopExit);
 1036|   139|    }
 1037|      |
 1038| 17.0k|    EmitBlock(ForBody);
 1039| 17.0k|  } else {
 1040|      |    // Treat it as a non-zero constant.  Don't even create a new block for the
 1041|      |    // body, just fall into it.
 1042|    62|  }
 1043| 17.1k|  incrementProfileCounter(&S);
 1044|      |
 1045| 17.1k|  {
 1046|      |    // Create a separate cleanup scope for the body, in case it is not
 1047|      |    // a compound statement.
 1048| 17.1k|    RunCleanupsScope BodyScope(*this);
 1049| 17.1k|    EmitStmt(S.getBody());
 1050| 17.1k|  }
 1051|      |
 1052|      |  // If there is an increment, emit it next.
 1053| 17.1k|  if (S.getInc()) {
 1054| 16.8k|    EmitBlock(Continue.getBlock());
 1055| 16.8k|    EmitStmt(S.getInc());
 1056| 16.8k|  }
 1057|      |
 1058| 17.1k|  BreakContinueStack.pop_back();
 1059|      |
 1060| 17.1k|  ConditionScope.ForceCleanup();
 1061|      |
 1062| 17.1k|  EmitStopPoint(&S);
 1063| 17.1k|  EmitBranch(CondBlock);
 1064|      |
 1065| 17.1k|  ForScope.ForceCleanup();
 1066|      |
 1067| 17.1k|  LoopStack.pop();
 1068|      |
 1069|      |  // Emit the fall-through block.
 1070| 17.1k|  EmitBlock(LoopExit.getBlock(), true);
 1071| 17.1k|}
 1072|      |
 1073|      |void
 1074|      |CodeGenFunction::EmitCXXForRangeStmt(const CXXForRangeStmt &S,
 1075|   128|                                     ArrayRef<const Attr *> ForAttrs) {
 1076|   128|  JumpDest LoopExit = getJumpDestInCurrentScope("for.end");
 1077|      |
 1078|   128|  LexicalScope ForScope(*this, S.getSourceRange());
 1079|      |
 1080|      |  // Evaluate the first pieces before the loop.
 1081|   128|  if (S.getInit())
 1082|     2|    EmitStmt(S.getInit());
 1083|   128|  EmitStmt(S.getRangeStmt());
 1084|   128|  EmitStmt(S.getBeginStmt());
 1085|   128|  EmitStmt(S.getEndStmt());
 1086|      |
 1087|      |  // Start the loop with a block that tests the condition.
 1088|      |  // If there's an increment, the continue scope will be overwritten
 1089|      |  // later.
 1090|   128|  llvm::BasicBlock *CondBlock = createBasicBlock("for.cond");
 1091|   128|  EmitBlock(CondBlock);
 1092|      |
 1093|   128|  const SourceRange &R = S.getSourceRange();
 1094|   128|  LoopStack.push(CondBlock, CGM.getContext(), CGM.getCodeGenOpts(), ForAttrs,
 1095|   128|                 SourceLocToDebugLoc(R.getBegin()),
 1096|   128|                 SourceLocToDebugLoc(R.getEnd()));
 1097|      |
 1098|      |  // If there are any cleanups between here and the loop-exit scope,
 1099|      |  // create a block to stage a loop exit along.
 1100|   128|  llvm::BasicBlock *ExitBlock = LoopExit.getBlock();
 1101|   128|  if (ForScope.requiresCleanups())
 1102|    19|    ExitBlock = createBasicBlock("for.cond.cleanup");
 1103|      |
 1104|      |  // The loop body, consisting of the specified body and the loop variable.
 1105|   128|  llvm::BasicBlock *ForBody = createBasicBlock("for.body");
 1106|      |
 1107|      |  // The body is executed if the expression, contextually converted
 1108|      |  // to bool, is true.
 1109|   128|  llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());
 1110|   128|  llvm::MDNode *Weights =
 1111|   128|      createProfileWeightsForLoop(S.getCond(), getProfileCount(S.getBody()));
 1112|   128|  if (!Weights && CGM.getCodeGenOpts().OptimizationLevel)
 1113|    12|    BoolCondVal = emitCondLikelihoodViaExpectIntrinsic(
 1114|    12|        BoolCondVal, Stmt::getLikelihood(S.getBody()));
 1115|   128|  Builder.CreateCondBr(BoolCondVal, ForBody, ExitBlock, Weights);
 1116|      |
 1117|   128|  if (ExitBlock != LoopExit.getBlock()) {
 1118|    19|    EmitBlock(ExitBlock);
 1119|    19|    EmitBranchThroughCleanup(LoopExit);
 1120|    19|  }
 1121|      |
 1122|   128|  EmitBlock(ForBody);
 1123|   128|  incrementProfileCounter(&S);
 1124|      |
 1125|      |  // Create a block for the increment. In case of a 'continue', we jump there.
 1126|   128|  JumpDest Continue = getJumpDestInCurrentScope("for.inc");
 1127|      |
 1128|      |  // Store the blocks to use for break and continue.
 1129|   128|  BreakContinueStack.push_back(BreakContinue(LoopExit, Continue));
 1130|      |
 1131|   128|  {
 1132|      |    // Create a separate cleanup scope for the loop variable and body.
 1133|   128|    LexicalScope BodyScope(*this, S.getSourceRange());
 1134|   128|    EmitStmt(S.getLoopVarStmt());
 1135|   128|    EmitStmt(S.getBody());
 1136|   128|  }
 1137|      |
 1138|   128|  EmitStopPoint(&S);
 1139|      |  // If there is an increment, emit it next.
 1140|   128|  EmitBlock(Continue.getBlock());
 1141|   128|  EmitStmt(S.getInc());
 1142|      |
 1143|   128|  BreakContinueStack.pop_back();
 1144|      |
 1145|   128|  EmitBranch(CondBlock);
 1146|      |
 1147|   128|  ForScope.ForceCleanup();
 1148|      |
 1149|   128|  LoopStack.pop();
 1150|      |
 1151|      |  // Emit the fall-through block.
 1152|   128|  EmitBlock(LoopExit.getBlock(), true);
 1153|   128|}
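
The separate Range/Begin/End/LoopVar statements emitted above correspond to the standard desugaring of a range-based for (an approximate sketch per [stmt.ranged]; not part of the report):

  #include <vector>

  int sum(const std::vector<int> &v) {
    int total = 0;
    {
      auto &&range = v;             // S.getRangeStmt()
      auto begin = range.begin();   // S.getBeginStmt()
      auto end = range.end();       // S.getEndStmt()
      for (; begin != end; ++begin) {
        int x = *begin;             // S.getLoopVarStmt()
        total += x;                 // S.getBody()
      }
    }
    return total;
  }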
 1154|      |
 1155|   457|void CodeGenFunction::EmitReturnOfRValue(RValue RV, QualType Ty) {
 1156|   457|  if (RV.isScalar()) {
 1157|   455|    Builder.CreateStore(RV.getScalarVal(), ReturnValue);
 1158|   455|  } else if (RV.isAggregate()) {
 1159|     2|    LValue Dest = MakeAddrLValue(ReturnValue, Ty);
 1160|     2|    LValue Src = MakeAddrLValue(RV.getAggregateAddress(), Ty);
 1161|     2|    EmitAggregateCopy(Dest, Src, Ty, getOverlapForReturnValue());
 1162|     2|  } else {
 1163|     0|    EmitStoreOfComplex(RV.getComplexVal(), MakeAddrLValue(ReturnValue, Ty),
 1164|     0|                       /*init*/ true);
 1165|     0|  }
 1166|   457|  EmitBranchThroughCleanup(ReturnBlock);
 1167|   457|}
 1168|      |
 1169|      |namespace {
 1170|      |// RAII struct used to save and restore a return statement's result expression.
 1171|      |struct SaveRetExprRAII {
 1172|      |  SaveRetExprRAII(const Expr *RetExpr, CodeGenFunction &CGF)
 1173|  215k|      : OldRetExpr(CGF.RetExpr), CGF(CGF) {
 1174|  215k|    CGF.RetExpr = RetExpr;
 1175|  215k|  }
 1176|  215k|  ~SaveRetExprRAII() { CGF.RetExpr = OldRetExpr; }
 1177|      |  const Expr *OldRetExpr;
 1178|      |  CodeGenFunction &CGF;
 1179|      |};
 1180|      |} // namespace
 1181|      |
 1182|      |/// If we have 'return f(...);', where both caller and callee are SwiftAsync,
 1183|      |/// codegen it as 'tail call ...; ret void;'.
 1184|      |static void makeTailCallIfSwiftAsync(const CallExpr *CE, CGBuilderTy &Builder,
 1185| 22.6k|                                     const CGFunctionInfo *CurFnInfo) {
 1186| 22.6k|  auto calleeQualType = CE->getCallee()->getType();
 1187| 22.6k|  const FunctionType *calleeType = nullptr;
 1188| 22.6k|  if (calleeQualType->isFunctionPointerType() ||
 1189| 22.6k|      calleeQualType->isFunctionReferenceType() ||
 1190| 22.6k|      calleeQualType->isBlockPointerType() ||
 1191| 22.6k|      calleeQualType->isMemberFunctionPointerType()) {
 1192| 22.5k|    calleeType = calleeQualType->getPointeeType()->castAs<FunctionType>();
 1193| 22.5k|  } else if (auto *ty = dyn_cast<FunctionType>(calleeQualType)) {
 1194|     1|    calleeType = ty;
 1195|    45|  } else if (auto CMCE = dyn_cast<CXXMemberCallExpr>(CE)) {
 1196|    45|    if (auto methodDecl = CMCE->getMethodDecl()) {
 1197|      |      // getMethodDecl() doesn't handle member pointers at the moment.
 1198|    35|      calleeType = methodDecl->getType()->castAs<FunctionType>();
 1199|    35|    } else {
 1200|    10|      return;
 1201|    10|    }
 1202|    45|  } else {
 1203|     0|    return;
 1204|     0|  }
 1205| 22.6k|  if (calleeType->getCallConv() == CallingConv::CC_SwiftAsync &&
 1206| 22.6k|      (CurFnInfo->getASTCallingConvention() == CallingConv::CC_SwiftAsync)) {
 1207|   150|    auto CI = cast<llvm::CallInst>(&Builder.GetInsertBlock()->back());
 1208|   150|    CI->setTailCallKind(llvm::CallInst::TCK_MustTail);
 1209|   150|    Builder.CreateRetVoid();
 1210|   150|    Builder.ClearInsertionPoint();
 1211|   150|  }
 1212| 22.6k|}
 1213|      |
 1214|      |/// EmitReturnStmt - Note that due to GCC extensions, this can have an operand
 1215|      |/// if the function returns void, or may be missing one if the function returns
 1216|      |/// non-void.  Fun stuff :).
 1217|  215k|void CodeGenFunction::EmitReturnStmt(const ReturnStmt &S) {
 1218|  215k|  if (requiresReturnValueCheck()) {
 1219|    15|    llvm::Constant *SLoc = EmitCheckSourceLocation(S.getBeginLoc());
 1220|    15|    auto *SLocPtr =
 1221|    15|        new llvm::GlobalVariable(CGM.getModule(), SLoc->getType(), false,
 1222|    15|                                 llvm::GlobalVariable::PrivateLinkage, SLoc);
 1223|    15|    SLocPtr->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
 1224|    15|    CGM.getSanitizerMetadata()->disableSanitizerForGlobal(SLocPtr);
 1225|    15|    assert(ReturnLocation.isValid() && "No valid return location");
 1226|     0|    Builder.CreateStore(Builder.CreateBitCast(SLocPtr, Int8PtrTy),
 1227|    15|                        ReturnLocation);
 1228|    15|  }
 1229|      |
 1230|      |  // Returning from an outlined SEH helper is UB, and we already warn on it.
 1231|  215k|  if (IsOutlinedSEHHelper) {
 1232|     8|    Builder.CreateUnreachable();
 1233|     8|    Builder.ClearInsertionPoint();
 1234|     8|  }
 1235|      |
 1236|      |  // Emit the result value, even if unused, to evaluate the side effects.
 1237|  215k|  const Expr *RV = S.getRetValue();
 1238|      |
 1239|      |  // Record the result expression of the return statement. The recorded
 1240|      |  // expression is used to determine whether a block capture's lifetime should
 1241|      |  // end at the end of the full expression as opposed to the end of the scope
 1242|      |  // enclosing the block expression.
 1243|      |  //
 1244|      |  // This permits a small, easily-implemented exception to our over-conservative
 1245|      |  // rules about not jumping to statements following block literals with
 1246|      |  // non-trivial cleanups.
 1247|  215k|  SaveRetExprRAII SaveRetExpr(RV, *this);
 1248|      |
 1249|  215k|  RunCleanupsScope cleanupScope(*this);
 1250|  215k|  if (const auto *EWC = dyn_cast_or_null<ExprWithCleanups>(RV))
 1251| 5.65k|    RV = EWC->getSubExpr();
 1252|      |  // FIXME: Clean this up by using an LValue for ReturnTemp,
 1253|      |  // EmitStoreThroughLValue, and EmitAnyExpr.
 1254|      |  // Check if the NRVO candidate was not globalized in OpenMP mode.
 1255|  215k|  if (getLangOpts().ElideConstructors && S.getNRVOCandidate() &&
 1256|  215k|      S.getNRVOCandidate()->isNRVOVariable() &&
 1257|  215k|      (!getLangOpts().OpenMP ||
 1258| 1.38k|       !CGM.getOpenMPRuntime()
 1259|    92|            .getAddressOfLocalVariable(*this, S.getNRVOCandidate())
 1260| 1.36k|            .isValid())) {
 1261|      |    // Apply the named return value optimization for this return statement,
 1262|      |    // which means doing nothing: the appropriate result has already been
 1263|      |    // constructed into the NRVO variable.
 1264|      |
 1265|      |    // If there is an NRVO flag for this variable, set it to 1 to indicate
 1266|      |    // that the cleanup code should not destroy the variable.
 1267| 1.36k|    if (llvm::Value *NRVOFlag = NRVOFlags[S.getNRVOCandidate()])
 1268|   218|      Builder.CreateFlagStore(Builder.getTrue(), NRVOFlag);
 1269|  214k|  } else if (!ReturnValue.isValid() || (RV && RV->getType()->isVoidType())) {
 1270|      |    // Make sure not to return anything, but evaluate the expression
 1271|      |    // for side effects.
 1272| 23.5k|    if (RV) {
 1273| 22.6k|      EmitAnyExpr(RV);
 1274| 22.6k|      if (auto *CE = dyn_cast<CallExpr>(RV))
 1275| 22.6k|        makeTailCallIfSwiftAsync(CE, Builder, CurFnInfo);
 1276| 22.6k|    }
 1277|  190k|  } else if (!RV) {
 1278|      |    // Do nothing (return value is left uninitialized)
 1279|  190k|  } else if (FnRetTy->isReferenceType()) {
 1280|      |    // If this function returns a reference, take the address of the expression
 1281|      |    // rather than the value.
 1282| 23.5k|    RValue Result = EmitReferenceBindingToExpr(RV);
 1283| 23.5k|    Builder.CreateStore(Result.getScalarVal(), ReturnValue);
 1284|  167k|  } else {
 1285|  167k|    switch (getEvaluationKind(RV->getType())) {
 1286|  160k|    case TEK_Scalar:
 1287|  160k|      Builder.CreateStore(EmitScalarExpr(RV), ReturnValue);
 1288|  160k|      break;
 1289|   713|    case TEK_Complex:
 1290|   713|      EmitComplexExprIntoLValue(RV, MakeAddrLValue(ReturnValue, RV->getType()),
 1291|   713|                                /*isInit*/ true);
 1292|   713|      break;
 1293| 6.32k|    case TEK_Aggregate:
 1294| 6.32k|      EmitAggExpr(RV, AggValueSlot::forAddr(
 1295| 6.32k|                          ReturnValue, Qualifiers(),
 1296| 6.32k|                          AggValueSlot::IsDestructed,
 1297| 6.32k|                          AggValueSlot::DoesNotNeedGCBarriers,
 1298| 6.32k|                          AggValueSlot::IsNotAliased,
 1299| 6.32k|                          getOverlapForReturnValue()));
 1300| 6.32k|      break;
 1301|  167k|    }
 1302|  167k|  }
 1303|      |
 1304|  215k|  ++NumReturnExprs;
 1305|  215k|  if (!RV || RV->isEvaluatable(getContext()))
 1306| 15.5k|    ++NumSimpleReturnExprs;
 1307|      |
 1308|  215k|  cleanupScope.ForceCleanup();
 1309|  215k|  EmitBranchThroughCleanup(ReturnBlock);
 1310|  215k|}
1311
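The NRVO fast path above is easiest to see from the caller's side. A minimal sketch (hypothetical example, not from this file): the named local is constructed directly in the return slot, so the return statement emits no copy or move, at most a store to the NRVO flag that guards the cleanup.

#include <string>

std::string makeGreeting() {
  std::string Result("hello");  // NRVO candidate, built in the return slot
  Result += ", world";
  return Result;  // no copy/move emitted; only the NRVO flag may be stored
}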
1312
201k
void CodeGenFunction::EmitDeclStmt(const DeclStmt &S) {
1313
  // As long as debug info is modeled with instructions, we have to ensure we
1314
  // have a place to insert here and write the stop point here.
1315
201k
  if (HaveInsertPoint())
1316
201k
    EmitStopPoint(&S);
1317
1318
201k
  for (const auto *I : S.decls())
1319
203k
    EmitDecl(*I);
1320
201k
}
1321
1322
5.70k
void CodeGenFunction::EmitBreakStmt(const BreakStmt &S) {
1323
5.70k
  assert(!BreakContinueStack.empty() && "break stmt not in a loop or switch!");
1324
1325
  // If this code is reachable then emit a stop point (if generating
1326
  // debug info). We have to do this ourselves because we are on the
1327
  // "simple" statement path.
1328
5.70k
  if (HaveInsertPoint())
1329
5.70k
    EmitStopPoint(&S);
1330
1331
5.70k
  EmitBranchThroughCleanup(BreakContinueStack.back().BreakBlock);
1332
5.70k
}
1333
1334
10.8k
void CodeGenFunction::EmitContinueStmt(const ContinueStmt &S) {
1335
10.8k
  assert(!BreakContinueStack.empty() && "continue stmt not in a loop!");
1336
1337
  // If this code is reachable then emit a stop point (if generating
1338
  // debug info). We have to do this ourselves because we are on the
1339
  // "simple" statement path.
1340
10.8k
  if (HaveInsertPoint())
1341
10.8k
    EmitStopPoint(&S);
1342
1343
10.8k
  EmitBranchThroughCleanup(BreakContinueStack.back().ContinueBlock);
1344
10.8k
}
1345
1346
/// EmitCaseStmtRange - If the case statement range is not too big, then
1347
/// add multiple cases to the switch instruction, one for each value within
1348
/// the range. If the range is too big, then emit an "if" condition check.
1349
void CodeGenFunction::EmitCaseStmtRange(const CaseStmt &S,
1350
43
                                        ArrayRef<const Attr *> Attrs) {
1351
43
  assert(S.getRHS() && "Expected RHS value in CaseStmt");
1352
1353
0
  llvm::APSInt LHS = S.getLHS()->EvaluateKnownConstInt(getContext());
1354
43
  llvm::APSInt RHS = S.getRHS()->EvaluateKnownConstInt(getContext());
1355
1356
  // Emit the code for this case. We do this first to make sure it is
1357
  // properly chained from our predecessor before generating the
1358
  // switch machinery to enter this block.
1359
43
  llvm::BasicBlock *CaseDest = createBasicBlock("sw.bb");
1360
43
  EmitBlockWithFallThrough(CaseDest, &S);
1361
43
  EmitStmt(S.getSubStmt());
1362
1363
  // If range is empty, do nothing.
1364
43
  if (LHS.isSigned() ? RHS.slt(LHS) : RHS.ult(LHS))
1365
4
    return;
1366
1367
39
  Stmt::Likelihood LH = Stmt::getLikelihood(Attrs);
1368
39
  llvm::APInt Range = RHS - LHS;
1369
  // FIXME: parameters such as this should not be hardcoded.
1370
39
  if (Range.ult(llvm::APInt(Range.getBitWidth(), 64))) {
1371
    // Range is small enough to add multiple switch instruction cases.
1372
22
    uint64_t Total = getProfileCount(&S);
1373
22
    unsigned NCases = Range.getZExtValue() + 1;
1374
    // We only have one region counter for the entire set of cases here, so we
1375
    // need to divide the weights evenly between the generated cases, ensuring
1376
    // that the total weight is preserved. E.g., a weight of 5 over three cases
1377
    // will be distributed as weights of 2, 2, and 1.
1378
22
    uint64_t Weight = Total / NCases, Rem = Total % NCases;
1379
103
    for (unsigned I = 0; I != NCases; ++I) {
1380
81
      if (SwitchWeights)
1381
24
        SwitchWeights->push_back(Weight + (Rem ? 1 : 0));
1382
57
      else if (SwitchLikelihood)
1383
35
        SwitchLikelihood->push_back(LH);
1384
1385
81
      if (Rem)
1386
4
        Rem--;
1387
81
      SwitchInsn->addCase(Builder.getInt(LHS), CaseDest);
1388
81
      ++LHS;
1389
81
    }
1390
22
    return;
1391
22
  }
1392
1393
  // The range is too big. Emit "if" condition into a new block,
1394
  // making sure to save and restore the current insertion point.
1395
17
  llvm::BasicBlock *RestoreBB = Builder.GetInsertBlock();
1396
1397
  // Push this test onto the chain of range checks (which terminates
1398
  // in the default basic block). The switch's default will be changed
1399
  // to the top of this chain after switch emission is complete.
1400
17
  llvm::BasicBlock *FalseDest = CaseRangeBlock;
1401
17
  CaseRangeBlock = createBasicBlock("sw.caserange");
1402
1403
17
  CurFn->getBasicBlockList().push_back(CaseRangeBlock);
1404
17
  Builder.SetInsertPoint(CaseRangeBlock);
1405
1406
  // Emit range check.
1407
17
  llvm::Value *Diff =
1408
17
    Builder.CreateSub(SwitchInsn->getCondition(), Builder.getInt(LHS));
1409
17
  llvm::Value *Cond =
1410
17
    Builder.CreateICmpULE(Diff, Builder.getInt(Range), "inbounds");
1411
1412
17
  llvm::MDNode *Weights = nullptr;
1413
17
  if (SwitchWeights) {
1414
8
    uint64_t ThisCount = getProfileCount(&S);
1415
8
    uint64_t DefaultCount = (*SwitchWeights)[0];
1416
8
    Weights = createProfileWeights(ThisCount, DefaultCount);
1417
1418
    // Since we're chaining the switch default through each large case range, we
1419
    // need to update the weight for the default, i.e., the first case, to include
1420
    // this case.
1421
8
    (*SwitchWeights)[0] += ThisCount;
1422
9
  } else if (SwitchLikelihood)
1423
7
    Cond = emitCondLikelihoodViaExpectIntrinsic(Cond, LH);
1424
1425
17
  Builder.CreateCondBr(Cond, CaseDest, FalseDest, Weights);
1426
1427
  // Restore the appropriate insertion point.
1428
17
  if (RestoreBB)
1429
13
    Builder.SetInsertPoint(RestoreBB);
1430
4
  else
1431
4
    Builder.ClearInsertionPoint();
1432
17
}
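The weight distribution above can be restated as a standalone helper. A minimal sketch (splitWeights is a hypothetical name, not part of CGStmt.cpp): a single region count Total is spread over NCases values so the per-case weights sum back to Total, e.g. 5 over three cases yields 2, 2, 1.

#include <cstdint>
#include <vector>

std::vector<uint64_t> splitWeights(uint64_t Total, unsigned NCases) {
  std::vector<uint64_t> Weights;
  uint64_t Weight = Total / NCases, Rem = Total % NCases;
  for (unsigned I = 0; I != NCases; ++I) {
    Weights.push_back(Weight + (Rem ? 1 : 0));  // first Rem cases get one extra
    if (Rem)
      --Rem;
  }
  return Weights;
}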
1433
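The "inbounds" compare emitted for the large-range path relies on a standard unsigned-subtraction trick: shifting the range to start at zero lets a single comparison test both bounds. A sketch for 64-bit values (hypothetical helper; assumes LHS <= RHS, which the empty-range early return guarantees):

#include <cstdint>

bool inCaseRange(uint64_t Cond, uint64_t LHS, uint64_t RHS) {
  // Wrapping subtraction maps [LHS, RHS] onto [0, RHS - LHS], so one
  // unsigned comparison checks both bounds at once.
  return Cond - LHS <= RHS - LHS;
}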
1434
void CodeGenFunction::EmitCaseStmt(const CaseStmt &S,
1435
995
                                   ArrayRef<const Attr *> Attrs) {
1436
  // If there is no enclosing switch instance that we're aware of, then this
1437
  // case statement and its block can be elided.  This situation only happens
1438
  // when we've constant-folded the switch, are emitting the constant case,
1439
  // and part of the constant case includes another case statement.  For
1440
  // instance: switch (4) { case 4: do { case 5: } while (1); }
1441
995
  if (!SwitchInsn) {
1442
2
    EmitStmt(S.getSubStmt());
1443
2
    return;
1444
2
  }
1445
1446
  // Handle case ranges.
1447
993
  if (S.getRHS()) {
1448
43
    EmitCaseStmtRange(S, Attrs);
1449
43
    return;
1450
43
  }
1451
1452
950
  llvm::ConstantInt *CaseVal =
1453
950
    Builder.getInt(S.getLHS()->EvaluateKnownConstInt(getContext()));
1454
950
  if (SwitchLikelihood)
1455
116
    SwitchLikelihood->push_back(Stmt::getLikelihood(Attrs));
1456
1457
  // If the body of the case is just a 'break', try to not emit an empty block.
1458
  // If we're profiling or we're not optimizing, leave the block in for better
1459
  // debug and coverage analysis.
1460
950
  if (!CGM.getCodeGenOpts().hasProfileClangInstr() &&
1461
950
      CGM.getCodeGenOpts().OptimizationLevel > 0 &&
1462
950
      isa<BreakStmt>(S.getSubStmt())) {
1463
15
    JumpDest Block = BreakContinueStack.back().BreakBlock;
1464
1465
    // Only do this optimization if there are no cleanups that need emitting.
1466
15
    if (isObviouslyBranchWithoutCleanups(Block)) {
1467
15
      if (SwitchWeights)
1468
0
        SwitchWeights->push_back(getProfileCount(&S));
1469
15
      SwitchInsn->addCase(CaseVal, Block.getBlock());
1470
1471
      // If there was a fallthrough into this case, make sure to redirect it to
1472
      // the end of the switch as well.
1473
15
      if (Builder.GetInsertBlock()) {
1474
0
        Builder.CreateBr(Block.getBlock());
1475
0
        Builder.ClearInsertionPoint();
1476
0
      }
1477
15
      return;
1478
15
    }
1479
15
  }
1480
1481
935
  llvm::BasicBlock *CaseDest = createBasicBlock("sw.bb");
1482
935
  EmitBlockWithFallThrough(CaseDest, &S);
1483
935
  if (SwitchWeights)
1484
47
    SwitchWeights->push_back(getProfileCount(&S));
1485
935
  SwitchInsn->addCase(CaseVal, CaseDest);
1486
1487
  // Recursively emitting the statement is acceptable, but is not wonderful for
1488
  // code where we have many case statements nested together, i.e.:
1489
  //  case 1:
1490
  //    case 2:
1491
  //      case 3: etc.
1492
  // Handling this recursively will create a new block for each case statement
1493
  // that falls through to the next case, which is IR intensive.  It also causes
1494
  // deep recursion which can run into stack depth limitations.  Handle
1495
  // sequential non-range case statements specially.
1496
  //
1497
  // TODO When the next case has a likelihood attribute the code returns to the
1498
  // recursive algorithm. Maybe improve this case if it becomes common practice
1499
  // to use a lot of attributes.
1500
935
  const CaseStmt *CurCase = &S;
1501
935
  const CaseStmt *NextCase = dyn_cast<CaseStmt>(S.getSubStmt());
1502
1503
  // Otherwise, iteratively add consecutive cases to this switch stmt.
1504
1.05k
  while (NextCase && NextCase->getRHS() == nullptr) {
1505
116
    CurCase = NextCase;
1506
116
    llvm::ConstantInt *CaseVal =
1507
116
      Builder.getInt(CurCase->getLHS()->EvaluateKnownConstInt(getContext()));
1508
1509
116
    if (SwitchWeights)
1510
0
      SwitchWeights->push_back(getProfileCount(NextCase));
1511
116
    if (CGM.getCodeGenOpts().hasProfileClangInstr()) {
1512
3
      CaseDest = createBasicBlock("sw.bb");
1513
3
      EmitBlockWithFallThrough(CaseDest, CurCase);
1514
3
    }
1515
    // Since this loop is only executed when the CaseStmt has no attributes
1516
    // use a hard-coded value.
1517
116
    if (SwitchLikelihood)
1518
14
      SwitchLikelihood->push_back(Stmt::LH_None);
1519
1520
116
    SwitchInsn->addCase(CaseVal, CaseDest);
1521
116
    NextCase = dyn_cast<CaseStmt>(CurCase->getSubStmt());
1522
116
  }
1523
1524
  // Generate a stop point for debug info if the case statement is
1525
  // followed by a default statement. A fallthrough case before a
1526
  // default case gets its own branch target.
1527
935
  if (CurCase->getSubStmt()->getStmtClass() == Stmt::DefaultStmtClass)
1528
2
    EmitStopPoint(CurCase);
1529
1530
  // Normal default recursion for non-cases.
1531
935
  EmitStmt(CurCase->getSubStmt());
1532
935
}
1533
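The iterative chain above is what keeps inputs like the following (hypothetical example) from recursing once per label; every consecutive case value is added to one shared destination block:

int classify(int n) {
  switch (n) {
  case 1:
  case 2:
  case 3:
    return 0;  // a single block serves the whole case chain
  default:
    return 1;
  }
}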
1534
void CodeGenFunction::EmitDefaultStmt(const DefaultStmt &S,
1535
186
                                      ArrayRef<const Attr *> Attrs) {
1536
  // If there is no enclosing switch instance that we're aware of, then this
1537
  // default statement can be elided. This situation only happens when we've
1538
  // constant-folded the switch.
1539
186
  if (!SwitchInsn) {
1540
1
    EmitStmt(S.getSubStmt());
1541
1
    return;
1542
1
  }
1543
1544
185
  llvm::BasicBlock *DefaultBlock = SwitchInsn->getDefaultDest();
1545
185
  assert(DefaultBlock->empty() &&
1546
185
         "EmitDefaultStmt: Default block already defined?");
1547
1548
185
  if (SwitchLikelihood)
1549
33
    SwitchLikelihood->front() = Stmt::getLikelihood(Attrs);
1550
1551
185
  EmitBlockWithFallThrough(DefaultBlock, &S);
1552
1553
185
  EmitStmt(S.getSubStmt());
1554
185
}
1555
1556
/// CollectStatementsForCase - Given the body of a 'switch' statement and a
1557
/// constant value that is being switched on, see if we can dead code eliminate
1558
/// the body of the switch to a simple series of statements to emit.  Basically,
1559
/// on a switch (5) we want to find these statements:
1560
///    case 5:
1561
///      printf(...);    <--
1562
///      ++i;            <--
1563
///      break;
1564
///
1565
/// and add them to the ResultStmts vector.  If it is unsafe to do this
1566
/// transformation (for example, one of the elided statements contains a label
1567
/// that might be jumped to), return CSFC_Failure.  If we handled it and 'S'
1568
/// should include statements after it (e.g. the printf() line is a substmt of
1569
/// the case) then return CSFC_FallThrough.  If we handled it and found a break
1570
/// statement, then return CSFC_Success.
1571
///
1572
/// If Case is non-null, then we are looking for the specified case, checking
1573
/// that nothing we jump over contains labels.  If Case is null, then we found
1574
/// the case and are looking for the break.
1575
///
1576
/// If the recursive walk actually finds our Case, then we set FoundCase to
1577
/// true.
1578
///
1579
enum CSFC_Result { CSFC_Failure, CSFC_FallThrough, CSFC_Success };
1580
static CSFC_Result CollectStatementsForCase(const Stmt *S,
1581
                                            const SwitchCase *Case,
1582
                                            bool &FoundCase,
1583
497
                              SmallVectorImpl<const Stmt*> &ResultStmts) {
1584
  // If this is a null statement, just succeed.
1585
497
  if (!S)
1586
0
    return Case ? CSFC_Success : CSFC_FallThrough;
1587
1588
  // If this is the switchcase (case 4: or default) that we're looking for, then
1589
  // we're in business.  Just add the substatement.
1590
497
  if (const SwitchCase *SC = dyn_cast<SwitchCase>(S)) {
1591
146
    if (S == Case) {
1592
78
      FoundCase = true;
1593
78
      return CollectStatementsForCase(SC->getSubStmt(), nullptr, FoundCase,
1594
78
                                      ResultStmts);
1595
78
    }
1596
1597
    // Otherwise, this is some other case or default statement, just ignore it.
1598
68
    return CollectStatementsForCase(SC->getSubStmt(), Case, FoundCase,
1599
68
                                    ResultStmts);
1600
146
  }
1601
1602
  // If we are in the live part of the code and we found our break statement,
1603
  // return a success!
1604
351
  if (!Case && isa<BreakStmt>(S))
1605
60
    return CSFC_Success;
1606
1607
  // If this is a switch statement, then it might contain the SwitchCase, the
1608
  // break, or neither.
1609
291
  if (const CompoundStmt *CS = dyn_cast<CompoundStmt>(S)) {
1610
    // Handle this as two cases: we might be looking for the SwitchCase (if so
1611
    // the skipped statements must be skippable) or we might already have it.
1612
83
    CompoundStmt::const_body_iterator I = CS->body_begin(), E = CS->body_end();
1613
83
    bool StartedInLiveCode = FoundCase;
1614
83
    unsigned StartSize = ResultStmts.size();
1615
1616
    // If we've not found the case yet, scan through looking for it.
1617
83
    if (Case) {
1618
      // Keep track of whether we see a skipped declaration.  The code could be
1619
      // using the declaration even if it is skipped, so we can't optimize out
1620
      // the decl if the kept statements might refer to it.
1621
76
      bool HadSkippedDecl = false;
1622
1623
      // If we're looking for the case, just see if we can skip each of the
1624
      // substatements.
1625
268
      for (; Case && I != E; ++I) {
1626
205
        HadSkippedDecl |= CodeGenFunction::mightAddDeclToScope(*I);
1627
1628
205
        switch (CollectStatementsForCase(*I, Case, FoundCase, ResultStmts)) {
1629
2
        case CSFC_Failure: return CSFC_Failure;
1630
138
        case CSFC_Success:
1631
          // A successful result means that either 1) that the statement doesn't
1632
          // have the case and is skippable, or 2) does contain the case value
1633
          // and also contains the break to exit the switch.  In the later case,
1634
          // we just verify the rest of the statements are elidable.
1635
138
          if (FoundCase) {
1636
            // If we found the case and skipped declarations, we can't do the
1637
            // optimization.
1638
8
            if (HadSkippedDecl)
1639
0
              return CSFC_Failure;
1640
1641
18
            for (++I; I != E; ++I)
1642
10
              if (CodeGenFunction::ContainsLabel(*I, true))
1643
0
                return CSFC_Failure;
1644
8
            return CSFC_Success;
1645
8
          }
1646
130
          break;
1647
130
        case CSFC_FallThrough:
1648
          // If we have a fallthrough condition, then we must have found the
1649
          // case started to include statements.  Consider the rest of the
1650
          // statements in the compound statement as candidates for inclusion.
1651
65
          assert(FoundCase && "Didn't find case but returned fallthrough?");
1652
          // We recursively found Case, so we're not looking for it anymore.
1653
0
          Case = nullptr;
1654
1655
          // If we found the case and skipped declarations, we can't do the
1656
          // optimization.
1657
65
          if (HadSkippedDecl)
1658
3
            return CSFC_Failure;
1659
62
          break;
1660
205
        }
1661
205
      }
1662
1663
63
      if (!FoundCase)
1664
1
        return CSFC_Success;
1665
1666
62
      assert(!HadSkippedDecl && "fallthrough after skipping decl");
1667
62
    }
1668
1669
    // If we have statements in our range, then we know that the statements are
1670
    // live and need to be added to the set of statements we're tracking.
1671
69
    bool AnyDecls = false;
1672
84
    for (; I != E; ++I) {
1673
68
      AnyDecls |= CodeGenFunction::mightAddDeclToScope(*I);
1674
1675
68
      switch (CollectStatementsForCase(*I, nullptr, FoundCase, ResultStmts)) {
1676
0
      case CSFC_Failure: return CSFC_Failure;
1677
15
      case CSFC_FallThrough:
1678
        // A fallthrough result means that the statement was simple and just
1679
        // included in ResultStmt, keep adding them afterwards.
1680
15
        break;
1681
53
      case CSFC_Success:
1682
        // A successful result means that we found the break statement and
1683
        // stopped statement inclusion.  We just ensure that any leftover stmts
1684
        // are skippable and return success ourselves.
1685
165
        for (++I; I != E; ++I)
1686
112
          if (CodeGenFunction::ContainsLabel(*I, true))
1687
0
            return CSFC_Failure;
1688
53
        return CSFC_Success;
1689
68
      }
1690
68
    }
1691
1692
    // If we're about to fall out of a scope without hitting a 'break;', we
1693
    // can't perform the optimization if there were any decls in that scope
1694
    // (we'd lose their end-of-lifetime).
1695
16
    if (AnyDecls) {
1696
      // If the entire compound statement was live, there's one more thing we
1697
      // can try before giving up: emit the whole thing as a single statement.
1698
      // We can do that unless the statement contains a 'break;'.
1699
      // FIXME: Such a break must be at the end of a construct within this one.
1700
      // We could emit this by just ignoring the BreakStmts entirely.
1701
3
      if (StartedInLiveCode && !CodeGenFunction::containsBreak(S)) {
1702
3
        ResultStmts.resize(StartSize);
1703
3
        ResultStmts.push_back(S);
1704
3
      } else {
1705
0
        return CSFC_Failure;
1706
0
      }
1707
3
    }
1708
1709
16
    return CSFC_FallThrough;
1710
16
  }
1711
1712
  // Okay, this is some other statement that we don't handle explicitly, like a
1713
  // for statement or increment etc.  If we are skipping over this statement,
1714
  // just verify it doesn't have labels, which would make it invalid to elide.
1715
208
  if (Case) {
1716
129
    if (CodeGenFunction::ContainsLabel(S, true))
1717
0
      return CSFC_Failure;
1718
129
    return CSFC_Success;
1719
129
  }
1720
1721
  // Otherwise, we want to include this statement.  Everything is cool with that
1722
  // so long as it doesn't contain a break out of the switch we're in.
1723
79
  if (CodeGenFunction::containsBreak(S)) return CSFC_Failure;
1724
1725
  // Otherwise, everything is great.  Include the statement and tell the caller
1726
  // that we fall through and include the next statement as well.
1727
78
  ResultStmts.push_back(S);
1728
78
  return CSFC_FallThrough;
1729
79
}
1730
1731
/// FindCaseStatementsForValue - Find the case statement being jumped to and
1732
/// then invoke CollectStatementsForCase to find the list of statements to emit
1733
/// for a switch on constant.  See the comment above CollectStatementsForCase
1734
/// for more details.
1735
static bool FindCaseStatementsForValue(const SwitchStmt &S,
1736
                                       const llvm::APSInt &ConstantCondValue,
1737
                                SmallVectorImpl<const Stmt*> &ResultStmts,
1738
                                       ASTContext &C,
1739
94
                                       const SwitchCase *&ResultCase) {
1740
  // First step, find the switch case that is being branched to.  We can do this
1741
  // efficiently by scanning the SwitchCase list.
1742
94
  const SwitchCase *Case = S.getSwitchCaseList();
1743
94
  const DefaultStmt *DefaultCase = nullptr;
1744
1745
234
  for (; Case; Case = Case->getNextSwitchCase()) {
1746
    // It's either a default or case.  Just remember the default statement in
1747
    // case we're not jumping to any numbered cases.
1748
198
    if (const DefaultStmt *DS = dyn_cast<DefaultStmt>(Case)) {
1749
66
      DefaultCase = DS;
1750
66
      continue;
1751
66
    }
1752
1753
    // Check to see if this case is the one we're looking for.
1754
132
    const CaseStmt *CS = cast<CaseStmt>(Case);
1755
    // Don't handle case ranges yet.
1756
132
    if (CS->getRHS()) return false;
1757
1758
    // If we found our case, remember it as 'case'.
1759
125
    if (CS->getLHS()->EvaluateKnownConstInt(C) == ConstantCondValue)
1760
51
      break;
1761
125
  }
1762
1763
  // If we didn't find a matching case, we use a default if it exists, or we
1764
  // elide the whole switch body!
1765
87
  if (!Case) {
1766
    // It is safe to elide the body of the switch if it doesn't contain labels
1767
    // etc.  If it is safe, return successfully with an empty ResultStmts list.
1768
36
    if (!DefaultCase)
1769
9
      return !CodeGenFunction::ContainsLabel(&S);
1770
27
    Case = DefaultCase;
1771
27
  }
1772
1773
  // Ok, we know which case is being jumped to, try to collect all the
1774
  // statements that follow it.  This can fail for a variety of reasons.  Also,
1775
  // check to see that the recursive walk actually found our case statement.
1776
  // Insane cases like this can fail to find it in the recursive walk since we
1777
  // don't handle every stmt kind:
1778
  // switch (4) {
1779
  //   while (1) {
1780
  //     case 4: ...
1781
78
  bool FoundCase = false;
1782
78
  ResultCase = Case;
1783
78
  return CollectStatementsForCase(S.getBody(), Case, FoundCase,
1784
78
                                  ResultStmts) != CSFC_Failure &&
1785
78
         FoundCase;
1786
87
}
1787
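An end-to-end example of the folding these two functions perform (hypothetical input): the condition folds to 4, the statements of 'case 4' up to its 'break' are collected, and the emitted code contains no switch instruction at all.

int folded(void) {
  int R = 0;
  switch (4) {
  case 4:
    R = 10;  // collected: the only statement emitted
    break;   // ends collection with CSFC_Success
  default:
    R = 20;  // dead, elided entirely
  }
  return R;
}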
1788
static Optional<SmallVector<uint64_t, 16>>
1789
62
getLikelihoodWeights(ArrayRef<Stmt::Likelihood> Likelihoods) {
1790
  // Are there enough branches to weight them?
1791
62
  if (Likelihoods.size() <= 1)
1792
9
    return None;
1793
1794
53
  uint64_t NumUnlikely = 0;
1795
53
  uint64_t NumNone = 0;
1796
53
  uint64_t NumLikely = 0;
1797
218
  for (const auto LH : Likelihoods) {
1798
218
    switch (LH) {
1799
8
    case Stmt::LH_Unlikely:
1800
8
      ++NumUnlikely;
1801
8
      break;
1802
199
    case Stmt::LH_None:
1803
199
      ++NumNone;
1804
199
      break;
1805
11
    case Stmt::LH_Likely:
1806
11
      ++NumLikely;
1807
11
      break;
1808
218
    }
1809
218
  }
1810
1811
  // Is there a likelihood attribute used?
1812
53
  if (NumUnlikely == 0 && NumLikely == 0)
1813
37
    return None;
1814
1815
  // When multiple cases share the same code they can be combined during
1816
  // optimization. In that case the weights of the branch will be the sum of
1817
  // the individual weights. Make sure the combined sum of all neutral cases
1818
  // doesn't exceed the value of a single likely attribute.
1819
  // The additions both avoid divisions by 0 and make sure the weights of None
1820
  // don't exceed the weight of Likely.
1821
16
  const uint64_t Likely = INT32_MAX / (NumLikely + 2);
1822
16
  const uint64_t None = Likely / (NumNone + 1);
1823
16
  const uint64_t Unlikely = 0;
1824
1825
16
  SmallVector<uint64_t, 16> Result;
1826
16
  Result.reserve(Likelihoods.size());
1827
60
  for (const auto LH : Likelihoods) {
1828
60
    switch (LH) {
1829
8
    case Stmt::LH_Unlikely:
1830
8
      Result.push_back(Unlikely);
1831
8
      break;
1832
41
    case Stmt::LH_None:
1833
41
      Result.push_back(None);
1834
41
      break;
1835
11
    case Stmt::LH_Likely:
1836
11
      Result.push_back(Likely);
1837
11
      break;
1838
60
    }
1839
60
  }
1840
1841
16
  return Result;
1842
16
}
1843
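A worked instance of the weighting above (hypothetical numbers): one [[likely]] case plus three neutral entries (the default included) gives neutral weights whose combined sum still stays below a single likely weight, even if the optimizer merges all of them.

#include <climits>
#include <cstdint>

constexpr uint64_t NumLikely = 1, NumNone = 3;
constexpr uint64_t Likely = INT32_MAX / (NumLikely + 2);  // 715827882
constexpr uint64_t None = Likely / (NumNone + 1);         // 178956970
static_assert(NumNone * None < Likely,
              "merged neutral cases stay below one likely case");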
1844
492
void CodeGenFunction::EmitSwitchStmt(const SwitchStmt &S) {
1845
  // Handle nested switch statements.
1846
492
  llvm::SwitchInst *SavedSwitchInsn = SwitchInsn;
1847
492
  SmallVector<uint64_t, 16> *SavedSwitchWeights = SwitchWeights;
1848
492
  SmallVector<Stmt::Likelihood, 16> *SavedSwitchLikelihood = SwitchLikelihood;
1849
492
  llvm::BasicBlock *SavedCRBlock = CaseRangeBlock;
1850
1851
  // See if we can constant fold the condition of the switch and therefore only
1852
  // emit the live case statement (if any) of the switch.
1853
492
  llvm::APSInt ConstantCondValue;
1854
492
  if (ConstantFoldsToSimpleInteger(S.getCond(), ConstantCondValue)) {
1855
94
    SmallVector<const Stmt*, 4> CaseStmts;
1856
94
    const SwitchCase *Case = nullptr;
1857
94
    if (FindCaseStatementsForValue(S, ConstantCondValue, CaseStmts,
1858
94
                                   getContext(), Case)) {
1859
83
      if (Case)
1860
74
        incrementProfileCounter(Case);
1861
83
      RunCleanupsScope ExecutedScope(*this);
1862
1863
83
      if (S.getInit())
1864
0
        EmitStmt(S.getInit());
1865
1866
      // Emit the condition variable if needed inside the entire cleanup scope
1867
      // used by this special case for constant folded switches.
1868
83
      if (S.getConditionVariable())
1869
0
        EmitDecl(*S.getConditionVariable());
1870
1871
      // At this point, we are no longer "within" a switch instance, so
1872
      // we can temporarily enforce this to ensure that any embedded case
1873
      // statements are not emitted.
1874
83
      SwitchInsn = nullptr;
1875
1876
      // Okay, we can dead code eliminate everything except this case.  Emit the
1877
      // specified series of statements and we're good.
1878
155
      for (unsigned i = 0, e = CaseStmts.size(); i != e; ++i)
1879
72
        EmitStmt(CaseStmts[i]);
1880
83
      incrementProfileCounter(&S);
1881
1882
      // Now we want to restore the saved switch instance so that nested
1883
      // switches continue to function properly
1884
83
      SwitchInsn = SavedSwitchInsn;
1885
1886
83
      return;
1887
83
    }
1888
94
  }
1889
1890
409
  JumpDest SwitchExit = getJumpDestInCurrentScope("sw.epilog");
1891
1892
409
  RunCleanupsScope ConditionScope(*this);
1893
1894
409
  if (S.getInit())
1895
7
    EmitStmt(S.getInit());
1896
1897
409
  if (S.getConditionVariable())
1898
4
    EmitDecl(*S.getConditionVariable());
1899
409
  llvm::Value *CondV = EmitScalarExpr(S.getCond());
1900
1901
  // Create basic block to hold stuff that comes after switch
1902
  // statement. We also need to create a default block now so that
1903
  // explicit case ranges tests can have a place to jump to on
1904
  // failure.
1905
409
  llvm::BasicBlock *DefaultBlock = createBasicBlock("sw.default");
1906
409
  SwitchInsn = Builder.CreateSwitch(CondV, DefaultBlock);
1907
409
  if (PGO.haveRegionCounts()) {
1908
    // Walk the SwitchCase list to find how many there are.
1909
23
    uint64_t DefaultCount = 0;
1910
23
    unsigned NumCases = 0;
1911
23
    for (const SwitchCase *Case = S.getSwitchCaseList();
1912
101
         Case;
1913
78
         Case = Case->getNextSwitchCase()) {
1914
78
      if (isa<DefaultStmt>(Case))
1915
15
        DefaultCount = getProfileCount(Case);
1916
78
      NumCases += 1;
1917
78
    }
1918
23
    SwitchWeights = new SmallVector<uint64_t, 16>();
1919
23
    SwitchWeights->reserve(NumCases);
1920
    // The default needs to be first. We store the edge count, so we already
1921
    // know the right weight.
1922
23
    SwitchWeights->push_back(DefaultCount);
1923
386
  } else if (CGM.getCodeGenOpts().OptimizationLevel) {
1924
62
    SwitchLikelihood = new SmallVector<Stmt::Likelihood, 16>();
1925
    // Initialize the default case.
1926
62
    SwitchLikelihood->push_back(Stmt::LH_None);
1927
62
  }
1928
1929
409
  CaseRangeBlock = DefaultBlock;
1930
1931
  // Clear the insertion point to indicate we are in unreachable code.
1932
409
  Builder.ClearInsertionPoint();
1933
1934
  // All break statements jump to NextBlock. If BreakContinueStack is non-empty
1935
  // then reuse last ContinueBlock.
1936
409
  JumpDest OuterContinue;
1937
409
  if (!BreakContinueStack.empty())
1938
44
    OuterContinue = BreakContinueStack.back().ContinueBlock;
1939
1940
409
  BreakContinueStack.push_back(BreakContinue(SwitchExit, OuterContinue));
1941
1942
  // Emit switch body.
1943
409
  EmitStmt(S.getBody());
1944
1945
409
  BreakContinueStack.pop_back();
1946
1947
  // Update the default block in case explicit case range tests have
1948
  // been chained on top.
1949
409
  SwitchInsn->setDefaultDest(CaseRangeBlock);
1950
1951
  // If a default was never emitted:
1952
409
  if (!DefaultBlock->getParent()) {
1953
    // If we have cleanups, emit the default block so that there's a
1954
    // place to jump through the cleanups from.
1955
224
    if (ConditionScope.requiresCleanups()) {
1956
0
      EmitBlock(DefaultBlock);
1957
1958
    // Otherwise, just forward the default block to the switch end.
1959
224
    } else {
1960
224
      DefaultBlock->replaceAllUsesWith(SwitchExit.getBlock());
1961
224
      delete DefaultBlock;
1962
224
    }
1963
224
  }
1964
1965
409
  ConditionScope.ForceCleanup();
1966
1967
  // Emit continuation.
1968
409
  EmitBlock(SwitchExit.getBlock(), true);
1969
409
  incrementProfileCounter(&S);
1970
1971
  // If the switch has a condition wrapped by __builtin_unpredictable,
1972
  // create metadata that specifies that the switch is unpredictable.
1973
  // Don't bother if not optimizing because that metadata would not be used.
1974
409
  auto *Call = dyn_cast<CallExpr>(S.getCond());
1975
409
  if (Call && CGM.getCodeGenOpts().OptimizationLevel != 0) {
1976
8
    auto *FD = dyn_cast_or_null<FunctionDecl>(Call->getCalleeDecl());
1977
8
    if (FD && FD->getBuiltinID() == Builtin::BI__builtin_unpredictable) {
1978
2
      llvm::MDBuilder MDHelper(getLLVMContext());
1979
2
      SwitchInsn->setMetadata(llvm::LLVMContext::MD_unpredictable,
1980
2
                              MDHelper.createUnpredictable());
1981
2
    }
1982
8
  }
1983
1984
409
  if (SwitchWeights) {
1985
23
    assert(SwitchWeights->size() == 1 + SwitchInsn->getNumCases() &&
1986
23
           "switch weights do not match switch cases");
1987
    // If there's only one jump destination there's no sense weighting it.
1988
23
    if (SwitchWeights->size() > 1)
1989
19
      SwitchInsn->setMetadata(llvm::LLVMContext::MD_prof,
1990
19
                              createProfileWeights(*SwitchWeights));
1991
23
    delete SwitchWeights;
1992
386
  } else if (SwitchLikelihood) {
1993
62
    assert(SwitchLikelihood->size() == 1 + SwitchInsn->getNumCases() &&
1994
62
           "switch likelihoods do not match switch cases");
1995
0
    Optional<SmallVector<uint64_t, 16>> LHW =
1996
62
        getLikelihoodWeights(*SwitchLikelihood);
1997
62
    if (LHW) {
1998
16
      llvm::MDBuilder MDHelper(CGM.getLLVMContext());
1999
16
      SwitchInsn->setMetadata(llvm::LLVMContext::MD_prof,
2000
16
                              createProfileWeights(*LHW));
2001
16
    }
2002
62
    delete SwitchLikelihood;
2003
62
  }
2004
0
  SwitchInsn = SavedSwitchInsn;
2005
409
  SwitchWeights = SavedSwitchWeights;
2006
409
  SwitchLikelihood = SavedSwitchLikelihood;
2007
409
  CaseRangeBlock = SavedCRBlock;
2008
409
}
2009
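A hypothetical input that exercises the __builtin_unpredictable path above; compiled with optimizations enabled, the emitted switch instruction carries !unpredictable metadata.

int dispatch(long x) {
  switch (__builtin_unpredictable(x)) {
  case 0:
    return 1;
  case 1:
    return 2;
  default:
    return 3;
  }
}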
2010
static std::string
2011
SimplifyConstraint(const char *Constraint, const TargetInfo &Target,
2012
2.87k
                 SmallVectorImpl<TargetInfo::ConstraintInfo> *OutCons=nullptr) {
2013
2.87k
  std::string Result;
2014
2015
7.13k
  while (*Constraint) {
2016
4.25k
    switch (*Constraint) {
2017
3.46k
    default:
2018
3.46k
      Result += Target.convertConstraint(Constraint);
2019
3.46k
      break;
2020
    // Ignore these
2021
1
    case '*':
2022
1
    case '?':
2023
1
    case '!':
2024
3
    case '=': // Will see this and the following in mult-alt constraints.
2025
3
    case '+':
2026
3
      break;
2027
1
    case '#': // Ignore the rest of the constraint alternative.
2028
3
      while (Constraint[1] && Constraint[1] != ',')
2029
2
        Constraint++;
2030
1
      break;
2031
15
    case '&':
2032
17
    case '%':
2033
17
      Result += *Constraint;
2034
19
      while (Constraint[1] && Constraint[1] == *Constraint)
2035
2
        Constraint++;
2036
17
      break;
2037
622
    case ',':
2038
622
      Result += "|";
2039
622
      break;
2040
143
    case 'g':
2041
143
      Result += "imr";
2042
143
      break;
2043
6
    case '[': {
2044
6
      assert(OutCons &&
2045
6
             "Must pass output names to constraints with a symbolic name");
2046
0
      unsigned Index;
2047
6
      bool result = Target.resolveSymbolicName(Constraint, *OutCons, Index);
2048
6
      assert(result && "Could not resolve symbolic name"); (void)result;
2049
6
      Result += llvm::utostr(Index);
2050
6
      break;
2051
15
    }
2052
4.25k
    }
2053
2054
4.25k
    Constraint++;
2055
4.25k
  }
2056
2057
2.87k
  return Result;
2058
2.87k
}
2059
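Illustrative rewrites performed by SimplifyConstraint, assuming a target whose convertConstraint is the identity for the letters shown (the symbolic-name index is hypothetical):

  Input constraint    Resulting LLVM constraint
  "g"                 "imr"   ('g' expands to immediate/memory/register)
  "r,m"               "r|m"   (',' separates GCC alternatives; LLVM uses '|')
  "#x,r"              "|r"    ('#' drops the rest of that alternative)
  "[sym]"             "0"     (symbolic names resolve to operand indices)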
2060
/// AddVariableConstraints - Look at AsmExpr and if it is a variable declared
2061
/// as using a particular register add that as a constraint that will be used
2062
/// in this asm stmt.
2063
static std::string
2064
AddVariableConstraints(const std::string &Constraint, const Expr &AsmExpr,
2065
                       const TargetInfo &Target, CodeGenModule &CGM,
2066
                       const AsmStmt &Stmt, const bool EarlyClobber,
2067
2.87k
                       std::string *GCCReg = nullptr) {
2068
2.87k
  const DeclRefExpr *AsmDeclRef = dyn_cast<DeclRefExpr>(&AsmExpr);
2069
2.87k
  if (!AsmDeclRef)
2070
822
    return Constraint;
2071
2.05k
  const ValueDecl &Value = *AsmDeclRef->getDecl();
2072
2.05k
  const VarDecl *Variable = dyn_cast<VarDecl>(&Value);
2073
2.05k
  if (!Variable)
2074
11
    return Constraint;
2075
2.04k
  if (Variable->getStorageClass() != SC_Register)
2076
1.40k
    return Constraint;
2077
643
  AsmLabelAttr *Attr = Variable->getAttr<AsmLabelAttr>();
2078
643
  if (!Attr)
2079
576
    return Constraint;
2080
67
  StringRef Register = Attr->getLabel();
2081
67
  assert(Target.isValidGCCRegisterName(Register));
2082
  // We're using validateOutputConstraint here because we only care if
2083
  // this is a register constraint.
2084
0
  TargetInfo::ConstraintInfo Info(Constraint, "");
2085
67
  if (Target.validateOutputConstraint(Info) &&
2086
67
      !Info.allowsRegister()) {
2087
0
    CGM.ErrorUnsupported(&Stmt, "__asm__");
2088
0
    return Constraint;
2089
0
  }
2090
  // Canonicalize the register here before returning it.
2091
67
  Register = Target.getNormalizedGCCRegisterName(Register);
2092
67
  if (GCCReg != nullptr)
2093
14
    *GCCReg = Register.str();
2094
67
  return (EarlyClobber ? "&{" : "{") + Register.str() + "}";
2095
67
}
2096
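A hypothetical x86 example of the substitution above: the "r" constraint on a GNU register-asm variable is rewritten to pin the operand to the named register ("&{eax}" instead when the operand is earlyclobber).

unsigned readEax(void) {
  register unsigned Val asm("eax");  // variable bound to %eax
  asm("" : "=r"(Val));               // constraint becomes "={eax}" here
  return Val;
}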
2097
llvm::Value*
2098
CodeGenFunction::EmitAsmInputLValue(const TargetInfo::ConstraintInfo &Info,
2099
                                    LValue InputValue, QualType InputType,
2100
                                    std::string &ConstraintStr,
2101
297
                                    SourceLocation Loc) {
2102
297
  llvm::Value *Arg;
2103
297
  if (Info.allowsRegister() || !Info.allowsMemory()) {
2104
129
    if (CodeGenFunction::hasScalarEvaluationKind(InputType)) {
2105
124
      Arg = EmitLoadOfLValue(InputValue, Loc).getScalarVal();
2106
124
    } else {
2107
5
      llvm::Type *Ty = ConvertType(InputType);
2108
5
      uint64_t Size = CGM.getDataLayout().getTypeSizeInBits(Ty);
2109
5
      if ((Size <= 64 && llvm::isPowerOf2_64(Size)) ||
2110
5
          getTargetHooks().isScalarizableAsmOperand(*this, Ty)) {
2111
5
        Ty = llvm::IntegerType::get(getLLVMContext(), Size);
2112
5
        Ty = llvm::PointerType::getUnqual(Ty);
2113
2114
5
        Arg = Builder.CreateLoad(
2115
5
            Builder.CreateBitCast(InputValue.getAddress(*this), Ty));
2116
5
      } else {
2117
0
        Arg = InputValue.getPointer(*this);
2118
0
        ConstraintStr += '*';
2119
0
      }
2120
5
    }
2121
168
  } else {
2122
168
    Arg = InputValue.getPointer(*this);
2123
168
    ConstraintStr += '*';
2124
168
  }
2125
2126
297
  return Arg;
2127
297
}
2128
2129
llvm::Value* CodeGenFunction::EmitAsmInput(
2130
                                         const TargetInfo::ConstraintInfo &Info,
2131
                                           const Expr *InputExpr,
2132
1.70k
                                           std::string &ConstraintStr) {
2133
  // If this can't be a register or memory, i.e., has to be a constant
2134
  // (immediate or symbolic), try to emit it as such.
2135
1.70k
  if (!Info.allowsRegister() && !Info.allowsMemory()) {
2136
283
    if (Info.requiresImmediateConstant()) {
2137
69
      Expr::EvalResult EVResult;
2138
69
      InputExpr->EvaluateAsRValue(EVResult, getContext(), true);
2139
2140
69
      llvm::APSInt IntResult;
2141
69
      if (EVResult.Val.toIntegralConstant(IntResult, InputExpr->getType(),
2142
69
                                          getContext()))
2143
67
        return llvm::ConstantInt::get(getLLVMContext(), IntResult);
2144
69
    }
2145
2146
216
    Expr::EvalResult Result;
2147
216
    if (InputExpr->EvaluateAsInt(Result, getContext()))
2148
166
      return llvm::ConstantInt::get(getLLVMContext(), Result.Val.getInt());
2149
216
  }
2150
2151
1.47k
  if (Info.allowsRegister() || !Info.allowsMemory())
2152
1.32k
    if (CodeGenFunction::hasScalarEvaluationKind(InputExpr->getType()))
2153
1.31k
      return EmitScalarExpr(InputExpr);
2154
161
  if (InputExpr->getStmtClass() == Expr::CXXThisExprClass)
2155
3
    return EmitScalarExpr(InputExpr);
2156
158
  InputExpr = InputExpr->IgnoreParenNoopCasts(getContext());
2157
158
  LValue Dest = EmitLValue(InputExpr);
2158
158
  return EmitAsmInputLValue(Info, Dest, InputExpr->getType(), ConstraintStr,
2159
158
                            InputExpr->getExprLoc());
2160
161
}
2161
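A hypothetical x86 example of the immediate path above: the "I" constraint (a 0-31 constant on x86) allows neither register nor memory, so EmitAsmInput folds the operand to a ConstantInt and the literal is passed straight into the asm call.

unsigned shiftLeft3(unsigned X) {
  asm("shll %1, %0" : "+r"(X) : "I"(3));  // the 3 is emitted as $3, no load
  return X;
}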
2162
/// getAsmSrcLocInfo - Return the !srcloc metadata node to attach to an inline
2163
/// asm call instruction.  The !srcloc MDNode contains a list of constant
2164
/// integers which are the source locations of the start of each line in the
2165
/// asm.
2166
static llvm::MDNode *getAsmSrcLocInfo(const StringLiteral *Str,
2167
1.66k
                                      CodeGenFunction &CGF) {
2168
1.66k
  SmallVector<llvm::Metadata *, 8> Locs;
2169
  // Add the location of the first line to the MDNode.
2170
1.66k
  Locs.push_back(llvm::ConstantAsMetadata::get(llvm::ConstantInt::get(
2171
1.66k
      CGF.Int64Ty, Str->getBeginLoc().getRawEncoding())));
2172
1.66k
  StringRef StrVal = Str->getString();
2173
1.66k
  if (!StrVal.empty()) {
2174
1.59k
    const SourceManager &SM = CGF.CGM.getContext().getSourceManager();
2175
1.59k
    const LangOptions &LangOpts = CGF.CGM.getLangOpts();
2176
1.59k
    unsigned StartToken = 0;
2177
1.59k
    unsigned ByteOffset = 0;
2178
2179
    // Add the location of the start of each subsequent line of the asm to the
2180
    // MDNode.
2181
28.4k
    for (unsigned i = 0, e = StrVal.size() - 1; i != e; ++i) {
2182
26.8k
      if (StrVal[i] != '\n') continue;
2183
650
      SourceLocation LineLoc = Str->getLocationOfByte(
2184
650
          i + 1, SM, LangOpts, CGF.getTarget(), &StartToken, &ByteOffset);
2185
650
      Locs.push_back(llvm::ConstantAsMetadata::get(
2186
650
          llvm::ConstantInt::get(CGF.Int64Ty, LineLoc.getRawEncoding())));
2187
650
    }
2188
1.59k
  }
2189
2190
1.66k
  return llvm::MDNode::get(CGF.getLLVMContext(), Locs);
2191
1.66k
}
2192
2193
static void UpdateAsmCallInst(llvm::CallBase &Result, bool HasSideEffect,
2194
                              bool HasUnwindClobber, bool ReadOnly,
2195
                              bool ReadNone, bool NoMerge, const AsmStmt &S,
2196
                              const std::vector<llvm::Type *> &ResultRegTypes,
2197
                              CodeGenFunction &CGF,
2198
1.82k
                              std::vector<llvm::Value *> &RegResults) {
2199
1.82k
  if (!HasUnwindClobber)
2200
1.82k
    Result.addFnAttr(llvm::Attribute::NoUnwind);
2201
2202
1.82k
  if (NoMerge)
2203
1
    Result.addFnAttr(llvm::Attribute::NoMerge);
2204
  // Attach readnone and readonly attributes.
2205
1.82k
  if (!HasSideEffect) {
2206
775
    if (ReadNone)
2207
315
      Result.addFnAttr(llvm::Attribute::ReadNone);
2208
460
    else if (ReadOnly)
2209
319
      Result.addFnAttr(llvm::Attribute::ReadOnly);
2210
775
  }
2211
2212
  // Slap the source location of the inline asm into a !srcloc metadata on the
2213
  // call.
2214
1.82k
  if (const auto *gccAsmStmt = dyn_cast<GCCAsmStmt>(&S))
2215
1.66k
    Result.setMetadata("srcloc",
2216
1.66k
                       getAsmSrcLocInfo(gccAsmStmt->getAsmString(), CGF));
2217
161
  else {
2218
    // At least put the line number on MS inline asm blobs.
2219
161
    llvm::Constant *Loc =
2220
161
        llvm::ConstantInt::get(CGF.Int64Ty, S.getAsmLoc().getRawEncoding());
2221
161
    Result.setMetadata("srcloc",
2222
161
                       llvm::MDNode::get(CGF.getLLVMContext(),
2223
161
                                         llvm::ConstantAsMetadata::get(Loc)));
2224
161
  }
2225
2226
1.82k
  if (CGF.getLangOpts().assumeFunctionsAreConvergent())
2227
    // Conservatively, mark all inline asm blocks in CUDA or OpenCL as
2228
    // convergent (meaning, they may call an intrinsically convergent op, such
2229
    // as bar.sync, and so can't have certain optimizations applied around
2230
    // them).
2231
13
    Result.addFnAttr(llvm::Attribute::Convergent);
2232
  // Extract all of the register value results from the asm.
2233
1.82k
  if (ResultRegTypes.size() == 1) {
2234
811
    RegResults.push_back(&Result);
2235
1.01k
  } else {
2236
1.21k
    for (unsigned i = 0, e = ResultRegTypes.size(); i != e; ++i) {
2237
204
      llvm::Value *Tmp = CGF.Builder.CreateExtractValue(&Result, i, "asmresult");
2238
204
      RegResults.push_back(Tmp);
2239
204
    }
2240
1.01k
  }
2241
1.82k
}
2242
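Hypothetical examples (x86 syntax) of how the readonly/readnone rules applied above classify asm statements:

int pureAdd(int A, int B) {
  int R;
  // No side effects and no memory operands or clobbers: readnone candidate.
  asm("addl %2, %0" : "=r"(R) : "0"(A), "r"(B));
  return R;
}

void compilerBarrier(void) {
  // volatile plus a "memory" clobber: neither readonly nor readnone.
  asm volatile("" ::: "memory");
}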
2243
1.82k
void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
2244
  // Assemble the final asm string.
2245
1.82k
  std::string AsmString = S.generateAsmString(getContext());
2246
2247
  // Get all the output and input constraints together.
2248
1.82k
  SmallVector<TargetInfo::ConstraintInfo, 4> OutputConstraintInfos;
2249
1.82k
  SmallVector<TargetInfo::ConstraintInfo, 4> InputConstraintInfos;
2250
2251
2.99k
  for (unsigned i = 0, e = S.getNumOutputs(); i != e; i++) {
2252
1.17k
    StringRef Name;
2253
1.17k
    if (const GCCAsmStmt *GAS = dyn_cast<GCCAsmStmt>(&S))
2254
1.14k
      Name = GAS->getOutputName(i);
2255
1.17k
    TargetInfo::ConstraintInfo Info(S.getOutputConstraint(i), Name);
2256
1.17k
    bool IsValid = getTarget().validateOutputConstraint(Info); (void)IsValid;
2257
1.17k
    assert(IsValid && "Failed to parse output constraint");
2258
0
    OutputConstraintInfos.push_back(Info);
2259
1.17k
  }
2260
2261
3.53k
  for (unsigned i = 0, e = S.getNumInputs(); i != e; i++) {
2262
1.70k
    StringRef Name;
2263
1.70k
    if (const GCCAsmStmt *GAS = dyn_cast<GCCAsmStmt>(&S))
2264
1.60k
      Name = GAS->getInputName(i);
2265
1.70k
    TargetInfo::ConstraintInfo Info(S.getInputConstraint(i), Name);
2266
1.70k
    bool IsValid =
2267
1.70k
      getTarget().validateInputConstraint(OutputConstraintInfos, Info);
2268
1.70k
    assert(IsValid && "Failed to parse input constraint"); (void)IsValid;
2269
1.70k
    InputConstraintInfos.push_back(Info);
2270
1.70k
  }
2271
2272
1.82k
  std::string Constraints;
2273
2274
1.82k
  std::vector<LValue> ResultRegDests;
2275
1.82k
  std::vector<QualType> ResultRegQualTys;
2276
1.82k
  std::vector<llvm::Type *> ResultRegTypes;
2277
1.82k
  std::vector<llvm::Type *> ResultTruncRegTypes;
2278
1.82k
  std::vector<llvm::Type *> ArgTypes;
2279
1.82k
  std::vector<llvm::Value*> Args;
2280
1.82k
  llvm::BitVector ResultTypeRequiresCast;
2281
2282
  // Keep track of inout constraints.
2283
1.82k
  std::string InOutConstraints;
2284
1.82k
  std::vector<llvm::Value*> InOutArgs;
2285
1.82k
  std::vector<llvm::Type*> InOutArgTypes;
2286
2287
  // Keep track of out constraints for tied input operand.
2288
1.82k
  std::vector<std::string> OutputConstraints;
2289
2290
  // Keep track of defined physregs.
2291
1.82k
  llvm::SmallSet<std::string, 8> PhysRegOutputs;
2292
2293
  // An inline asm can be marked readonly if it meets the following conditions:
2294
  //  - it doesn't have any sideeffects
2295
  //  - it doesn't clobber memory
2296
  //  - it doesn't return a value by-reference
2297
  // It can be marked readnone if it doesn't have any input memory constraints
2298
  // in addition to meeting the conditions listed above.
2299
1.82k
  bool ReadOnly = true, ReadNone = true;
2300
2301
2.99k
  for (unsigned i = 0, e = S.getNumOutputs(); i != e; 
i++1.17k
) {
2302
1.17k
    TargetInfo::ConstraintInfo &Info = OutputConstraintInfos[i];
2303
2304
    // Simplify the output constraint.
2305
1.17k
    std::string OutputConstraint(S.getOutputConstraint(i));
2306
1.17k
    OutputConstraint = SimplifyConstraint(OutputConstraint.c_str() + 1,
2307
1.17k
                                          getTarget(), &OutputConstraintInfos);
2308
2309
1.17k
    const Expr *OutExpr = S.getOutputExpr(i);
2310
1.17k
    OutExpr = OutExpr->IgnoreParenNoopCasts(getContext());
2311
2312
1.17k
    std::string GCCReg;
2313
1.17k
    OutputConstraint = AddVariableConstraints(OutputConstraint, *OutExpr,
2314
1.17k
                                              getTarget(), CGM, S,
2315
1.17k
                                              Info.earlyClobber(),
2316
1.17k
                                              &GCCReg);
2317
    // Give an error on multiple outputs to same physreg.
2318
1.17k
    if (!GCCReg.empty() && !PhysRegOutputs.insert(GCCReg).second)
2319
1
      CGM.Error(S.getAsmLoc(), "multiple outputs to hard register: " + GCCReg);
2320
2321
1.17k
    OutputConstraints.push_back(OutputConstraint);
2322
1.17k
    LValue Dest = EmitLValue(OutExpr);
2323
1.17k
    if (!Constraints.empty())
2324
184
      Constraints += ',';
2325
2326
    // If this is a register output, then make the inline asm return it
2327
    // by-value.  If this is a memory result, return the value by-reference.
2328
1.17k
    QualType QTy = OutExpr->getType();
2329
1.17k
    const bool IsScalarOrAggregate = hasScalarEvaluationKind(QTy) ||
2330
1.17k
                                     hasAggregateEvaluationKind(QTy);
2331
1.17k
    if (!Info.allowsMemory() && IsScalarOrAggregate) {
2332
2333
989
      Constraints += "=" + OutputConstraint;
2334
989
      ResultRegQualTys.push_back(QTy);
2335
989
      ResultRegDests.push_back(Dest);
2336
2337
989
      llvm::Type *Ty = ConvertTypeForMem(QTy);
2338
989
      const bool RequiresCast = Info.allowsRegister() &&
2339
989
          (getTargetHooks().isScalarizableAsmOperand(*this, Ty) ||
2340
989
           Ty->isAggregateType());
2341
2342
989
      ResultTruncRegTypes.push_back(Ty);
2343
989
      ResultTypeRequiresCast.push_back(RequiresCast);
2344
2345
989
      if (RequiresCast) {
2346
17
        unsigned Size = getContext().getTypeSize(QTy);
2347
17
        Ty = llvm::IntegerType::get(getLLVMContext(), Size);
2348
17
      }
2349
989
      ResultRegTypes.push_back(Ty);
2350
      // If this output is tied to an input, and if the input is larger, then
2351
      // we need to set the actual result type of the inline asm node to be the
2352
      // same as the input type.
2353
989
      if (Info.hasMatchingInput()) {
2354
57
        unsigned InputNo;
2355
66
        for (InputNo = 0; InputNo != S.getNumInputs(); ++InputNo) {
2356
66
          TargetInfo::ConstraintInfo &Input = InputConstraintInfos[InputNo];
2357
66
          if (Input.hasTiedOperand() && Input.getTiedOperand() == i)
2358
57
            break;
2359
66
        }
2360
57
        assert(InputNo != S.getNumInputs() && "Didn't find matching input!");
2361
2362
0
        QualType InputTy = S.getInputExpr(InputNo)->getType();
2363
57
        QualType OutputType = OutExpr->getType();
2364
2365
57
        uint64_t InputSize = getContext().getTypeSize(InputTy);
2366
57
        if (getContext().getTypeSize(OutputType) < InputSize) {
2367
          // Form the asm to return the value as a larger integer or fp type.
2368
4
          ResultRegTypes.back() = ConvertType(InputTy);
2369
4
        }
2370
57
      }
2371
989
      if (llvm::Type* AdjTy =
2372
989
            getTargetHooks().adjustInlineAsmType(*this, OutputConstraint,
2373
989
                                                 ResultRegTypes.back()))
2374
989
        ResultRegTypes.back() = AdjTy;
2375
0
      else {
2376
0
        CGM.getDiags().Report(S.getAsmLoc(),
2377
0
                              diag::err_asm_invalid_type_in_input)
2378
0
            << OutExpr->getType() << OutputConstraint;
2379
0
      }
2380
2381
      // Update largest vector width for any vector types.
2382
989
      if (auto *VT = dyn_cast<llvm::VectorType>(ResultRegTypes.back()))
2383
135
        LargestVectorWidth =
2384
135
            std::max((uint64_t)LargestVectorWidth,
2385
135
                     VT->getPrimitiveSizeInBits().getKnownMinSize());
2386
989
    } else {
2387
181
      llvm::Type *DestAddrTy = Dest.getAddress(*this).getType();
2388
181
      llvm::Value *DestPtr = Dest.getPointer(*this);
2389
      // Matrix types in memory are represented by arrays, but accessed through
2390
      // vector pointers, with the alignment specified on the access operation.
2391
      // For inline assembly, update pointer arguments to use vector pointers.
2392
      // Otherwise there will be a mismatch if the matrix is also an
2393
      // input-argument which is represented as vector.
2394
181
      if (isa<MatrixType>(OutExpr->getType().getCanonicalType())) {
2395
1
        DestAddrTy = llvm::PointerType::get(
2396
1
            ConvertType(OutExpr->getType()),
2397
1
            cast<llvm::PointerType>(DestAddrTy)->getAddressSpace());
2398
1
        DestPtr = Builder.CreateBitCast(DestPtr, DestAddrTy);
2399
1
      }
2400
181
      ArgTypes.push_back(DestAddrTy);
2401
181
      Args.push_back(DestPtr);
2402
181
      Constraints += "=*";
2403
181
      Constraints += OutputConstraint;
2404
181
      ReadOnly = ReadNone = false;
2405
181
    }
2406
2407
1.17k
    if (Info.isReadWrite()) {
2408
139
      InOutConstraints += ',';
2409
2410
139
      const Expr *InputExpr = S.getOutputExpr(i);
2411
139
      llvm::Value *Arg = EmitAsmInputLValue(Info, Dest, InputExpr->getType(),
2412
139
                                            InOutConstraints,
2413
139
                                            InputExpr->getExprLoc());
2414
2415
139
      if (llvm::Type* AdjTy =
2416
139
          getTargetHooks().adjustInlineAsmType(*this, OutputConstraint,
2417
139
                                               Arg->getType()))
2418
139
        Arg = Builder.CreateBitCast(Arg, AdjTy);
2419
2420
      // Update largest vector width for any vector types.
2421
139
      if (auto *VT = dyn_cast<llvm::VectorType>(Arg->getType()))
2422
6
        LargestVectorWidth =
2423
6
            std::max((uint64_t)LargestVectorWidth,
2424
6
                     VT->getPrimitiveSizeInBits().getKnownMinSize());
2425
      // Only tie earlyclobber physregs.
2426
139
      if (Info.allowsRegister() && (GCCReg.empty() || Info.earlyClobber()))
2427
121
        InOutConstraints += llvm::utostr(i);
2428
18
      else
2429
18
        InOutConstraints += OutputConstraint;
2430
2431
139
      InOutArgTypes.push_back(Arg->getType());
2432
139
      InOutArgs.push_back(Arg);
2433
139
    }
2434
1.17k
  }
2435
2436
  // If this is a Microsoft-style asm blob, store the return registers (EAX:EDX)
2437
  // to the return value slot. Only do this when returning in registers.
2438
1.82k
  if (isa<MSAsmStmt>(&S)) {
2439
161
    const ABIArgInfo &RetAI = CurFnInfo->getReturnInfo();
2440
161
    if (RetAI.isDirect() || RetAI.isExtend()) {
2441
      // Make a fake lvalue for the return value slot.
2442
29
      LValue ReturnSlot = MakeAddrLValue(ReturnValue, FnRetTy);
2443
29
      CGM.getTargetCodeGenInfo().addReturnRegisterOutputs(
2444
29
          *this, ReturnSlot, Constraints, ResultRegTypes, ResultTruncRegTypes,
2445
29
          ResultRegDests, AsmString, S.getNumOutputs());
2446
29
      SawAsmBlock = true;
2447
29
    }
2448
161
  }
2449
2450
3.53k
  for (unsigned i = 0, e = S.getNumInputs(); i != e; i++) {
2451
1.70k
    const Expr *InputExpr = S.getInputExpr(i);
2452
2453
1.70k
    TargetInfo::ConstraintInfo &Info = InputConstraintInfos[i];
2454
2455
1.70k
    if (Info.allowsMemory())
2456
591
      ReadNone = false;
2457
2458
1.70k
    if (!Constraints.empty())
2459
1.42k
      Constraints += ',';
2460
2461
    // Simplify the input constraint.
2462
1.70k
    std::string InputConstraint(S.getInputConstraint(i));
2463
1.70k
    InputConstraint = SimplifyConstraint(InputConstraint.c_str(), getTarget(),
2464
1.70k
                                         &OutputConstraintInfos);
2465
2466
1.70k
    InputConstraint = AddVariableConstraints(
2467
1.70k
        InputConstraint, *InputExpr->IgnoreParenNoopCasts(getContext()),
2468
1.70k
        getTarget(), CGM, S, false /* No EarlyClobber */);
2469
2470
1.70k
    std::string ReplaceConstraint (InputConstraint);
2471
1.70k
    llvm::Value *Arg = EmitAsmInput(Info, InputExpr, Constraints);
2472
2473
    // If this input argument is tied to a larger output result, extend the
2474
    // input to be the same size as the output.  The LLVM backend wants to see
2475
    // the input and output of a matching constraint be the same size.  Note
2476
    // that GCC does not define what the top bits are here.  We use zext because
2477
    // that is usually cheaper, but LLVM IR should really get an anyext someday.
2478
1.70k
    if (Info.hasTiedOperand()) {
2479
57
      unsigned Output = Info.getTiedOperand();
2480
57
      QualType OutputType = S.getOutputExpr(Output)->getType();
2481
57
      QualType InputTy = InputExpr->getType();
2482
2483
57
      if (getContext().getTypeSize(OutputType) >
2484
57
          getContext().getTypeSize(InputTy)) {
2485
        // Use ptrtoint as appropriate so that we can do our extension.
2486
7
        if (isa<llvm::PointerType>(Arg->getType()))
2487
0
          Arg = Builder.CreatePtrToInt(Arg, IntPtrTy);
2488
7
        llvm::Type *OutputTy = ConvertType(OutputType);
2489
7
        if (isa<llvm::IntegerType>(OutputTy))
2490
3
          Arg = Builder.CreateZExt(Arg, OutputTy);
2491
4
        else if (isa<llvm::PointerType>(OutputTy))
2492
1
          Arg = Builder.CreateZExt(Arg, IntPtrTy);
2493
3
        else {
2494
3
          assert(OutputTy->isFloatingPointTy() && "Unexpected output type");
2495
0
          Arg = Builder.CreateFPExt(Arg, OutputTy);
2496
3
        }
2497
7
      }
2498
      // Deal with the tied operands' constraint code in adjustInlineAsmType.
2499
0
      ReplaceConstraint = OutputConstraints[Output];
2500
57
    }
2501
1.70k
    if (llvm::Type* AdjTy =
2502
1.70k
          getTargetHooks().adjustInlineAsmType(*this, ReplaceConstraint,
2503
1.70k
                                                   Arg->getType()))
2504
1.70k
      Arg = Builder.CreateBitCast(Arg, AdjTy);
2505
0
    else
2506
0
      CGM.getDiags().Report(S.getAsmLoc(), diag::err_asm_invalid_type_in_input)
2507
0
          << InputExpr->getType() << InputConstraint;
2508
2509
    // Update largest vector width for any vector types.
2510
1.70k
    if (auto *VT = dyn_cast<llvm::VectorType>(Arg->getType()))
2511
147
      LargestVectorWidth =
2512
147
          std::max((uint64_t)LargestVectorWidth,
2513
147
                   VT->getPrimitiveSizeInBits().getKnownMinSize());
2514
2515
1.70k
    ArgTypes.push_back(Arg->getType());
2516
1.70k
    Args.push_back(Arg);
2517
1.70k
    Constraints += InputConstraint;
2518
1.70k
  }
2519
2520
  // Labels
2521
1.82k
  SmallVector<llvm::BasicBlock *, 16> Transfer;
2522
1.82k
  llvm::BasicBlock *Fallthrough = nullptr;
2523
1.82k
  bool IsGCCAsmGoto = false;
2524
1.82k
  if (const auto *GS =  dyn_cast<GCCAsmStmt>(&S)) {
2525
1.66k
    IsGCCAsmGoto = GS->isAsmGoto();
2526
1.66k
    if (IsGCCAsmGoto) {
2527
46
      for (const auto *E : GS->labels()) {
2528
46
        JumpDest Dest = getJumpDestForLabel(E->getLabel());
2529
46
        Transfer.push_back(Dest.getBlock());
2530
46
        llvm::BlockAddress *BA =
2531
46
            llvm::BlockAddress::get(CurFn, Dest.getBlock());
2532
46
        Args.push_back(BA);
2533
46
        ArgTypes.push_back(BA->getType());
2534
46
        if (!Constraints.empty())
2535
41
          Constraints += ',';
2536
46
        Constraints += 'X';
2537
46
      }
2538
27
      Fallthrough = createBasicBlock("asm.fallthrough");
2539
27
    }
2540
1.66k
  }
2541
2542
  // Append the "input" part of inout constraints last.
2543
1.96k
  for (unsigned i = 0, e = InOutArgs.size(); i != e; i++) {
2544
139
    ArgTypes.push_back(InOutArgTypes[i]);
2545
139
    Args.push_back(InOutArgs[i]);
2546
139
  }
2547
1.82k
  Constraints += InOutConstraints;
2548
2549
1.82k
  bool HasUnwindClobber = false;
2550
2551
  // Clobbers
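  // E.g. (illustrative) asm volatile("" ::: "memory", "cc", "eax"):
  // "memory" defeats the readonly/readnone deduction, "unwind" is only
  // recorded, and everything else becomes a "~{name}" constraint, with
  // register names normalized first and "cc" passed through as-is.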
2552
3.17k
  for (unsigned i = 0, e = S.getNumClobbers(); i != e; i++) {
2553
1.35k
    StringRef Clobber = S.getClobber(i);
2554
2555
1.35k
    if (Clobber == "memory")
2556
142
      ReadOnly = ReadNone = false;
2557
1.20k
    else if (Clobber == "unwind") {
2558
1
      HasUnwindClobber = true;
2559
1
      continue;
2560
1.20k
    } else if (Clobber != "cc") {
2561
1.16k
      Clobber = getTarget().getNormalizedGCCRegisterName(Clobber);
2562
1.16k
      if (CGM.getCodeGenOpts().StackClashProtector &&
2563
1.16k
          getTarget().isSPRegName(Clobber)) {
2564
3
        CGM.getDiags().Report(S.getAsmLoc(),
2565
3
                              diag::warn_stack_clash_protection_inline_asm);
2566
3
      }
2567
1.16k
    }
2568
2569
1.34k
    if (isa<MSAsmStmt>(&S)) {
2570
194
      if (Clobber == "eax" || 
Clobber == "edx"96
) {
2571
114
        if (Constraints.find("=&A") != std::string::npos)
2572
3
          continue;
2573
111
        std::string::size_type position1 =
2574
111
            Constraints.find("={" + Clobber.str() + "}");
2575
111
        if (position1 != std::string::npos) {
2576
13
          Constraints.insert(position1 + 1, "&");
2577
13
          continue;
2578
13
        }
2579
98
        std::string::size_type position2 = Constraints.find("=A");
2580
98
        if (position2 != std::string::npos) {
2581
3
          Constraints.insert(position2 + 1, "&");
2582
3
          continue;
2583
3
        }
2584
98
      }
2585
194
    }
2586
1.33k
    if (!Constraints.empty())
2587
933
      Constraints += ',';
2588
2589
1.33k
    Constraints += "~{";
2590
1.33k
    Constraints += Clobber;
2591
1.33k
    Constraints += '}';
2592
1.33k
  }
2593
2594
1.82k
  assert(!(HasUnwindClobber && IsGCCAsmGoto) &&
2595
1.82k
         "unwind clobber can't be used with asm goto");
2596
2597
  // Add machine-specific clobbers.
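  // E.g. (illustrative) on x86 this is typically "~{dirflag},~{fpsr},~{flags}",
  // which every inline asm on that target is assumed to clobber.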
2598
0
  std::string MachineClobbers = getTarget().getClobbers();
2599
1.82k
  if (!MachineClobbers.empty()) {
2600
1.02k
    if (!Constraints.empty())
2601
906
      Constraints += ',';
2602
1.02k
    Constraints += MachineClobbers;
2603
1.02k
  }
2604
2605
1.82k
  llvm::Type *ResultType;
2606
1.82k
  if (ResultRegTypes.empty())
2607
941
    ResultType = VoidTy;
2608
881
  else if (ResultRegTypes.size() == 1)
2609
811
    ResultType = ResultRegTypes[0];
2610
70
  else
2611
70
    ResultType = llvm::StructType::get(getLLVMContext(), ResultRegTypes);
2612
2613
1.82k
  llvm::FunctionType *FTy =
2614
1.82k
    llvm::FunctionType::get(ResultType, ArgTypes, false);
2615
2616
1.82k
  bool HasSideEffect = S.isVolatile() || S.getNumOutputs() == 0;
2617
1.82k
  llvm::InlineAsm::AsmDialect AsmDialect = isa<MSAsmStmt>(&S) ?
2618
1.66k
    llvm::InlineAsm::AD_Intel : llvm::InlineAsm::AD_ATT;
2619
1.82k
  llvm::InlineAsm *IA = llvm::InlineAsm::get(
2620
1.82k
      FTy, AsmString, Constraints, HasSideEffect,
2621
1.82k
      /* IsAlignStack */ false, AsmDialect, HasUnwindClobber);
2622
1.82k
  std::vector<llvm::Value*> RegResults;
2623
1.82k
  if (IsGCCAsmGoto) {
2624
27
    llvm::CallBrInst *Result =
2625
27
        Builder.CreateCallBr(IA, Fallthrough, Transfer, Args);
2626
27
    EmitBlock(Fallthrough);
2627
27
    UpdateAsmCallInst(cast<llvm::CallBase>(*Result), HasSideEffect, false,
2628
27
                      ReadOnly, ReadNone, InNoMergeAttributedStmt, S,
2629
27
                      ResultRegTypes, *this, RegResults);
2630
1.79k
  } else if (HasUnwindClobber) {
2631
1
    llvm::CallBase *Result = EmitCallOrInvoke(IA, Args, "");
2632
1
    UpdateAsmCallInst(*Result, HasSideEffect, true, ReadOnly, ReadNone,
2633
1
                      InNoMergeAttributedStmt, S, ResultRegTypes, *this,
2634
1
                      RegResults);
2635
1.79k
  } else {
2636
1.79k
    llvm::CallInst *Result =
2637
1.79k
        Builder.CreateCall(IA, Args, getBundlesForFunclet(IA));
2638
1.79k
    UpdateAsmCallInst(cast<llvm::CallBase>(*Result), HasSideEffect, false,
2639
1.79k
                      ReadOnly, ReadNone, InNoMergeAttributedStmt, S,
2640
1.79k
                      ResultRegTypes, *this, RegResults);
2641
1.79k
  }
2642
2643
1.82k
  assert(RegResults.size() == ResultRegTypes.size());
2644
0
  assert(RegResults.size() == ResultTruncRegTypes.size());
2645
0
  assert(RegResults.size() == ResultRegDests.size());
2646
  // ResultRegDests can also be populated by addReturnRegisterOutputs() above,
2647
  // in which case its size may grow.
2648
0
  assert(ResultTypeRequiresCast.size() <= ResultRegDests.size());
2649
2.83k
  for (unsigned i = 0, e = RegResults.size(); i != e; ++i) {
2650
1.01k
    llvm::Value *Tmp = RegResults[i];
2651
1.01k
    llvm::Type *TruncTy = ResultTruncRegTypes[i];
2652
2653
    // If the result type of the LLVM IR asm doesn't match the result type of
2654
    // the expression, do the conversion.
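    // For instance (illustrative): with unsigned char c; asm("..." : "=a"(c));
    // the asm may be given an i32 result register type while the destination
    // is i8, so the value is truncated (or converted for FP, pointer and
    // vector results) before being stored.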
2655
1.01k
    if (ResultRegTypes[i] != ResultTruncRegTypes[i]) {
2656
2657
      // Truncate the integer result to the right size, note that TruncTy can be
2658
      // a pointer.
2659
35
      if (TruncTy->isFloatingPointTy())
2660
1
        Tmp = Builder.CreateFPTrunc(Tmp, TruncTy);
2661
34
      else if (TruncTy->isPointerTy() && Tmp->getType()->isIntegerTy()) {
2662
0
        uint64_t ResSize = CGM.getDataLayout().getTypeSizeInBits(TruncTy);
2663
0
        Tmp = Builder.CreateTrunc(Tmp,
2664
0
                   llvm::IntegerType::get(getLLVMContext(), (unsigned)ResSize));
2665
0
        Tmp = Builder.CreateIntToPtr(Tmp, TruncTy);
2666
34
      } else if (Tmp->getType()->isPointerTy() && TruncTy->isIntegerTy()) {
2667
0
        uint64_t TmpSize = CGM.getDataLayout().getTypeSizeInBits(Tmp->getType());
2668
0
        Tmp = Builder.CreatePtrToInt(Tmp,
2669
0
                   llvm::IntegerType::get(getLLVMContext(), (unsigned)TmpSize));
2670
0
        Tmp = Builder.CreateTrunc(Tmp, TruncTy);
2671
34
      } else if (TruncTy->isIntegerTy()) {
2672
7
        Tmp = Builder.CreateZExtOrTrunc(Tmp, TruncTy);
2673
27
      } else if (TruncTy->isVectorTy()) {
2674
10
        Tmp = Builder.CreateBitCast(Tmp, TruncTy);
2675
10
      }
2676
35
    }
2677
2678
1.01k
    LValue Dest = ResultRegDests[i];
2679
    // ResultTypeRequiresCast elements correspond to the first
2680
    // ResultTypeRequiresCast.size() elements of RegResults.
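    // E.g. (illustrative) a struct-typed output such as
    //   struct { int a, b; } s; asm("..." : "=r"(s));
    // comes back as a plain register value; it is stored through a pointer
    // cast of the destination, as an integer of matching width if one exists.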
2681
1.01k
    if ((i < ResultTypeRequiresCast.size()) && ResultTypeRequiresCast[i]) {
2682
17
      unsigned Size = getContext().getTypeSize(ResultRegQualTys[i]);
2683
17
      Address A = Builder.CreateBitCast(Dest.getAddress(*this),
2684
17
                                        ResultRegTypes[i]->getPointerTo());
2685
17
      if (getTargetHooks().isScalarizableAsmOperand(*this, TruncTy)) {
2686
1
        Builder.CreateStore(Tmp, A);
2687
1
        continue;
2688
1
      }
2689
2690
16
      QualType Ty = getContext().getIntTypeForBitwidth(Size, /*Signed*/ false);
2691
16
      if (Ty.isNull()) {
2692
3
        const Expr *OutExpr = S.getOutputExpr(i);
2693
3
        CGM.Error(
2694
3
            OutExpr->getExprLoc(),
2695
3
            "impossible constraint in asm: can't store value into a register");
2696
3
        return;
2697
3
      }
2698
13
      Dest = MakeAddrLValue(A, Ty);
2699
13
    }
2700
1.01k
    EmitStoreThroughLValue(RValue::get(Tmp), Dest);
2701
1.01k
  }
2702
1.82k
}
2703
2704
960
LValue CodeGenFunction::InitCapturedStruct(const CapturedStmt &S) {
2705
960
  const RecordDecl *RD = S.getCapturedRecordDecl();
2706
960
  QualType RecordTy = getContext().getRecordType(RD);
2707
2708
  // Initialize the captured struct.
2709
960
  LValue SlotLV =
2710
960
    MakeAddrLValue(CreateMemTemp(RecordTy, "agg.captured"), RecordTy);
2711
2712
960
  RecordDecl::field_iterator CurField = RD->field_begin();
2713
960
  for (CapturedStmt::const_capture_init_iterator I = S.capture_init_begin(),
2714
960
                                                 E = S.capture_init_end();
2715
2.31k
       I != E; ++I, ++CurField) {
2716
1.35k
    LValue LV = EmitLValueForFieldInitialization(SlotLV, *CurField);
2717
1.35k
    if (CurField->hasCapturedVLAType()) {
2718
47
      EmitLambdaVLACapture(CurField->getCapturedVLAType(), LV);
2719
1.30k
    } else {
2720
1.30k
      EmitInitializerForField(*CurField, LV, *I);
2721
1.30k
    }
2722
1.35k
  }
2723
2724
960
  return SlotLV;
2725
960
}
2726
2727
/// Generate an outlined function for the body of a CapturedStmt, store any
2728
/// captured variables into the captured struct, and call the outlined function.
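/// For example (illustrative), for
///   #pragma clang __debug captured
///   { x++; }
/// the captures are packed into an "agg.captured" temporary and a helper
/// (named "__captured_stmt" by default) is emitted and then called with it.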
2729
llvm::Function *
2730
27
CodeGenFunction::EmitCapturedStmt(const CapturedStmt &S, CapturedRegionKind K) {
2731
27
  LValue CapStruct = InitCapturedStruct(S);
2732
2733
  // Emit the CapturedDecl
2734
27
  CodeGenFunction CGF(CGM, true);
2735
27
  CGCapturedStmtRAII CapInfoRAII(CGF, new CGCapturedStmtInfo(S, K));
2736
27
  llvm::Function *F = CGF.GenerateCapturedStmtFunction(S);
2737
27
  delete CGF.CapturedStmtInfo;
2738
2739
  // Emit call to the helper function.
2740
27
  EmitCallOrInvoke(F, CapStruct.getPointer(*this));
2741
2742
27
  return F;
2743
27
}
2744
2745
869
Address CodeGenFunction::GenerateCapturedStmtArgument(const CapturedStmt &S) {
2746
869
  LValue CapStruct = InitCapturedStruct(S);
2747
869
  return CapStruct.getAddress(*this);
2748
869
}
2749
2750
/// Creates the outlined function for a CapturedStmt.
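/// The helper has internal linkage, returns void, and takes the context
/// parameter (plus any extra parameters the CapturedDecl declares), e.g.
/// (illustrative) "define internal void @__captured_stmt(%struct.anon*)".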
2751
llvm::Function *
2752
960
CodeGenFunction::GenerateCapturedStmtFunction(const CapturedStmt &S) {
2753
960
  assert(CapturedStmtInfo &&
2754
960
    "CapturedStmtInfo should be set when generating the captured function");
2755
0
  const CapturedDecl *CD = S.getCapturedDecl();
2756
960
  const RecordDecl *RD = S.getCapturedRecordDecl();
2757
960
  SourceLocation Loc = S.getBeginLoc();
2758
960
  assert(CD->hasBody() && "missing CapturedDecl body");
2759
2760
  // Build the argument list.
2761
0
  ASTContext &Ctx = CGM.getContext();
2762
960
  FunctionArgList Args;
2763
960
  Args.append(CD->param_begin(), CD->param_end());
2764
2765
  // Create the function declaration.
2766
960
  const CGFunctionInfo &FuncInfo =
2767
960
    CGM.getTypes().arrangeBuiltinFunctionDeclaration(Ctx.VoidTy, Args);
2768
960
  llvm::FunctionType *FuncLLVMTy = CGM.getTypes().GetFunctionType(FuncInfo);
2769
2770
960
  llvm::Function *F =
2771
960
    llvm::Function::Create(FuncLLVMTy, llvm::GlobalValue::InternalLinkage,
2772
960
                           CapturedStmtInfo->getHelperName(), &CGM.getModule());
2773
960
  CGM.SetInternalFunctionAttributes(CD, F, FuncInfo);
2774
960
  if (CD->isNothrow())
2775
715
    F->addFnAttr(llvm::Attribute::NoUnwind);
2776
2777
  // Generate the function.
2778
960
  StartFunction(CD, Ctx.VoidTy, F, FuncInfo, Args, CD->getLocation(),
2779
960
                CD->getBody()->getBeginLoc());
2780
  // Set the context parameter in CapturedStmtInfo.
2781
960
  Address DeclPtr = GetAddrOfLocalVar(CD->getContextParam());
2782
960
  CapturedStmtInfo->setContextValue(Builder.CreateLoad(DeclPtr));
2783
2784
  // Initialize variable-length arrays.
2785
960
  LValue Base = MakeNaturalAlignAddrLValue(CapturedStmtInfo->getContextValue(),
2786
960
                                           Ctx.getTagDeclType(RD));
2787
1.35k
  for (auto *FD : RD->fields()) {
2788
1.35k
    if (FD->hasCapturedVLAType()) {
2789
47
      auto *ExprArg =
2790
47
          EmitLoadOfLValue(EmitLValueForField(Base, FD), S.getBeginLoc())
2791
47
              .getScalarVal();
2792
47
      auto VAT = FD->getCapturedVLAType();
2793
47
      VLASizeMap[VAT->getSizeExpr()] = ExprArg;
2794
47
    }
2795
1.35k
  }
2796
2797
  // If 'this' is captured, load it into CXXThisValue.
2798
960
  if (CapturedStmtInfo->isCXXThisExprCaptured()) {
2799
24
    FieldDecl *FD = CapturedStmtInfo->getThisFieldDecl();
2800
24
    LValue ThisLValue = EmitLValueForField(Base, FD);
2801
24
    CXXThisValue = EmitLoadOfLValue(ThisLValue, Loc).getScalarVal();
2802
24
  }
2803
2804
960
  PGO.assignRegionCounters(GlobalDecl(CD), F);
2805
960
  CapturedStmtInfo->EmitBody(*this, CD->getBody());
2806
960
  FinishFunction(CD->getBodyRBrace());
2807
2808
960
  return F;
2809
960
}