Coverage Report

Created: 2020-02-25 14:32

/Users/buildslave/jenkins/workspace/coverage/llvm-project/clang/lib/CodeGen/CGStmt.cpp

//===--- CGStmt.cpp - Emit LLVM Code from Statements ----------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit Stmt nodes as LLVM code.
//
//===----------------------------------------------------------------------===//

#include "CGDebugInfo.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "TargetInfo.h"
#include "clang/AST/Attr.h"
#include "clang/AST/StmtVisitor.h"
#include "clang/Basic/Builtins.h"
#include "clang/Basic/PrettyStackTrace.h"
#include "clang/Basic/TargetInfo.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/MDBuilder.h"

using namespace clang;
using namespace CodeGen;

//===----------------------------------------------------------------------===//
//                              Statement Emission
//===----------------------------------------------------------------------===//

void CodeGenFunction::EmitStopPoint(const Stmt *S) {
  if (CGDebugInfo *DI = getDebugInfo()) {
    SourceLocation Loc;
    Loc = S->getBeginLoc();
    DI->EmitLocation(Builder, Loc);

    LastStopPoint = Loc;
  }
}

void CodeGenFunction::EmitStmt(const Stmt *S, ArrayRef<const Attr *> Attrs) {
  assert(S && "Null statement?");
  PGO.setCurrentStmt(S);

  // These statements have their own debug info handling.
  if (EmitSimpleStmt(S))
    return;

  // Check if we are generating unreachable code.
  if (!HaveInsertPoint()) {
    // If so, and the statement doesn't contain a label, then we do not need to
    // generate actual code. This is safe because (1) the current point is
    // unreachable, so we don't need to execute the code, and (2) we've already
    // handled the statements which update internal data structures (like the
    // local variable map) which could be used by subsequent statements.
    if (!ContainsLabel(S)) {
      // Verify that any decl statements were handled as simple, they may be in
      // scope of subsequent reachable statements.
      assert(!isa<DeclStmt>(*S) && "Unexpected DeclStmt!");
      return;
    }

    // Otherwise, make a new block to hold the code.
    EnsureInsertPoint();
  }

  // Generate a stoppoint if we are emitting debug info.
  EmitStopPoint(S);

  // Ignore all OpenMP directives except for simd if OpenMP with Simd is
  // enabled.
  if (getLangOpts().OpenMP && getLangOpts().OpenMPSimd) {
    if (const auto *D = dyn_cast<OMPExecutableDirective>(S)) {
      EmitSimpleOMPExecutableDirective(*D);
      return;
    }
  }

  switch (S->getStmtClass()) {
  case Stmt::NoStmtClass:
  case Stmt::CXXCatchStmtClass:
  case Stmt::SEHExceptStmtClass:
  case Stmt::SEHFinallyStmtClass:
  case Stmt::MSDependentExistsStmtClass:
    llvm_unreachable("invalid statement class to emit generically");
  case Stmt::NullStmtClass:
  case Stmt::CompoundStmtClass:
  case Stmt::DeclStmtClass:
  case Stmt::LabelStmtClass:
  case Stmt::AttributedStmtClass:
  case Stmt::GotoStmtClass:
  case Stmt::BreakStmtClass:
  case Stmt::ContinueStmtClass:
  case Stmt::DefaultStmtClass:
  case Stmt::CaseStmtClass:
  case Stmt::SEHLeaveStmtClass:
    llvm_unreachable("should have emitted these statements as simple");

#define STMT(Type, Base)
#define ABSTRACT_STMT(Op)
#define EXPR(Type, Base) \
  case Stmt::Type##Class:
#include "clang/AST/StmtNodes.inc"
  {
    // Remember the block we came in on.
    llvm::BasicBlock *incoming = Builder.GetInsertBlock();
    assert(incoming && "expression emission must have an insertion point");

    EmitIgnoredExpr(cast<Expr>(S));

    llvm::BasicBlock *outgoing = Builder.GetInsertBlock();
    assert(outgoing && "expression emission cleared block!");

    // The expression emitters assume (reasonably!) that the insertion
    // point is always set.  To maintain that, the call-emission code
    // for noreturn functions has to enter a new block with no
    // predecessors.  We want to kill that block and mark the current
    // insertion point unreachable in the common case of a call like
    // "exit();".  Since expression emission doesn't otherwise create
    // blocks with no predecessors, we can just test for that.
    // However, we must be careful not to do this to our incoming
    // block, because *statement* emission does sometimes create
    // reachable blocks which will have no predecessors until later in
    // the function.  This occurs with, e.g., labels that are not
    // reachable by fallthrough.
    if (incoming != outgoing && outgoing->use_empty()) {
      outgoing->eraseFromParent();
      Builder.ClearInsertionPoint();
    }
    break;
  }

  case Stmt::IndirectGotoStmtClass:
    EmitIndirectGotoStmt(cast<IndirectGotoStmt>(*S)); break;

  case Stmt::IfStmtClass:      EmitIfStmt(cast<IfStmt>(*S));              break;
  case Stmt::WhileStmtClass:   EmitWhileStmt(cast<WhileStmt>(*S), Attrs); break;
  case Stmt::DoStmtClass:      EmitDoStmt(cast<DoStmt>(*S), Attrs);       break;
  case Stmt::ForStmtClass:     EmitForStmt(cast<ForStmt>(*S), Attrs);     break;

  case Stmt::ReturnStmtClass:  EmitReturnStmt(cast<ReturnStmt>(*S));      break;

  case Stmt::SwitchStmtClass:  EmitSwitchStmt(cast<SwitchStmt>(*S));      break;
  case Stmt::GCCAsmStmtClass:  // Intentional fall-through.
  case Stmt::MSAsmStmtClass:   EmitAsmStmt(cast<AsmStmt>(*S));            break;
  case Stmt::CoroutineBodyStmtClass:
    EmitCoroutineBody(cast<CoroutineBodyStmt>(*S));
    break;
  case Stmt::CoreturnStmtClass:
    EmitCoreturnStmt(cast<CoreturnStmt>(*S));
    break;
  case Stmt::CapturedStmtClass: {
    const CapturedStmt *CS = cast<CapturedStmt>(S);
    EmitCapturedStmt(*CS, CS->getCapturedRegionKind());
    }
    break;
  case Stmt::ObjCAtTryStmtClass:
    EmitObjCAtTryStmt(cast<ObjCAtTryStmt>(*S));
    break;
  case Stmt::ObjCAtCatchStmtClass:
    llvm_unreachable(
                    "@catch statements should be handled by EmitObjCAtTryStmt");
  case Stmt::ObjCAtFinallyStmtClass:
    llvm_unreachable(
                  "@finally statements should be handled by EmitObjCAtTryStmt");
  case Stmt::ObjCAtThrowStmtClass:
    EmitObjCAtThrowStmt(cast<ObjCAtThrowStmt>(*S));
    break;
  case Stmt::ObjCAtSynchronizedStmtClass:
    EmitObjCAtSynchronizedStmt(cast<ObjCAtSynchronizedStmt>(*S));
    break;
  case Stmt::ObjCForCollectionStmtClass:
    EmitObjCForCollectionStmt(cast<ObjCForCollectionStmt>(*S));
    break;
  case Stmt::ObjCAutoreleasePoolStmtClass:
    EmitObjCAutoreleasePoolStmt(cast<ObjCAutoreleasePoolStmt>(*S));
    break;

  case Stmt::CXXTryStmtClass:
    EmitCXXTryStmt(cast<CXXTryStmt>(*S));
    break;
  case Stmt::CXXForRangeStmtClass:
    EmitCXXForRangeStmt(cast<CXXForRangeStmt>(*S), Attrs);
    break;
  case Stmt::SEHTryStmtClass:
    EmitSEHTryStmt(cast<SEHTryStmt>(*S));
    break;
  case Stmt::OMPParallelDirectiveClass:
    EmitOMPParallelDirective(cast<OMPParallelDirective>(*S));
    break;
  case Stmt::OMPSimdDirectiveClass:
    EmitOMPSimdDirective(cast<OMPSimdDirective>(*S));
    break;
  case Stmt::OMPForDirectiveClass:
    EmitOMPForDirective(cast<OMPForDirective>(*S));
    break;
  case Stmt::OMPForSimdDirectiveClass:
    EmitOMPForSimdDirective(cast<OMPForSimdDirective>(*S));
    break;
  case Stmt::OMPSectionsDirectiveClass:
    EmitOMPSectionsDirective(cast<OMPSectionsDirective>(*S));
    break;
  case Stmt::OMPSectionDirectiveClass:
    EmitOMPSectionDirective(cast<OMPSectionDirective>(*S));
    break;
  case Stmt::OMPSingleDirectiveClass:
    EmitOMPSingleDirective(cast<OMPSingleDirective>(*S));
    break;
  case Stmt::OMPMasterDirectiveClass:
    EmitOMPMasterDirective(cast<OMPMasterDirective>(*S));
    break;
  case Stmt::OMPCriticalDirectiveClass:
    EmitOMPCriticalDirective(cast<OMPCriticalDirective>(*S));
    break;
  case Stmt::OMPParallelForDirectiveClass:
    EmitOMPParallelForDirective(cast<OMPParallelForDirective>(*S));
    break;
  case Stmt::OMPParallelForSimdDirectiveClass:
    EmitOMPParallelForSimdDirective(cast<OMPParallelForSimdDirective>(*S));
    break;
  case Stmt::OMPParallelMasterDirectiveClass:
    EmitOMPParallelMasterDirective(cast<OMPParallelMasterDirective>(*S));
    break;
  case Stmt::OMPParallelSectionsDirectiveClass:
    EmitOMPParallelSectionsDirective(cast<OMPParallelSectionsDirective>(*S));
    break;
  case Stmt::OMPTaskDirectiveClass:
    EmitOMPTaskDirective(cast<OMPTaskDirective>(*S));
    break;
  case Stmt::OMPTaskyieldDirectiveClass:
    EmitOMPTaskyieldDirective(cast<OMPTaskyieldDirective>(*S));
    break;
  case Stmt::OMPBarrierDirectiveClass:
    EmitOMPBarrierDirective(cast<OMPBarrierDirective>(*S));
    break;
  case Stmt::OMPTaskwaitDirectiveClass:
    EmitOMPTaskwaitDirective(cast<OMPTaskwaitDirective>(*S));
    break;
  case Stmt::OMPTaskgroupDirectiveClass:
    EmitOMPTaskgroupDirective(cast<OMPTaskgroupDirective>(*S));
    break;
  case Stmt::OMPFlushDirectiveClass:
    EmitOMPFlushDirective(cast<OMPFlushDirective>(*S));
    break;
  case Stmt::OMPOrderedDirectiveClass:
    EmitOMPOrderedDirective(cast<OMPOrderedDirective>(*S));
    break;
  case Stmt::OMPAtomicDirectiveClass:
    EmitOMPAtomicDirective(cast<OMPAtomicDirective>(*S));
    break;
  case Stmt::OMPTargetDirectiveClass:
    EmitOMPTargetDirective(cast<OMPTargetDirective>(*S));
    break;
  case Stmt::OMPTeamsDirectiveClass:
    EmitOMPTeamsDirective(cast<OMPTeamsDirective>(*S));
    break;
  case Stmt::OMPCancellationPointDirectiveClass:
    EmitOMPCancellationPointDirective(cast<OMPCancellationPointDirective>(*S));
    break;
  case Stmt::OMPCancelDirectiveClass:
    EmitOMPCancelDirective(cast<OMPCancelDirective>(*S));
    break;
  case Stmt::OMPTargetDataDirectiveClass:
    EmitOMPTargetDataDirective(cast<OMPTargetDataDirective>(*S));
    break;
  case Stmt::OMPTargetEnterDataDirectiveClass:
    EmitOMPTargetEnterDataDirective(cast<OMPTargetEnterDataDirective>(*S));
    break;
  case Stmt::OMPTargetExitDataDirectiveClass:
    EmitOMPTargetExitDataDirective(cast<OMPTargetExitDataDirective>(*S));
    break;
  case Stmt::OMPTargetParallelDirectiveClass:
    EmitOMPTargetParallelDirective(cast<OMPTargetParallelDirective>(*S));
    break;
  case Stmt::OMPTargetParallelForDirectiveClass:
    EmitOMPTargetParallelForDirective(cast<OMPTargetParallelForDirective>(*S));
    break;
  case Stmt::OMPTaskLoopDirectiveClass:
    EmitOMPTaskLoopDirective(cast<OMPTaskLoopDirective>(*S));
    break;
  case Stmt::OMPTaskLoopSimdDirectiveClass:
    EmitOMPTaskLoopSimdDirective(cast<OMPTaskLoopSimdDirective>(*S));
    break;
  case Stmt::OMPMasterTaskLoopDirectiveClass:
    EmitOMPMasterTaskLoopDirective(cast<OMPMasterTaskLoopDirective>(*S));
    break;
  case Stmt::OMPMasterTaskLoopSimdDirectiveClass:
    EmitOMPMasterTaskLoopSimdDirective(
        cast<OMPMasterTaskLoopSimdDirective>(*S));
    break;
  case Stmt::OMPParallelMasterTaskLoopDirectiveClass:
    EmitOMPParallelMasterTaskLoopDirective(
        cast<OMPParallelMasterTaskLoopDirective>(*S));
    break;
  case Stmt::OMPParallelMasterTaskLoopSimdDirectiveClass:
    EmitOMPParallelMasterTaskLoopSimdDirective(
        cast<OMPParallelMasterTaskLoopSimdDirective>(*S));
    break;
  case Stmt::OMPDistributeDirectiveClass:
    EmitOMPDistributeDirective(cast<OMPDistributeDirective>(*S));
    break;
  case Stmt::OMPTargetUpdateDirectiveClass:
    EmitOMPTargetUpdateDirective(cast<OMPTargetUpdateDirective>(*S));
    break;
  case Stmt::OMPDistributeParallelForDirectiveClass:
    EmitOMPDistributeParallelForDirective(
        cast<OMPDistributeParallelForDirective>(*S));
    break;
  case Stmt::OMPDistributeParallelForSimdDirectiveClass:
    EmitOMPDistributeParallelForSimdDirective(
        cast<OMPDistributeParallelForSimdDirective>(*S));
    break;
  case Stmt::OMPDistributeSimdDirectiveClass:
    EmitOMPDistributeSimdDirective(cast<OMPDistributeSimdDirective>(*S));
    break;
  case Stmt::OMPTargetParallelForSimdDirectiveClass:
    EmitOMPTargetParallelForSimdDirective(
        cast<OMPTargetParallelForSimdDirective>(*S));
    break;
  case Stmt::OMPTargetSimdDirectiveClass:
    EmitOMPTargetSimdDirective(cast<OMPTargetSimdDirective>(*S));
    break;
  case Stmt::OMPTeamsDistributeDirectiveClass:
    EmitOMPTeamsDistributeDirective(cast<OMPTeamsDistributeDirective>(*S));
    break;
  case Stmt::OMPTeamsDistributeSimdDirectiveClass:
    EmitOMPTeamsDistributeSimdDirective(
        cast<OMPTeamsDistributeSimdDirective>(*S));
    break;
  case Stmt::OMPTeamsDistributeParallelForSimdDirectiveClass:
    EmitOMPTeamsDistributeParallelForSimdDirective(
        cast<OMPTeamsDistributeParallelForSimdDirective>(*S));
    break;
  case Stmt::OMPTeamsDistributeParallelForDirectiveClass:
    EmitOMPTeamsDistributeParallelForDirective(
        cast<OMPTeamsDistributeParallelForDirective>(*S));
    break;
  case Stmt::OMPTargetTeamsDirectiveClass:
    EmitOMPTargetTeamsDirective(cast<OMPTargetTeamsDirective>(*S));
    break;
  case Stmt::OMPTargetTeamsDistributeDirectiveClass:
    EmitOMPTargetTeamsDistributeDirective(
        cast<OMPTargetTeamsDistributeDirective>(*S));
    break;
  case Stmt::OMPTargetTeamsDistributeParallelForDirectiveClass:
    EmitOMPTargetTeamsDistributeParallelForDirective(
        cast<OMPTargetTeamsDistributeParallelForDirective>(*S));
    break;
  case Stmt::OMPTargetTeamsDistributeParallelForSimdDirectiveClass:
    EmitOMPTargetTeamsDistributeParallelForSimdDirective(
        cast<OMPTargetTeamsDistributeParallelForSimdDirective>(*S));
    break;
  case Stmt::OMPTargetTeamsDistributeSimdDirectiveClass:
    EmitOMPTargetTeamsDistributeSimdDirective(
        cast<OMPTargetTeamsDistributeSimdDirective>(*S));
    break;
  }
}
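
// A minimal sketch of the "exit();" situation described in the
// expression-statement case above: emitting the noreturn call leaves the
// builder in a fresh, predecessor-less block, which is then erased and the
// insertion point cleared.
//
//   #include <stdlib.h>
//   void f(void) {
//     exit(1);   // noreturn: any IR after this point would be unreachable
//   }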

bool CodeGenFunction::EmitSimpleStmt(const Stmt *S) {
  switch (S->getStmtClass()) {
  default: return false;
  case Stmt::NullStmtClass: break;
  case Stmt::CompoundStmtClass: EmitCompoundStmt(cast<CompoundStmt>(*S)); break;
  case Stmt::DeclStmtClass:     EmitDeclStmt(cast<DeclStmt>(*S));         break;
  case Stmt::LabelStmtClass:    EmitLabelStmt(cast<LabelStmt>(*S));       break;
  case Stmt::AttributedStmtClass:
                            EmitAttributedStmt(cast<AttributedStmt>(*S)); break;
  case Stmt::GotoStmtClass:     EmitGotoStmt(cast<GotoStmt>(*S));         break;
  case Stmt::BreakStmtClass:    EmitBreakStmt(cast<BreakStmt>(*S));       break;
  case Stmt::ContinueStmtClass: EmitContinueStmt(cast<ContinueStmt>(*S)); break;
  case Stmt::DefaultStmtClass:  EmitDefaultStmt(cast<DefaultStmt>(*S));   break;
  case Stmt::CaseStmtClass:     EmitCaseStmt(cast<CaseStmt>(*S));         break;
  case Stmt::SEHLeaveStmtClass: EmitSEHLeaveStmt(cast<SEHLeaveStmt>(*S)); break;
  }

  return true;
}
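
// For illustration, a sketch in which the braces, declaration, label, null
// statement, and goto all take the EmitSimpleStmt fast path rather than the
// main switch in EmitStmt (the `if` itself still goes through EmitStmt):
//
//   void g(int n) {
//     int i = 0;      // DeclStmt
//   again:            // LabelStmt
//     ;               // NullStmt
//     if (++i < n)
//       goto again;   // GotoStmt
//   }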

/// EmitCompoundStmt - Emit a compound statement {..} node.  If GetLast is true,
/// this captures the expression result of the last sub-statement and returns it
/// (for use by the statement expression extension).
Address CodeGenFunction::EmitCompoundStmt(const CompoundStmt &S, bool GetLast,
                                          AggValueSlot AggSlot) {
  PrettyStackTraceLoc CrashInfo(getContext().getSourceManager(),S.getLBracLoc(),
                             "LLVM IR generation of compound statement ('{}')");

  // Keep track of the current cleanup stack depth, including debug scopes.
  LexicalScope Scope(*this, S.getSourceRange());

  return EmitCompoundStmtWithoutScope(S, GetLast, AggSlot);
}

Address
CodeGenFunction::EmitCompoundStmtWithoutScope(const CompoundStmt &S,
                                              bool GetLast,
                                              AggValueSlot AggSlot) {

  const Stmt *ExprResult = S.getStmtExprResult();
  assert((!GetLast || (GetLast && ExprResult)) &&
         "If GetLast is true then the CompoundStmt must have a StmtExprResult");

  Address RetAlloca = Address::invalid();

  for (auto *CurStmt : S.body()) {
    if (GetLast && ExprResult == CurStmt) {
      // We have to special case labels here.  They are statements, but when put
      // at the end of a statement expression, they yield the value of their
      // subexpression.  Handle this by walking through all labels we encounter,
      // emitting them before we evaluate the subexpr.
      // Similar issues arise for attributed statements.
      while (!isa<Expr>(ExprResult)) {
        if (const auto *LS = dyn_cast<LabelStmt>(ExprResult)) {
          EmitLabel(LS->getDecl());
          ExprResult = LS->getSubStmt();
        } else if (const auto *AS = dyn_cast<AttributedStmt>(ExprResult)) {
          // FIXME: Update this if we ever have attributes that affect the
          // semantics of an expression.
          ExprResult = AS->getSubStmt();
        } else {
          llvm_unreachable("unknown value statement");
        }
      }

      EnsureInsertPoint();

      const Expr *E = cast<Expr>(ExprResult);
      QualType ExprTy = E->getType();
      if (hasAggregateEvaluationKind(ExprTy)) {
        EmitAggExpr(E, AggSlot);
      } else {
        // We can't return an RValue here because there might be cleanups at
        // the end of the StmtExpr.  Because of that, we have to emit the result
        // here into a temporary alloca.
        RetAlloca = CreateMemTemp(ExprTy);
        EmitAnyExprToMem(E, RetAlloca, Qualifiers(),
                         /*IsInit*/ false);
      }
    } else {
      EmitStmt(CurStmt);
    }
  }

  return RetAlloca;
}
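
// The GetLast path above serves the GCC statement-expression extension; a
// sketch, including the trailing-label case that the while loop unwraps:
//
//   int h(int a) {
//     int x = ({ int t = a * 2; t + 1; });  // value of the last expression
//     int y = ({ last: a; });               // label in front of the result
//     return x + y;
//   }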

void CodeGenFunction::SimplifyForwardingBlocks(llvm::BasicBlock *BB) {
  llvm::BranchInst *BI = dyn_cast<llvm::BranchInst>(BB->getTerminator());

  // If there is a cleanup stack, then it isn't worth trying to
  // simplify this block (we would need to remove it from the scope map
  // and cleanup entry).
  if (!EHStack.empty())
    return;

  // Can only simplify direct branches.
  if (!BI || !BI->isUnconditional())
    return;

  // Can only simplify empty blocks.
  if (BI->getIterator() != BB->begin())
    return;

  BB->replaceAllUsesWith(BI->getSuccessor(0));
  BI->eraseFromParent();
  BB->eraseFromParent();
}

void CodeGenFunction::EmitBlock(llvm::BasicBlock *BB, bool IsFinished) {
  llvm::BasicBlock *CurBB = Builder.GetInsertBlock();

  // Fall out of the current block (if necessary).
  EmitBranch(BB);

  if (IsFinished && BB->use_empty()) {
    delete BB;
    return;
  }

  // Place the block after the current block, if possible, or else at
  // the end of the function.
  if (CurBB && CurBB->getParent())
    CurFn->getBasicBlockList().insertAfter(CurBB->getIterator(), BB);
  else
    CurFn->getBasicBlockList().push_back(BB);
  Builder.SetInsertPoint(BB);
}

void CodeGenFunction::EmitBranch(llvm::BasicBlock *Target) {
  // Emit a branch from the current block to the target one if this
  // was a real block.  If this was just a fall-through block after a
  // terminator, don't emit it.
  llvm::BasicBlock *CurBB = Builder.GetInsertBlock();

  if (!CurBB || CurBB->getTerminator()) {
    // If there is no insert point or the previous block is already
    // terminated, don't touch it.
  } else {
    // Otherwise, create a fall-through branch.
    Builder.CreateBr(Target);
  }

  Builder.ClearInsertionPoint();
}

void CodeGenFunction::EmitBlockAfterUses(llvm::BasicBlock *block) {
  bool inserted = false;
  for (llvm::User *u : block->users()) {
    if (llvm::Instruction *insn = dyn_cast<llvm::Instruction>(u)) {
      CurFn->getBasicBlockList().insertAfter(insn->getParent()->getIterator(),
                                             block);
      inserted = true;
      break;
    }
  }

  if (!inserted)
    CurFn->getBasicBlockList().push_back(block);

  Builder.SetInsertPoint(block);
}

CodeGenFunction::JumpDest
CodeGenFunction::getJumpDestForLabel(const LabelDecl *D) {
  JumpDest &Dest = LabelMap[D];
  if (Dest.isValid()) return Dest;

  // Create, but don't insert, the new block.
  Dest = JumpDest(createBasicBlock(D->getName()),
                  EHScopeStack::stable_iterator::invalid(),
                  NextCleanupDestIndex++);
  return Dest;
}

void CodeGenFunction::EmitLabel(const LabelDecl *D) {
  // Add this label to the current lexical scope if we're within any
  // normal cleanups.  Jumps "in" to this label --- when permitted by
  // the language --- may need to be routed around such cleanups.
  if (EHStack.hasNormalCleanups() && CurLexicalScope)
    CurLexicalScope->addLabel(D);

  JumpDest &Dest = LabelMap[D];

  // If we didn't need a forward reference to this label, just go
  // ahead and create a destination at the current scope.
  if (!Dest.isValid()) {
    Dest = getJumpDestInCurrentScope(D->getName());

  // Otherwise, we need to give this label a target depth and remove
  // it from the branch-fixups list.
  } else {
    assert(!Dest.getScopeDepth().isValid() && "already emitted label!");
    Dest.setScopeDepth(EHStack.stable_begin());
    ResolveBranchFixups(Dest.getBlock());
  }

  EmitBlock(Dest.getBlock());

  // Emit debug info for labels.
  if (CGDebugInfo *DI = getDebugInfo()) {
    if (CGM.getCodeGenOpts().hasReducedDebugInfo()) {
      DI->setLocation(D->getLocation());
      DI->EmitLabel(D, Builder);
    }
  }

  incrementProfileCounter(D->getStmt());
}

/// Change the cleanup scope of the labels in this lexical scope to
/// match the scope of the enclosing context.
void CodeGenFunction::LexicalScope::rescopeLabels() {
  assert(!Labels.empty());
  EHScopeStack::stable_iterator innermostScope
    = CGF.EHStack.getInnermostNormalCleanup();

  // Change the scope depth of all the labels.
  for (SmallVectorImpl<const LabelDecl*>::const_iterator
         i = Labels.begin(), e = Labels.end(); i != e; ++i) {
    assert(CGF.LabelMap.count(*i));
    JumpDest &dest = CGF.LabelMap.find(*i)->second;
    assert(dest.getScopeDepth().isValid());
    assert(innermostScope.encloses(dest.getScopeDepth()));
    dest.setScopeDepth(innermostScope);
  }

  // Reparent the labels if the new scope also has cleanups.
  if (innermostScope != EHScopeStack::stable_end() && ParentScope) {
    ParentScope->Labels.append(Labels.begin(), Labels.end());
  }
}

void CodeGenFunction::EmitLabelStmt(const LabelStmt &S) {
  EmitLabel(S.getDecl());
  EmitStmt(S.getSubStmt());
}

void CodeGenFunction::EmitAttributedStmt(const AttributedStmt &S) {
  EmitStmt(S.getSubStmt(), S.getAttrs());
}

void CodeGenFunction::EmitGotoStmt(const GotoStmt &S) {
  // If this code is reachable then emit a stop point (if generating
  // debug info). We have to do this ourselves because we are on the
  // "simple" statement path.
  if (HaveInsertPoint())
    EmitStopPoint(&S);

  EmitBranchThroughCleanup(getJumpDestForLabel(S.getLabel()));
}

void CodeGenFunction::EmitIndirectGotoStmt(const IndirectGotoStmt &S) {
  if (const LabelDecl *Target = S.getConstantTarget()) {
    EmitBranchThroughCleanup(getJumpDestForLabel(Target));
    return;
  }

  // Ensure that we have an i8* for our PHI node.
  llvm::Value *V = Builder.CreateBitCast(EmitScalarExpr(S.getTarget()),
                                         Int8PtrTy, "addr");
  llvm::BasicBlock *CurBB = Builder.GetInsertBlock();

  // Get the basic block for the indirect goto.
  llvm::BasicBlock *IndGotoBB = GetIndirectGotoBlock();

  // The first instruction in the block has to be the PHI for the switch dest,
  // add an entry for this branch.
  cast<llvm::PHINode>(IndGotoBB->begin())->addIncoming(V, CurBB);

  EmitBranch(IndGotoBB);
}
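
// A sketch of the GNU computed-goto construct handled above; every
// `goto *addr` routes its target address into the PHI node at the head of
// the function's shared indirect-goto block:
//
//   void dispatch(int op) {
//     static void *tbl[] = { &&op_add, &&op_done };
//     goto *tbl[op & 1];
//   op_add:
//     /* ... */
//   op_done:
//     return;
//   }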

void CodeGenFunction::EmitIfStmt(const IfStmt &S) {
  // C99 6.8.4.1: The first substatement is executed if the expression compares
  // unequal to 0.  The condition must be a scalar type.
  LexicalScope ConditionScope(*this, S.getCond()->getSourceRange());

  if (S.getInit())
    EmitStmt(S.getInit());

  if (S.getConditionVariable())
    EmitDecl(*S.getConditionVariable());

  // If the condition constant folds and can be elided, try to avoid emitting
  // the condition and the dead arm of the if/else.
  bool CondConstant;
  if (ConstantFoldsToSimpleInteger(S.getCond(), CondConstant,
                                   S.isConstexpr())) {
    // Figure out which block (then or else) is executed.
    const Stmt *Executed = S.getThen();
    const Stmt *Skipped  = S.getElse();
    if (!CondConstant)  // Condition false?
      std::swap(Executed, Skipped);

    // If the skipped block has no labels in it, just emit the executed block.
    // This avoids emitting dead code and simplifies the CFG substantially.
    if (S.isConstexpr() || !ContainsLabel(Skipped)) {
      if (CondConstant)
        incrementProfileCounter(&S);
      if (Executed) {
        RunCleanupsScope ExecutedScope(*this);
        EmitStmt(Executed);
      }
      return;
    }
  }

  // Otherwise, the condition did not fold, or we couldn't elide it.  Just emit
  // the conditional branch.
  llvm::BasicBlock *ThenBlock = createBasicBlock("if.then");
  llvm::BasicBlock *ContBlock = createBasicBlock("if.end");
  llvm::BasicBlock *ElseBlock = ContBlock;
  if (S.getElse())
    ElseBlock = createBasicBlock("if.else");

  EmitBranchOnBoolExpr(S.getCond(), ThenBlock, ElseBlock,
                       getProfileCount(S.getThen()));

  // Emit the 'then' code.
  EmitBlock(ThenBlock);
  incrementProfileCounter(&S);
  {
    RunCleanupsScope ThenScope(*this);
    EmitStmt(S.getThen());
  }
  EmitBranch(ContBlock);

  // Emit the 'else' code if present.
  if (const Stmt *Else = S.getElse()) {
    {
      // There is no need to emit line number for an unconditional branch.
      auto NL = ApplyDebugLocation::CreateEmpty(*this);
      EmitBlock(ElseBlock);
    }
    {
      RunCleanupsScope ElseScope(*this);
      EmitStmt(Else);
    }
    {
      // There is no need to emit line number for an unconditional branch.
      auto NL = ApplyDebugLocation::CreateEmpty(*this);
      EmitBranch(ContBlock);
    }
  }

  // Emit the continuation block for code after the if.
  EmitBlock(ContBlock, true);
}
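
// A sketch of the constant-folding path above (live() and dead() are
// hypothetical): the condition folds to true, so only the live arm is
// emitted, unless the dead arm contains a label reachable by goto.
//
//   void live(void); void dead(void);
//   void f(void) {
//     if (1)
//       live();
//     else
//       dead();   // elided: no IR is emitted for this arm
//   }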

void CodeGenFunction::EmitWhileStmt(const WhileStmt &S,
                                    ArrayRef<const Attr *> WhileAttrs) {
  // Emit the header for the loop, which will also become
  // the continue target.
  JumpDest LoopHeader = getJumpDestInCurrentScope("while.cond");
  EmitBlock(LoopHeader.getBlock());

  const SourceRange &R = S.getSourceRange();
  LoopStack.push(LoopHeader.getBlock(), CGM.getContext(), WhileAttrs,
                 SourceLocToDebugLoc(R.getBegin()),
                 SourceLocToDebugLoc(R.getEnd()));

  // Create an exit block for when the condition fails, which will
  // also become the break target.
  JumpDest LoopExit = getJumpDestInCurrentScope("while.end");

  // Store the blocks to use for break and continue.
  BreakContinueStack.push_back(BreakContinue(LoopExit, LoopHeader));

  // C++ [stmt.while]p2:
  //   When the condition of a while statement is a declaration, the
  //   scope of the variable that is declared extends from its point
  //   of declaration (3.3.2) to the end of the while statement.
  //   [...]
  //   The object created in a condition is destroyed and created
  //   with each iteration of the loop.
  RunCleanupsScope ConditionScope(*this);

  if (S.getConditionVariable())
    EmitDecl(*S.getConditionVariable());

  // Evaluate the conditional in the while header.  C99 6.8.5.1: The
  // evaluation of the controlling expression takes place before each
  // execution of the loop body.
  llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());

  // while(1) is common, avoid extra exit blocks.  Be sure
  // to correctly handle break/continue though.
  bool EmitBoolCondBranch = true;
  if (llvm::ConstantInt *C = dyn_cast<llvm::ConstantInt>(BoolCondVal))
    if (C->isOne())
      EmitBoolCondBranch = false;

  // As long as the condition is true, go to the loop body.
  llvm::BasicBlock *LoopBody = createBasicBlock("while.body");
  if (EmitBoolCondBranch) {
    llvm::BasicBlock *ExitBlock = LoopExit.getBlock();
    if (ConditionScope.requiresCleanups())
      ExitBlock = createBasicBlock("while.exit");
    Builder.CreateCondBr(
        BoolCondVal, LoopBody, ExitBlock,
        createProfileWeightsForLoop(S.getCond(), getProfileCount(S.getBody())));

    if (ExitBlock != LoopExit.getBlock()) {
      EmitBlock(ExitBlock);
      EmitBranchThroughCleanup(LoopExit);
    }
  }

  // Emit the loop body.  We have to emit this in a cleanup scope
  // because it might be a singleton DeclStmt.
  {
    RunCleanupsScope BodyScope(*this);
    EmitBlock(LoopBody);
    incrementProfileCounter(&S);
    EmitStmt(S.getBody());
  }

  BreakContinueStack.pop_back();

  // Immediately force cleanup.
  ConditionScope.ForceCleanup();

  EmitStopPoint(&S);
  // Branch to the loop header again.
  EmitBranch(LoopHeader.getBlock());

  LoopStack.pop();

  // Emit the exit block.
  EmitBlock(LoopExit.getBlock(), true);

  // The LoopHeader typically is just a branch if we skipped emitting
  // a branch, try to erase it.
  if (!EmitBoolCondBranch)
    SimplifyForwardingBlocks(LoopHeader.getBlock());
}
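
// The EmitBoolCondBranch special case above targets the common `while (1)`
// idiom: no conditional branch is emitted, yet break still works through
// BreakContinueStack. A sketch:
//
//   void spin(volatile int *flag) {
//     while (1) {
//       if (*flag)
//         break;   // branches to LoopExit despite the folded condition
//     }
//   }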

void CodeGenFunction::EmitDoStmt(const DoStmt &S,
                                 ArrayRef<const Attr *> DoAttrs) {
  JumpDest LoopExit = getJumpDestInCurrentScope("do.end");
  JumpDest LoopCond = getJumpDestInCurrentScope("do.cond");

  uint64_t ParentCount = getCurrentProfileCount();

  // Store the blocks to use for break and continue.
  BreakContinueStack.push_back(BreakContinue(LoopExit, LoopCond));

  // Emit the body of the loop.
  llvm::BasicBlock *LoopBody = createBasicBlock("do.body");

  EmitBlockWithFallThrough(LoopBody, &S);
  {
    RunCleanupsScope BodyScope(*this);
    EmitStmt(S.getBody());
  }

  EmitBlock(LoopCond.getBlock());

  const SourceRange &R = S.getSourceRange();
  LoopStack.push(LoopBody, CGM.getContext(), DoAttrs,
                 SourceLocToDebugLoc(R.getBegin()),
                 SourceLocToDebugLoc(R.getEnd()));

  // C99 6.8.5.2: "The evaluation of the controlling expression takes place
  // after each execution of the loop body."

  // Evaluate the conditional in the while header.
  // C99 6.8.5p2/p4: The first substatement is executed if the expression
  // compares unequal to 0.  The condition must be a scalar type.
  llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());

  BreakContinueStack.pop_back();

  // "do {} while (0)" is common in macros, avoid extra blocks.  Be sure
  // to correctly handle break/continue though.
  bool EmitBoolCondBranch = true;
  if (llvm::ConstantInt *C = dyn_cast<llvm::ConstantInt>(BoolCondVal))
    if (C->isZero())
      EmitBoolCondBranch = false;

  // As long as the condition is true, iterate the loop.
  if (EmitBoolCondBranch) {
    uint64_t BackedgeCount = getProfileCount(S.getBody()) - ParentCount;
    Builder.CreateCondBr(
        BoolCondVal, LoopBody, LoopExit.getBlock(),
        createProfileWeightsForLoop(S.getCond(), BackedgeCount));
  }

  LoopStack.pop();

  // Emit the exit block.
  EmitBlock(LoopExit.getBlock());

  // The DoCond block typically is just a branch if we skipped
  // emitting a branch, try to erase it.
  if (!EmitBoolCondBranch)
    SimplifyForwardingBlocks(LoopCond.getBlock());
}
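
// The isZero() special case above matches the classic `do { } while (0)`
// macro idiom: no backedge branch is emitted, and the empty do.cond block
// is then erased by SimplifyForwardingBlocks. A sketch:
//
//   #define SWAP(a, b) do { int t = (a); (a) = (b); (b) = t; } while (0)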

void CodeGenFunction::EmitForStmt(const ForStmt &S,
                                  ArrayRef<const Attr *> ForAttrs) {
  JumpDest LoopExit = getJumpDestInCurrentScope("for.end");

  LexicalScope ForScope(*this, S.getSourceRange());

  // Evaluate the first part before the loop.
  if (S.getInit())
    EmitStmt(S.getInit());

  // Start the loop with a block that tests the condition.
  // If there's an increment, the continue scope will be overwritten
  // later.
  JumpDest Continue = getJumpDestInCurrentScope("for.cond");
  llvm::BasicBlock *CondBlock = Continue.getBlock();
  EmitBlock(CondBlock);

  const SourceRange &R = S.getSourceRange();
  LoopStack.push(CondBlock, CGM.getContext(), ForAttrs,
                 SourceLocToDebugLoc(R.getBegin()),
                 SourceLocToDebugLoc(R.getEnd()));

  // If the for loop doesn't have an increment we can just use the
  // condition as the continue block.  Otherwise we'll need to create
  // a block for it (in the current scope, i.e. in the scope of the
  // condition), and that block will become our continue block.
  if (S.getInc())
    Continue = getJumpDestInCurrentScope("for.inc");

  // Store the blocks to use for break and continue.
  BreakContinueStack.push_back(BreakContinue(LoopExit, Continue));

  // Create a cleanup scope for the condition variable cleanups.
  LexicalScope ConditionScope(*this, S.getSourceRange());

  if (S.getCond()) {
    // If the for statement has a condition scope, emit the local variable
    // declaration.
    if (S.getConditionVariable()) {
      EmitDecl(*S.getConditionVariable());
    }

    llvm::BasicBlock *ExitBlock = LoopExit.getBlock();
    // If there are any cleanups between here and the loop-exit scope,
    // create a block to stage a loop exit along.
    if (ForScope.requiresCleanups())
      ExitBlock = createBasicBlock("for.cond.cleanup");

    // As long as the condition is true, iterate the loop.
    llvm::BasicBlock *ForBody = createBasicBlock("for.body");

    // C99 6.8.5p2/p4: The first substatement is executed if the expression
    // compares unequal to 0.  The condition must be a scalar type.
    llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());
    Builder.CreateCondBr(
        BoolCondVal, ForBody, ExitBlock,
        createProfileWeightsForLoop(S.getCond(), getProfileCount(S.getBody())));

    if (ExitBlock != LoopExit.getBlock()) {
      EmitBlock(ExitBlock);
      EmitBranchThroughCleanup(LoopExit);
    }

    EmitBlock(ForBody);
  } else {
    // Treat it as a non-zero constant.  Don't even create a new block for the
    // body, just fall into it.
  }
  incrementProfileCounter(&S);

  {
    // Create a separate cleanup scope for the body, in case it is not
    // a compound statement.
    RunCleanupsScope BodyScope(*this);
    EmitStmt(S.getBody());
  }

  // If there is an increment, emit it next.
  if (S.getInc()) {
    EmitBlock(Continue.getBlock());
    EmitStmt(S.getInc());
  }

  BreakContinueStack.pop_back();

  ConditionScope.ForceCleanup();

  EmitStopPoint(&S);
  EmitBranch(CondBlock);

  ForScope.ForceCleanup();

  LoopStack.pop();

  // Emit the fall-through block.
  EmitBlock(LoopExit.getBlock(), true);
}

void
CodeGenFunction::EmitCXXForRangeStmt(const CXXForRangeStmt &S,
                                     ArrayRef<const Attr *> ForAttrs) {
  JumpDest LoopExit = getJumpDestInCurrentScope("for.end");

  LexicalScope ForScope(*this, S.getSourceRange());

  // Evaluate the first pieces before the loop.
  if (S.getInit())
    EmitStmt(S.getInit());
  EmitStmt(S.getRangeStmt());
  EmitStmt(S.getBeginStmt());
  EmitStmt(S.getEndStmt());

  // Start the loop with a block that tests the condition.
  // If there's an increment, the continue scope will be overwritten
  // later.
  llvm::BasicBlock *CondBlock = createBasicBlock("for.cond");
  EmitBlock(CondBlock);

  const SourceRange &R = S.getSourceRange();
  LoopStack.push(CondBlock, CGM.getContext(), ForAttrs,
                 SourceLocToDebugLoc(R.getBegin()),
                 SourceLocToDebugLoc(R.getEnd()));

  // If there are any cleanups between here and the loop-exit scope,
  // create a block to stage a loop exit along.
  llvm::BasicBlock *ExitBlock = LoopExit.getBlock();
  if (ForScope.requiresCleanups())
    ExitBlock = createBasicBlock("for.cond.cleanup");

  // The loop body, consisting of the specified body and the loop variable.
  llvm::BasicBlock *ForBody = createBasicBlock("for.body");

  // The body is executed if the expression, contextually converted
  // to bool, is true.
  llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());
  Builder.CreateCondBr(
      BoolCondVal, ForBody, ExitBlock,
      createProfileWeightsForLoop(S.getCond(), getProfileCount(S.getBody())));

  if (ExitBlock != LoopExit.getBlock()) {
    EmitBlock(ExitBlock);
    EmitBranchThroughCleanup(LoopExit);
  }

  EmitBlock(ForBody);
  incrementProfileCounter(&S);

  // Create a block for the increment. In case of a 'continue', we jump there.
  JumpDest Continue = getJumpDestInCurrentScope("for.inc");

  // Store the blocks to use for break and continue.
  BreakContinueStack.push_back(BreakContinue(LoopExit, Continue));

  {
    // Create a separate cleanup scope for the loop variable and body.
    LexicalScope BodyScope(*this, S.getSourceRange());
    EmitStmt(S.getLoopVarStmt());
    EmitStmt(S.getBody());
  }

  EmitStopPoint(&S);
  // If there is an increment, emit it next.
  EmitBlock(Continue.getBlock());
  EmitStmt(S.getInc());

  BreakContinueStack.pop_back();

  EmitBranch(CondBlock);

  ForScope.ForceCleanup();

  LoopStack.pop();

  // Emit the fall-through block.
  EmitBlock(LoopExit.getBlock(), true);
}

void CodeGenFunction::EmitReturnOfRValue(RValue RV, QualType Ty) {
  if (RV.isScalar()) {
    Builder.CreateStore(RV.getScalarVal(), ReturnValue);
  } else if (RV.isAggregate()) {
    LValue Dest = MakeAddrLValue(ReturnValue, Ty);
    LValue Src = MakeAddrLValue(RV.getAggregateAddress(), Ty);
    EmitAggregateCopy(Dest, Src, Ty, getOverlapForReturnValue());
  } else {
    EmitStoreOfComplex(RV.getComplexVal(), MakeAddrLValue(ReturnValue, Ty),
                       /*init*/ true);
  }
  EmitBranchThroughCleanup(ReturnBlock);
}

/// EmitReturnStmt - Note that due to GCC extensions, this can have an operand
/// if the function returns void, or may be missing one if the function returns
/// non-void.  Fun stuff :).
void CodeGenFunction::EmitReturnStmt(const ReturnStmt &S) {
  if (requiresReturnValueCheck()) {
    llvm::Constant *SLoc = EmitCheckSourceLocation(S.getBeginLoc());
    auto *SLocPtr =
        new llvm::GlobalVariable(CGM.getModule(), SLoc->getType(), false,
                                 llvm::GlobalVariable::PrivateLinkage, SLoc);
    SLocPtr->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
    CGM.getSanitizerMetadata()->disableSanitizerForGlobal(SLocPtr);
    assert(ReturnLocation.isValid() && "No valid return location");
    Builder.CreateStore(Builder.CreateBitCast(SLocPtr, Int8PtrTy),
                        ReturnLocation);
  }

  // Returning from an outlined SEH helper is UB, and we already warn on it.
  if (IsOutlinedSEHHelper) {
    Builder.CreateUnreachable();
    Builder.ClearInsertionPoint();
  }

  // Emit the result value, even if unused, to evaluate the side effects.
  const Expr *RV = S.getRetValue();

  // Treat block literals in a return expression as if they appeared
  // in their own scope.  This permits a small, easily-implemented
  // exception to our over-conservative rules about not jumping to
  // statements following block literals with non-trivial cleanups.
  RunCleanupsScope cleanupScope(*this);
  if (const FullExpr *fe = dyn_cast_or_null<FullExpr>(RV)) {
    enterFullExpression(fe);
    RV = fe->getSubExpr();
  }

  // FIXME: Clean this up by using an LValue for ReturnTemp,
  // EmitStoreThroughLValue, and EmitAnyExpr.
  if (getLangOpts().ElideConstructors && S.getNRVOCandidate() &&
      S.getNRVOCandidate()->isNRVOVariable()) {
    // Apply the named return value optimization for this return statement,
    // which means doing nothing: the appropriate result has already been
    // constructed into the NRVO variable.

    // If there is an NRVO flag for this variable, set it to 1 to indicate
    // that the cleanup code should not destroy the variable.
    if (llvm::Value *NRVOFlag = NRVOFlags[S.getNRVOCandidate()])
      Builder.CreateFlagStore(Builder.getTrue(), NRVOFlag);
  } else if (!ReturnValue.isValid() || (RV && RV->getType()->isVoidType())) {
    // Make sure not to return anything, but evaluate the expression
    // for side effects.
    if (RV)
      EmitAnyExpr(RV);
  } else if (!RV) {
    // Do nothing (return value is left uninitialized)
  } else if (FnRetTy->isReferenceType()) {
    // If this function returns a reference, take the address of the expression
    // rather than the value.
    RValue Result = EmitReferenceBindingToExpr(RV);
    Builder.CreateStore(Result.getScalarVal(), ReturnValue);
  } else {
    switch (getEvaluationKind(RV->getType())) {
    case TEK_Scalar:
      Builder.CreateStore(EmitScalarExpr(RV), ReturnValue);
      break;
    case TEK_Complex:
      EmitComplexExprIntoLValue(RV, MakeAddrLValue(ReturnValue, RV->getType()),
                                /*isInit*/ true);
      break;
    case TEK_Aggregate:
      EmitAggExpr(RV, AggValueSlot::forAddr(
                          ReturnValue, Qualifiers(),
                          AggValueSlot::IsDestructed,
                          AggValueSlot::DoesNotNeedGCBarriers,
                          AggValueSlot::IsNotAliased,
                          getOverlapForReturnValue()));
      break;
    }
  }

  ++NumReturnExprs;
  if (!RV || RV->isEvaluatable(getContext()))
    ++NumSimpleReturnExprs;

  cleanupScope.ForceCleanup();
  EmitBranchThroughCleanup(ReturnBlock);
}
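
// A sketch of the NRVO branch above (Big is a hypothetical class type with a
// non-trivial destructor): `result` is constructed directly in the return
// slot, so the return statement only has to set the NRVO flag.
//
//   Big make() {
//     Big result;      // NRVO candidate, built in the return slot
//     result.fill();
//     return result;   // no copy or move is emitted here
//   }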

void CodeGenFunction::EmitDeclStmt(const DeclStmt &S) {
  // As long as debug info is modeled with instructions, we have to ensure we
  // have a place to insert here and write the stop point here.
  if (HaveInsertPoint())
    EmitStopPoint(&S);

  for (const auto *I : S.decls())
    EmitDecl(*I);
}

void CodeGenFunction::EmitBreakStmt(const BreakStmt &S) {
  assert(!BreakContinueStack.empty() && "break stmt not in a loop or switch!");

  // If this code is reachable then emit a stop point (if generating
  // debug info). We have to do this ourselves because we are on the
  // "simple" statement path.
  if (HaveInsertPoint())
    EmitStopPoint(&S);

  EmitBranchThroughCleanup(BreakContinueStack.back().BreakBlock);
}

void CodeGenFunction::EmitContinueStmt(const ContinueStmt &S) {
  assert(!BreakContinueStack.empty() && "continue stmt not in a loop!");

  // If this code is reachable then emit a stop point (if generating
  // debug info). We have to do this ourselves because we are on the
  // "simple" statement path.
  if (HaveInsertPoint())
    EmitStopPoint(&S);

  EmitBranchThroughCleanup(BreakContinueStack.back().ContinueBlock);
}

/// EmitCaseStmtRange - If case statement range is not too big then
/// add multiple cases to switch instruction, one for each value within
/// the range. If range is too big then emit "if" condition check.
void CodeGenFunction::EmitCaseStmtRange(const CaseStmt &S) {
  assert(S.getRHS() && "Expected RHS value in CaseStmt");

  llvm::APSInt LHS = S.getLHS()->EvaluateKnownConstInt(getContext());
  llvm::APSInt RHS = S.getRHS()->EvaluateKnownConstInt(getContext());

  // Emit the code for this case. We do this first to make sure it is
  // properly chained from our predecessor before generating the
  // switch machinery to enter this block.
  llvm::BasicBlock *CaseDest = createBasicBlock("sw.bb");
  EmitBlockWithFallThrough(CaseDest, &S);
  EmitStmt(S.getSubStmt());

  // If range is empty, do nothing.
  if (LHS.isSigned() ? RHS.slt(LHS) : RHS.ult(LHS))
    return;

  llvm::APInt Range = RHS - LHS;
  // FIXME: parameters such as this should not be hardcoded.
  if (Range.ult(llvm::APInt(Range.getBitWidth(), 64))) {
    // Range is small enough to add multiple switch instruction cases.
    uint64_t Total = getProfileCount(&S);
    unsigned NCases = Range.getZExtValue() + 1;
    // We only have one region counter for the entire set of cases here, so we
    // need to divide the weights evenly between the generated cases, ensuring
    // that the total weight is preserved. E.g., a weight of 5 over three cases
    // will be distributed as weights of 2, 2, and 1.
    uint64_t Weight = Total / NCases, Rem = Total % NCases;
    for (unsigned I = 0; I != NCases; ++I) {
      if (SwitchWeights)
        SwitchWeights->push_back(Weight + (Rem ? 1 : 0));
      if (Rem)
        Rem--;
      SwitchInsn->addCase(Builder.getInt(LHS), CaseDest);
      ++LHS;
    }
    return;
  }

  // The range is too big. Emit "if" condition into a new block,
  // making sure to save and restore the current insertion point.
  llvm::BasicBlock *RestoreBB = Builder.GetInsertBlock();

  // Push this test onto the chain of range checks (which terminates
  // in the default basic block). The switch's default will be changed
  // to the top of this chain after switch emission is complete.
  llvm::BasicBlock *FalseDest = CaseRangeBlock;
  CaseRangeBlock = createBasicBlock("sw.caserange");

  CurFn->getBasicBlockList().push_back(CaseRangeBlock);
  Builder.SetInsertPoint(CaseRangeBlock);

  // Emit range check.
  llvm::Value *Diff =
    Builder.CreateSub(SwitchInsn->getCondition(), Builder.getInt(LHS));
  llvm::Value *Cond =
    Builder.CreateICmpULE(Diff, Builder.getInt(Range), "inbounds");

  llvm::MDNode *Weights = nullptr;
  if (SwitchWeights) {
    uint64_t ThisCount = getProfileCount(&S);
    uint64_t DefaultCount = (*SwitchWeights)[0];
    Weights = createProfileWeights(ThisCount, DefaultCount);

    // Since we're chaining the switch default through each large case range,
    // we need to update the weight for the default, ie, the first case, to
    // include this case.
    (*SwitchWeights)[0] += ThisCount;
  }
  Builder.CreateCondBr(Cond, CaseDest, FalseDest, Weights);

  // Restore the appropriate insertion point.
  if (RestoreBB)
    Builder.SetInsertPoint(RestoreBB);
  else
    Builder.ClearInsertionPoint();
}
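
// Sketches of the two strategies above for GNU case ranges: a span of fewer
// than 64 values becomes individual switch cases, while a larger span is
// emitted as a subtract-and-compare "inbounds" check chained off the default.
//
//   switch (n) {
//   case 0 ... 3:      /* small range: four separate cases */  break;
//   case 100 ... 9999: /* large range: sw.caserange check */   break;
//   }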
1258
1259
887
void CodeGenFunction::EmitCaseStmt(const CaseStmt &S) {
1260
887
  // If there is no enclosing switch instance that we're aware of, then this
1261
887
  // case statement and its block can be elided.  This situation only happens
1262
887
  // when we've constant-folded the switch, are emitting the constant case,
1263
887
  // and part of the constant case includes another case statement.  For
1264
887
  // instance: switch (4) { case 4: do { case 5: } while (1); }
1265
887
  if (!SwitchInsn) {
1266
2
    EmitStmt(S.getSubStmt());
1267
2
    return;
1268
2
  }
1269
885
1270
885
  // Handle case ranges.
1271
885
  if (S.getRHS()) {
1272
35
    EmitCaseStmtRange(S);
1273
35
    return;
1274
35
  }
1275
850
1276
850
  llvm::ConstantInt *CaseVal =
1277
850
    Builder.getInt(S.getLHS()->EvaluateKnownConstInt(getContext()));
1278
850
1279
850
  // If the body of the case is just a 'break', try not to emit an empty block.
1280
850
  // If we're profiling or we're not optimizing, leave the block in for better
1281
850
  // debug and coverage analysis.
1282
850
  if (!CGM.getCodeGenOpts().hasProfileClangInstr() &&
1283
850
      CGM.getCodeGenOpts().OptimizationLevel > 0 &&
1284
850
      isa<BreakStmt>(S.getSubStmt())) {
1285
0
    JumpDest Block = BreakContinueStack.back().BreakBlock;
1286
0
1287
0
    // Only do this optimization if there are no cleanups that need emitting.
1288
0
    if (isObviouslyBranchWithoutCleanups(Block)) {
1289
0
      if (SwitchWeights)
1290
0
        SwitchWeights->push_back(getProfileCount(&S));
1291
0
      SwitchInsn->addCase(CaseVal, Block.getBlock());
1292
0
1293
0
      // If there was a fallthrough into this case, make sure to redirect it to
1294
0
      // the end of the switch as well.
1295
0
      if (Builder.GetInsertBlock()) {
1296
0
        Builder.CreateBr(Block.getBlock());
1297
0
        Builder.ClearInsertionPoint();
1298
0
      }
1299
0
      return;
1300
0
    }
1301
850
  }
1302
850
1303
850
  llvm::BasicBlock *CaseDest = createBasicBlock("sw.bb");
1304
850
  EmitBlockWithFallThrough(CaseDest, &S);
1305
850
  if (SwitchWeights)
1306
44
    SwitchWeights->push_back(getProfileCount(&S));
1307
850
  SwitchInsn->addCase(CaseVal, CaseDest);
1308
850
1309
850
  // Recursively emitting the statement is acceptable, but is not wonderful for
1310
850
  // code where we have many case statements nested together, i.e.:
1311
850
  //  case 1:
1312
850
  //    case 2:
1313
850
  //      case 3: etc.
1314
850
  // Handling this recursively will create a new block for each case statement
1315
850
  // that falls through to the next case, which is IR-intensive.  It also causes
1316
850
  // deep recursion, which can run into stack depth limitations.  Handle
1317
850
  // sequential non-range case statements specially.
1318
850
  const CaseStmt *CurCase = &S;
1319
850
  const CaseStmt *NextCase = dyn_cast<CaseStmt>(S.getSubStmt());
1320
850
1321
850
  // Otherwise, iteratively add consecutive cases to this switch stmt.
1322
953
  while (NextCase && NextCase->getRHS() == nullptr) {
1323
103
    CurCase = NextCase;
1324
103
    llvm::ConstantInt *CaseVal =
1325
103
      Builder.getInt(CurCase->getLHS()->EvaluateKnownConstInt(getContext()));
1326
103
1327
103
    if (SwitchWeights)
1328
7
      SwitchWeights->push_back(getProfileCount(NextCase));
1329
103
    if (CGM.getCodeGenOpts().hasProfileClangInstr()) {
1330
3
      CaseDest = createBasicBlock("sw.bb");
1331
3
      EmitBlockWithFallThrough(CaseDest, &S);
1332
3
    }
1333
103
1334
103
    SwitchInsn->addCase(CaseVal, CaseDest);
1335
103
    NextCase = dyn_cast<CaseStmt>(CurCase->getSubStmt());
1336
103
  }
1337
850
1338
850
  // Normal default recursion for non-cases.
1339
850
  EmitStmt(CurCase->getSubStmt());
1340
850
}
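For reference, the sequential-case shape this loop flattens looks like the following in user code; handled recursively, each label would otherwise get its own fallthrough block (illustrative snippet only):

    int classify(int n) {
      switch (n) {
      case 1:
        case 2:
          case 3:
            return 0;   /* three consecutive labels, one destination block */
      default:
        return 1;
      }
    }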
1341
1342
167
void CodeGenFunction::EmitDefaultStmt(const DefaultStmt &S) {
1343
167
  // If there is no enclosing switch instance that we're aware of, then this
1344
167
  // default statement can be elided. This situation only happens when we've
1345
167
  // constant-folded the switch.
1346
167
  if (!SwitchInsn) {
1347
1
    EmitStmt(S.getSubStmt());
1348
1
    return;
1349
1
  }
1350
166
1351
166
  llvm::BasicBlock *DefaultBlock = SwitchInsn->getDefaultDest();
1352
166
  assert(DefaultBlock->empty() &&
1353
166
         "EmitDefaultStmt: Default block already defined?");
1354
166
1355
166
  EmitBlockWithFallThrough(DefaultBlock, &S);
1356
166
1357
166
  EmitStmt(S.getSubStmt());
1358
166
}
1359
1360
/// CollectStatementsForCase - Given the body of a 'switch' statement and a
1361
/// constant value that is being switched on, see if we can dead code eliminate
1362
/// the body of the switch to a simple series of statements to emit.  Basically,
1363
/// on a switch (5) we want to find these statements:
1364
///    case 5:
1365
///      printf(...);    <--
1366
///      ++i;            <--
1367
///      break;
1368
///
1369
/// and add them to the ResultStmts vector.  If it is unsafe to do this
1370
/// transformation (for example, one of the elided statements contains a label
1371
/// that might be jumped to), return CSFC_Failure.  If we handled it and 'S'
1372
/// should include statements after it (e.g. the printf() line is a substmt of
1373
/// the case) then return CSFC_FallThrough.  If we handled it and found a break
1374
/// statement, then return CSFC_Success.
1375
///
1376
/// If Case is non-null, then we are looking for the specified case, checking
1377
/// that nothing we jump over contains labels.  If Case is null, then we found
1378
/// the case and are looking for the break.
1379
///
1380
/// If the recursive walk actually finds our Case, then we set FoundCase to
1381
/// true.
1382
///
1383
enum CSFC_Result { CSFC_Failure, CSFC_FallThrough, CSFC_Success };
1384
static CSFC_Result CollectStatementsForCase(const Stmt *S,
1385
                                            const SwitchCase *Case,
1386
                                            bool &FoundCase,
1387
497
                              SmallVectorImpl<const Stmt*> &ResultStmts) {
1388
497
  // If this is a null statement, just succeed.
1389
497
  if (!S)
1390
0
    return Case ? CSFC_Success : CSFC_FallThrough;
1391
497
1392
497
  // If this is the switchcase (case 4: or default) that we're looking for, then
1393
497
  // we're in business.  Just add the substatement.
1394
497
  if (const SwitchCase *SC = dyn_cast<SwitchCase>(S)) {
1395
146
    if (S == Case) {
1396
78
      FoundCase = true;
1397
78
      return CollectStatementsForCase(SC->getSubStmt(), nullptr, FoundCase,
1398
78
                                      ResultStmts);
1399
78
    }
1400
68
1401
68
    // Otherwise, this is some other case or default statement, just ignore it.
1402
68
    return CollectStatementsForCase(SC->getSubStmt(), Case, FoundCase,
1403
68
                                    ResultStmts);
1404
68
  }
1405
351
1406
351
  // If we are in the live part of the code and we found our break statement,
1407
351
  // return a success!
1408
351
  if (!Case && isa<BreakStmt>(S))
1409
60
    return CSFC_Success;
1410
291
1411
291
  // If this is a compound statement, then it might contain the SwitchCase, the
1412
291
  // break, or neither.
1413
291
  if (const CompoundStmt *CS = dyn_cast<CompoundStmt>(S)) {
1414
83
    // Handle this as two cases: we might be looking for the SwitchCase (if so
1415
83
    // the skipped statements must be skippable) or we might already have it.
1416
83
    CompoundStmt::const_body_iterator I = CS->body_begin(), E = CS->body_end();
1417
83
    bool StartedInLiveCode = FoundCase;
1418
83
    unsigned StartSize = ResultStmts.size();
1419
83
1420
83
    // If we've not found the case yet, scan through looking for it.
1421
83
    if (Case) {
1422
76
      // Keep track of whether we see a skipped declaration.  The code could be
1423
76
      // using the declaration even if it is skipped, so we can't optimize out
1424
76
      // the decl if the kept statements might refer to it.
1425
76
      bool HadSkippedDecl = false;
1426
76
1427
76
      // If we're looking for the case, just see if we can skip each of the
1428
76
      // substatements.
1429
268
      for (; Case && I != E; ++I) {
1430
205
        HadSkippedDecl |= CodeGenFunction::mightAddDeclToScope(*I);
1431
205
1432
205
        switch (CollectStatementsForCase(*I, Case, FoundCase, ResultStmts)) {
1433
2
        case CSFC_Failure: return CSFC_Failure;
1434
138
        case CSFC_Success:
1435
138
          // A successful result means that either 1) the statement doesn't
1436
138
          // have the case and is skippable, or 2) it contains the case value
1437
138
          // and also contains the break to exit the switch.  In the latter case,
1438
138
          // we just verify the rest of the statements are elidable.
1439
138
          if (FoundCase) {
1440
8
            // If we found the case and skipped declarations, we can't do the
1441
8
            // optimization.
1442
8
            if (HadSkippedDecl)
1443
0
              return CSFC_Failure;
1444
8
1445
18
            for (++I; I != E; ++I)
1446
10
              if (CodeGenFunction::ContainsLabel(*I, true))
1447
0
                return CSFC_Failure;
1448
8
            return CSFC_Success;
1449
130
          }
1450
130
          break;
1451
130
        case CSFC_FallThrough:
1452
65
          // If we have a fallthrough condition, then we must have found the
1453
65
          // case and started to include statements.  Consider the rest of the
1454
65
          // statements in the compound statement as candidates for inclusion.
1455
65
          assert(FoundCase && "Didn't find case but returned fallthrough?");
1456
65
          // We recursively found Case, so we're not looking for it anymore.
1457
65
          Case = nullptr;
1458
65
1459
65
          // If we found the case and skipped declarations, we can't do the
1460
65
          // optimization.
1461
65
          if (HadSkippedDecl)
1462
3
            return CSFC_Failure;
1463
62
          break;
1464
205
        }
1465
205
      }
1466
76
1467
76
      if (!FoundCase)
1468
1
        return CSFC_Success;
1469
62
1470
62
      assert(!HadSkippedDecl && "fallthrough after skipping decl");
1471
62
    }
1472
83
1473
83
    // If we have statements in our range, then we know that the statements are
1474
83
    // live and need to be added to the set of statements we're tracking.
1475
83
    bool AnyDecls = false;
1476
84
    for (; I != E; ++I) {
1477
68
      AnyDecls |= CodeGenFunction::mightAddDeclToScope(*I);
1478
68
1479
68
      switch (CollectStatementsForCase(*I, nullptr, FoundCase, ResultStmts)) {
1480
0
      case CSFC_Failure: return CSFC_Failure;
1481
15
      case CSFC_FallThrough:
1482
15
        // A fallthrough result means that the statement was simple and just
1483
15
        // included in ResultStmts; keep adding statements afterwards.
1484
15
        break;
1485
53
      case CSFC_Success:
1486
53
        // A successful result means that we found the break statement and
1487
53
        // stopped statement inclusion.  We just ensure that any leftover stmts
1488
53
        // are skippable and return success ourselves.
1489
165
        for (++I; I != E; ++I)
1490
112
          if (CodeGenFunction::ContainsLabel(*I, true))
1491
0
            return CSFC_Failure;
1492
53
        return CSFC_Success;
1493
68
      }
1494
68
    }
1495
69
1496
69
    // If we're about to fall out of a scope without hitting a 'break;', we
1497
69
    // can't perform the optimization if there were any decls in that scope
1498
69
    // (we'd lose their end-of-lifetime).
1499
69
    if (AnyDecls) {
1500
3
      // If the entire compound statement was live, there's one more thing we
1501
3
      // can try before giving up: emit the whole thing as a single statement.
1502
3
      // We can do that unless the statement contains a 'break;'.
1503
3
      // FIXME: Such a break must be at the end of a construct within this one.
1504
3
      // We could emit this by just ignoring the BreakStmts entirely.
1505
3
      if (StartedInLiveCode && !CodeGenFunction::containsBreak(S)) {
1506
3
        ResultStmts.resize(StartSize);
1507
3
        ResultStmts.push_back(S);
1508
3
      } else {
1509
0
        return CSFC_Failure;
1510
0
      }
1511
16
    }
1512
16
1513
16
    return CSFC_FallThrough;
1514
16
  }
1515
208
1516
208
  // Okay, this is some other statement that we don't handle explicitly, like a
1517
208
  // for statement or increment etc.  If we are skipping over this statement,
1518
208
  // just verify it doesn't have labels, which would make it invalid to elide.
1519
208
  if (Case) {
1520
129
    if (CodeGenFunction::ContainsLabel(S, true))
1521
0
      return CSFC_Failure;
1522
129
    return CSFC_Success;
1523
129
  }
1524
79
1525
79
  // Otherwise, we want to include this statement.  Everything is cool with that
1526
79
  // so long as it doesn't contain a break out of the switch we're in.
1527
79
  if (CodeGenFunction::containsBreak(S)) return CSFC_Failure;
1528
78
1529
78
  // Otherwise, everything is great.  Include the statement and tell the caller
1530
78
  // that we fall through and include the next statement as well.
1531
78
  ResultStmts.push_back(S);
1532
78
  return CSFC_FallThrough;
1533
78
}
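Concretely, the walk above is what lets a switch on a constant collapse to straight-line code. A hypothetical before/after fragment (not compiler output; assumes i and <stdio.h> are in scope):

    /* Before: the controlling expression folds to the constant 5. */
    switch (5) {
    case 5:
      printf("five\n");   /* collected into ResultStmts */
      ++i;                /* collected into ResultStmts */
      break;              /* CSFC_Success: stop collecting */
    default:
      abort();            /* dead, skipped over */
    }

    /* After: only the live statements are emitted. */
    printf("five\n");
    ++i;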
1534
1535
/// FindCaseStatementsForValue - Find the case statement being jumped to and
1536
/// then invoke CollectStatementsForCase to find the list of statements to emit
1537
/// for a switch on constant.  See the comment above CollectStatementsForCase
1538
/// for more details.
1539
static bool FindCaseStatementsForValue(const SwitchStmt &S,
1540
                                       const llvm::APSInt &ConstantCondValue,
1541
                                SmallVectorImpl<const Stmt*> &ResultStmts,
1542
                                       ASTContext &C,
1543
94
                                       const SwitchCase *&ResultCase) {
1544
94
  // First step, find the switch case that is being branched to.  We can do this
1545
94
  // efficiently by scanning the SwitchCase list.
1546
94
  const SwitchCase *Case = S.getSwitchCaseList();
1547
94
  const DefaultStmt *DefaultCase = nullptr;
1548
94
1549
234
  for (; Case; Case = Case->getNextSwitchCase()) {
1550
198
    // It's either a default or case.  Just remember the default statement in
1551
198
    // case we're not jumping to any numbered cases.
1552
198
    if (const DefaultStmt *DS = dyn_cast<DefaultStmt>(Case)) {
1553
66
      DefaultCase = DS;
1554
66
      continue;
1555
66
    }
1556
132
1557
132
    // Check to see if this case is the one we're looking for.
1558
132
    const CaseStmt *CS = cast<CaseStmt>(Case);
1559
132
    // Don't handle case ranges yet.
1560
132
    if (CS->getRHS()) return false;
1561
125
1562
125
    // If we found our case, remember it as 'case'.
1563
125
    if (CS->getLHS()->EvaluateKnownConstInt(C) == ConstantCondValue)
1564
51
      break;
1565
125
  }
1566
94
1567
94
  // If we didn't find a matching case, we use a default if it exists, or we
1568
94
  // elide the whole switch body!
1569
94
  if (!Case) {
1570
36
    // It is safe to elide the body of the switch if it doesn't contain labels
1571
36
    // etc.  If it is safe, return successfully with an empty ResultStmts list.
1572
36
    if (!DefaultCase)
1573
9
      return !CodeGenFunction::ContainsLabel(&S);
1574
27
    Case = DefaultCase;
1575
27
  }
1576
87
1577
87
  // Ok, we know which case is being jumped to, try to collect all the
1578
87
  // statements that follow it.  This can fail for a variety of reasons.  Also,
1579
87
  // check to see that the recursive walk actually found our case statement.
1580
87
  // Insane cases like this can fail to find it in the recursive walk since we
1581
87
  // don't handle every stmt kind:
1582
87
  // switch (4) {
1583
87
  //   while (1) {
1584
87
  //     case 4: ...
1585
87
  bool FoundCase = false;
1586
78
  ResultCase = Case;
1587
78
  return CollectStatementsForCase(S.getBody(), Case, FoundCase,
1588
78
                                  ResultStmts) != CSFC_Failure &&
1589
78
         FoundCase;
1590
87
}
1591
1592
449
void CodeGenFunction::EmitSwitchStmt(const SwitchStmt &S) {
1593
449
  // Handle nested switch statements.
1594
449
  llvm::SwitchInst *SavedSwitchInsn = SwitchInsn;
1595
449
  SmallVector<uint64_t, 16> *SavedSwitchWeights = SwitchWeights;
1596
449
  llvm::BasicBlock *SavedCRBlock = CaseRangeBlock;
1597
449
1598
449
  // See if we can constant fold the condition of the switch and therefore only
1599
449
  // emit the live case statement (if any) of the switch.
1600
449
  llvm::APSInt ConstantCondValue;
1601
449
  if (ConstantFoldsToSimpleInteger(S.getCond(), ConstantCondValue)) {
1602
94
    SmallVector<const Stmt*, 4> CaseStmts;
1603
94
    const SwitchCase *Case = nullptr;
1604
94
    if (FindCaseStatementsForValue(S, ConstantCondValue, CaseStmts,
1605
94
                                   getContext(), Case)) {
1606
83
      if (Case)
1607
74
        incrementProfileCounter(Case);
1608
83
      RunCleanupsScope ExecutedScope(*this);
1609
83
1610
83
      if (S.getInit())
1611
0
        EmitStmt(S.getInit());
1612
83
1613
83
      // Emit the condition variable if needed inside the entire cleanup scope
1614
83
      // used by this special case for constant folded switches.
1615
83
      if (S.getConditionVariable())
1616
0
        EmitDecl(*S.getConditionVariable());
1617
83
1618
83
      // At this point, we are no longer "within" a switch instance, so
1619
83
      // we can temporarily enforce this to ensure that any embedded case
1620
83
      // statements are not emitted.
1621
83
      SwitchInsn = nullptr;
1622
83
1623
83
      // Okay, we can dead code eliminate everything except this case.  Emit the
1624
83
      // specified series of statements and we're good.
1625
155
      for (unsigned i = 0, e = CaseStmts.size(); i != e; ++i)
1626
72
        EmitStmt(CaseStmts[i]);
1627
83
      incrementProfileCounter(&S);
1628
83
1629
83
      // Now we want to restore the saved switch instance so that nested
1630
83
      // switches continue to function properly
1631
83
      SwitchInsn = SavedSwitchInsn;
1632
83
1633
83
      return;
1634
83
    }
1635
366
  }
1636
366
1637
366
  JumpDest SwitchExit = getJumpDestInCurrentScope("sw.epilog");
1638
366
1639
366
  RunCleanupsScope ConditionScope(*this);
1640
366
1641
366
  if (S.getInit())
1642
7
    EmitStmt(S.getInit());
1643
366
1644
366
  if (S.getConditionVariable())
1645
4
    EmitDecl(*S.getConditionVariable());
1646
366
  llvm::Value *CondV = EmitScalarExpr(S.getCond());
1647
366
1648
366
  // Create basic block to hold stuff that comes after switch
1649
366
  // statement. We also need to create a default block now so that
1650
366
  // explicit case range tests can have a place to jump to on
1651
366
  // failure.
1652
366
  llvm::BasicBlock *DefaultBlock = createBasicBlock("sw.default");
1653
366
  SwitchInsn = Builder.CreateSwitch(CondV, DefaultBlock);
1654
366
  if (PGO.haveRegionCounts()) {
1655
22
    // Walk the SwitchCase list to find how many there are.
1656
22
    uint64_t DefaultCount = 0;
1657
22
    unsigned NumCases = 0;
1658
22
    for (const SwitchCase *Case = S.getSwitchCaseList();
1659
101
         Case;
1660
79
         Case = Case->getNextSwitchCase()) {
1661
79
      if (isa<DefaultStmt>(Case))
1662
16
        DefaultCount = getProfileCount(Case);
1663
79
      NumCases += 1;
1664
79
    }
1665
22
    SwitchWeights = new SmallVector<uint64_t, 16>();
1666
22
    SwitchWeights->reserve(NumCases);
1667
22
    // The default needs to be first. We store the edge count, so we already
1668
22
    // know the right weight.
1669
22
    SwitchWeights->push_back(DefaultCount);
1670
22
  }
1671
366
  CaseRangeBlock = DefaultBlock;
1672
366
1673
366
  // Clear the insertion point to indicate we are in unreachable code.
1674
366
  Builder.ClearInsertionPoint();
1675
366
1676
366
  // All break statements jump to SwitchExit. If BreakContinueStack is non-empty,
1678
366
  // then reuse the last ContinueBlock.
1678
366
  JumpDest OuterContinue;
1679
366
  if (!BreakContinueStack.empty())
1680
44
    OuterContinue = BreakContinueStack.back().ContinueBlock;
1681
366
1682
366
  BreakContinueStack.push_back(BreakContinue(SwitchExit, OuterContinue));
1683
366
1684
366
  // Emit switch body.
1685
366
  EmitStmt(S.getBody());
1686
366
1687
366
  BreakContinueStack.pop_back();
1688
366
1689
366
  // Update the default block in case explicit case range tests have
1690
366
  // been chained on top.
1691
366
  SwitchInsn->setDefaultDest(CaseRangeBlock);
1692
366
1693
366
  // If a default was never emitted:
1694
366
  if (!DefaultBlock->getParent()) {
1695
200
    // If we have cleanups, emit the default block so that there's a
1696
200
    // place to jump through the cleanups from.
1697
200
    if (ConditionScope.requiresCleanups()) {
1698
0
      EmitBlock(DefaultBlock);
1699
0
1700
0
    // Otherwise, just forward the default block to the switch end.
1701
200
    } else {
1702
200
      DefaultBlock->replaceAllUsesWith(SwitchExit.getBlock());
1703
200
      delete DefaultBlock;
1704
200
    }
1705
200
  }
1706
366
1707
366
  ConditionScope.ForceCleanup();
1708
366
1709
366
  // Emit continuation.
1710
366
  EmitBlock(SwitchExit.getBlock(), true);
1711
366
  incrementProfileCounter(&S);
1712
366
1713
366
  // If the switch has a condition wrapped by __builtin_unpredictable,
1714
366
  // create metadata that specifies that the switch is unpredictable.
1715
366
  // Don't bother if not optimizing because that metadata would not be used.
1716
366
  auto *Call = dyn_cast<CallExpr>(S.getCond());
1717
366
  if (Call && CGM.getCodeGenOpts().OptimizationLevel != 0) {
1718
10
    auto *FD = dyn_cast_or_null<FunctionDecl>(Call->getCalleeDecl());
1719
10
    if (FD && FD->getBuiltinID() == Builtin::BI__builtin_unpredictable) {
1720
2
      llvm::MDBuilder MDHelper(getLLVMContext());
1721
2
      SwitchInsn->setMetadata(llvm::LLVMContext::MD_unpredictable,
1722
2
                              MDHelper.createUnpredictable());
1723
2
    }
1724
10
  }
1725
366
1726
366
  if (SwitchWeights) {
1727
22
    assert(SwitchWeights->size() == 1 + SwitchInsn->getNumCases() &&
1728
22
           "switch weights do not match switch cases");
1729
22
    // If there's only one jump destination there's no sense weighting it.
1730
22
    if (SwitchWeights->size() > 1)
1731
18
      SwitchInsn->setMetadata(llvm::LLVMContext::MD_prof,
1732
18
                              createProfileWeights(*SwitchWeights));
1733
22
    delete SwitchWeights;
1734
22
  }
1735
366
  SwitchInsn = SavedSwitchInsn;
1736
366
  SwitchWeights = SavedSwitchWeights;
1737
366
  CaseRangeBlock = SavedCRBlock;
1738
366
}
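The builtin probed for at the end of this function is used by wrapping the switch condition; under optimization it attaches !unpredictable metadata to the switch instruction. A usage sketch (the handler names are invented):

    switch (__builtin_unpredictable(x)) {
    case 0:  return handle0();
    case 1:  return handle1();
    default: return handleOther();
    }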
1739
1740
static std::string
1741
SimplifyConstraint(const char *Constraint, const TargetInfo &Target,
1742
2.33k
                 SmallVectorImpl<TargetInfo::ConstraintInfo> *OutCons=nullptr) {
1743
2.33k
  std::string Result;
1744
2.33k
1745
6.04k
  while (*Constraint) {
1746
3.70k
    switch (*Constraint) {
1747
2.92k
    default:
1748
2.92k
      Result += Target.convertConstraint(Constraint);
1749
2.92k
      break;
1750
0
    // Ignore these
1751
3
    case '*':
1752
3
    case '?':
1753
3
    case '!':
1754
3
    case '=': // Will see this and the following in multi-alternative constraints.
1755
3
    case '+':
1756
3
      break;
1757
3
    case '#': // Ignore the rest of the constraint alternative.
1758
3
      while (Constraint[1] && Constraint[1] != ',')
1759
2
        Constraint++;
1760
1
      break;
1761
16
    case '&':
1762
16
    case '%':
1763
16
      Result += *Constraint;
1764
18
      while (Constraint[1] && Constraint[1] == *Constraint)
1765
2
        Constraint++;
1766
16
      break;
1767
621
    case ',':
1768
621
      Result += "|";
1769
621
      break;
1770
143
    case 'g':
1771
143
      Result += "imr";
1772
143
      break;
1773
16
    case '[': {
1774
0
      assert(OutCons &&
1775
0
             "Must pass output names to constraints with a symbolic name");
1776
0
      unsigned Index;
1777
0
      bool result = Target.resolveSymbolicName(Constraint, *OutCons, Index);
1778
0
      assert(result && "Could not resolve symbolic name"); (void)result;
1779
0
      Result += llvm::utostr(Index);
1780
0
      break;
1781
3.70k
    }
1782
3.70k
    }
1783
3.70k
1784
3.70k
    Constraint++;
1785
3.70k
  }
1786
2.33k
1787
2.33k
  return Result;
1788
2.33k
}
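To make the rewrites concrete, here is what a couple of GCC constraint strings become under the rules above (worked examples; note the leading '=' of an output constraint is stripped by the caller before this function runs):

    "r,g"   ->  "r|imr"   /* ',' becomes '|'; 'g' expands to "imr" */
    "=&r"   ->  "&r"      /* '=' ignored; '&' kept for early-clobber */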
1789
1790
/// AddVariableConstraints - Look at AsmExpr and, if it is a variable declared
1791
/// as using a particular register, add that register as a constraint that will
1792
/// be used in this asm stmt.
1793
static std::string
1794
AddVariableConstraints(const std::string &Constraint, const Expr &AsmExpr,
1795
                       const TargetInfo &Target, CodeGenModule &CGM,
1796
2.33k
                       const AsmStmt &Stmt, const bool EarlyClobber) {
1797
2.33k
  const DeclRefExpr *AsmDeclRef = dyn_cast<DeclRefExpr>(&AsmExpr);
1798
2.33k
  if (!AsmDeclRef)
1799
523
    return Constraint;
1800
1.81k
  const ValueDecl &Value = *AsmDeclRef->getDecl();
1801
1.81k
  const VarDecl *Variable = dyn_cast<VarDecl>(&Value);
1802
1.81k
  if (!Variable)
1803
11
    return Constraint;
1804
1.80k
  if (Variable->getStorageClass() != SC_Register)
1805
1.16k
    return Constraint;
1806
637
  AsmLabelAttr *Attr = Variable->getAttr<AsmLabelAttr>();
1807
637
  if (!Attr)
1808
576
    return Constraint;
1809
61
  StringRef Register = Attr->getLabel();
1810
61
  assert(Target.isValidGCCRegisterName(Register));
1811
61
  // We're using validateOutputConstraint here because we only care if
1812
61
  // this is a register constraint.
1813
61
  TargetInfo::ConstraintInfo Info(Constraint, "");
1814
61
  if (Target.validateOutputConstraint(Info) &&
1815
61
      !Info.allowsRegister()) {
1816
0
    CGM.ErrorUnsupported(&Stmt, "__asm__");
1817
0
    return Constraint;
1818
0
  }
1819
61
  // Canonicalize the register here before returning it.
1820
61
  Register = Target.getNormalizedGCCRegisterName(Register);
1821
61
  return (EarlyClobber ? "&{" : "{") + Register.str() + "}";
1822
61
}
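What this function matches is GNU C's register variables with asm labels. A sketch (the register choice r12 is arbitrary and x86-64 names are assumed):

    void bump(void) {
      /* A variable pinned to a specific register via an asm label... */
      register unsigned long counter asm("r12") = 0;
      /* ...makes its "r" constraint come back as "{r12}" here
         (or "&{r12}" when the operand is early-clobber). */
      asm ("add $1, %0" : "+r" (counter));
    }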
1823
1824
llvm::Value*
1825
CodeGenFunction::EmitAsmInputLValue(const TargetInfo::ConstraintInfo &Info,
1826
                                    LValue InputValue, QualType InputType,
1827
                                    std::string &ConstraintStr,
1828
283
                                    SourceLocation Loc) {
1829
283
  llvm::Value *Arg;
1830
283
  if (Info.allowsRegister() || !Info.allowsMemory()) {
1831
123
    if (CodeGenFunction::hasScalarEvaluationKind(InputType)) {
1832
120
      Arg = EmitLoadOfLValue(InputValue, Loc).getScalarVal();
1833
120
    } else {
1834
3
      llvm::Type *Ty = ConvertType(InputType);
1835
3
      uint64_t Size = CGM.getDataLayout().getTypeSizeInBits(Ty);
1836
3
      if (Size <= 64 && llvm::isPowerOf2_64(Size)) {
1837
3
        Ty = llvm::IntegerType::get(getLLVMContext(), Size);
1838
3
        Ty = llvm::PointerType::getUnqual(Ty);
1839
3
1840
3
        Arg = Builder.CreateLoad(
1841
3
            Builder.CreateBitCast(InputValue.getAddress(*this), Ty));
1842
3
      } else {
1843
0
        Arg = InputValue.getPointer(*this);
1844
0
        ConstraintStr += '*';
1845
0
      }
1846
3
    }
1847
160
  } else {
1848
160
    Arg = InputValue.getPointer(*this);
1849
160
    ConstraintStr += '*';
1850
160
  }
1851
283
1852
283
  return Arg;
1853
283
}
1854
1855
llvm::Value* CodeGenFunction::EmitAsmInput(
1856
                                         const TargetInfo::ConstraintInfo &Info,
1857
                                           const Expr *InputExpr,
1858
1.27k
                                           std::string &ConstraintStr) {
1859
1.27k
  // If this can't be a register or memory, i.e., has to be a constant
1860
1.27k
  // (immediate or symbolic), try to emit it as such.
1861
1.27k
  if (!Info.allowsRegister() && !Info.allowsMemory()) {
1862
131
    if (Info.requiresImmediateConstant()) {
1863
58
      Expr::EvalResult EVResult;
1864
58
      InputExpr->EvaluateAsRValue(EVResult, getContext(), true);
1865
58
1866
58
      llvm::APSInt IntResult;
1867
58
      if (EVResult.Val.toIntegralConstant(IntResult, InputExpr->getType(),
1868
58
                                          getContext()))
1869
57
        return llvm::ConstantInt::get(getLLVMContext(), IntResult);
1870
74
    }
1871
74
1872
74
    Expr::EvalResult Result;
1873
74
    if (InputExpr->EvaluateAsInt(Result, getContext()))
1874
26
      return llvm::ConstantInt::get(getLLVMContext(), Result.Val.getInt());
1875
1.18k
  }
1876
1.18k
1877
1.18k
  if (Info.allowsRegister() || !Info.allowsMemory())
1878
1.03k
    if (CodeGenFunction::hasScalarEvaluationKind(InputExpr->getType()))
1879
1.03k
      return EmitScalarExpr(InputExpr);
1880
151
  if (InputExpr->getStmtClass() == Expr::CXXThisExprClass)
1881
3
    return EmitScalarExpr(InputExpr);
1882
148
  InputExpr = InputExpr->IgnoreParenNoopCasts(getContext());
1883
148
  LValue Dest = EmitLValue(InputExpr);
1884
148
  return EmitAsmInputLValue(Info, Dest, InputExpr->getType(), ConstraintStr,
1885
148
                            InputExpr->getExprLoc());
1886
148
}
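The constant-evaluation path above is what services "i" (immediate) operands: the expression must fold at compile time and is emitted as a ConstantInt rather than loaded. A usage sketch:

    void trap(void) {
      /* "i" demands an immediate: 0x80 is evaluated by the front end
         and printed into the instruction as $128, never loaded. */
      asm volatile ("int %0" :: "i" (0x80));
    }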
1887
1888
/// getAsmSrcLocInfo - Return the !srcloc metadata node to attach to an inline
1889
/// asm call instruction.  The !srcloc MDNode contains a list of constant
1890
/// integers which are the source locations of the start of each line in the
1891
/// asm.
1892
static llvm::MDNode *getAsmSrcLocInfo(const StringLiteral *Str,
1893
1.22k
                                      CodeGenFunction &CGF) {
1894
1.22k
  SmallVector<llvm::Metadata *, 8> Locs;
1895
1.22k
  // Add the location of the first line to the MDNode.
1896
1.22k
  Locs.push_back(llvm::ConstantAsMetadata::get(llvm::ConstantInt::get(
1897
1.22k
      CGF.Int32Ty, Str->getBeginLoc().getRawEncoding())));
1898
1.22k
  StringRef StrVal = Str->getString();
1899
1.22k
  if (!StrVal.empty()) {
1900
1.17k
    const SourceManager &SM = CGF.CGM.getContext().getSourceManager();
1901
1.17k
    const LangOptions &LangOpts = CGF.CGM.getLangOpts();
1902
1.17k
    unsigned StartToken = 0;
1903
1.17k
    unsigned ByteOffset = 0;
1904
1.17k
1905
1.17k
    // Add the location of the start of each subsequent line of the asm to the
1906
1.17k
    // MDNode.
1907
18.4k
    for (unsigned i = 0, e = StrVal.size() - 1; i != e; ++i) {
1908
17.3k
      if (StrVal[i] != '\n') continue;
1909
248
      SourceLocation LineLoc = Str->getLocationOfByte(
1910
248
          i + 1, SM, LangOpts, CGF.getTarget(), &StartToken, &ByteOffset);
1911
248
      Locs.push_back(llvm::ConstantAsMetadata::get(
1912
248
          llvm::ConstantInt::get(CGF.Int32Ty, LineLoc.getRawEncoding())));
1913
248
    }
1914
1.17k
  }
1915
1.22k
1916
1.22k
  return llvm::MDNode::get(CGF.getLLVMContext(), Locs);
1917
1.22k
}
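The per-line bookkeeping matters for multi-line asm strings: each '\n' starts a new !srcloc entry, so an assembler diagnostic can point at the exact offending line. For instance, each line of the template below gets its own entry (sketch):

    void copy_to_eax(int x) {
      asm ("movl %0, %%eax\n\t"
           "nop"                 /* second line, second !srcloc entry */
           :: "r" (x) : "eax");
    }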
1918
1919
static void UpdateAsmCallInst(llvm::CallBase &Result, bool HasSideEffect,
1920
                              bool ReadOnly, bool ReadNone, const AsmStmt &S,
1921
                              const std::vector<llvm::Type *> &ResultRegTypes,
1922
                              CodeGenFunction &CGF,
1923
1.37k
                              std::vector<llvm::Value *> &RegResults) {
1924
1.37k
  Result.addAttribute(llvm::AttributeList::FunctionIndex,
1925
1.37k
                      llvm::Attribute::NoUnwind);
1926
1.37k
  // Attach readnone and readonly attributes.
1927
1.37k
  if (!HasSideEffect) {
1928
749
    if (ReadNone)
1929
289
      Result.addAttribute(llvm::AttributeList::FunctionIndex,
1930
289
                          llvm::Attribute::ReadNone);
1931
460
    else if (ReadOnly)
1932
319
      Result.addAttribute(llvm::AttributeList::FunctionIndex,
1933
319
                          llvm::Attribute::ReadOnly);
1934
749
  }
1935
1.37k
1936
1.37k
  // Slap the source location of the inline asm into a !srcloc metadata on the
1937
1.37k
  // call.
1938
1.37k
  if (const auto *gccAsmStmt = dyn_cast<GCCAsmStmt>(&S))
1939
1.22k
    Result.setMetadata("srcloc",
1940
1.22k
                       getAsmSrcLocInfo(gccAsmStmt->getAsmString(), CGF));
1941
155
  else {
1942
155
    // At least put the line number on MS inline asm blobs.
1943
155
    llvm::Constant *Loc = llvm::ConstantInt::get(CGF.Int32Ty,
1944
155
                                        S.getAsmLoc().getRawEncoding());
1945
155
    Result.setMetadata("srcloc",
1946
155
                       llvm::MDNode::get(CGF.getLLVMContext(),
1947
155
                                         llvm::ConstantAsMetadata::get(Loc)));
1948
155
  }
1949
1.37k
1950
1.37k
  if (CGF.getLangOpts().assumeFunctionsAreConvergent())
1951
8
    // Conservatively, mark all inline asm blocks in CUDA or OpenCL as
1952
8
    // convergent (meaning, they may call an intrinsically convergent op, such
1953
8
    // as bar.sync, and so can't have certain optimizations applied around
1954
8
    // them).
1955
8
    Result.addAttribute(llvm::AttributeList::FunctionIndex,
1956
8
                        llvm::Attribute::Convergent);
1957
1.37k
  // Extract all of the register value results from the asm.
1958
1.37k
  if (ResultRegTypes.size() == 1) {
1959
704
    RegResults.push_back(&Result);
1960
704
  } else {
1961
876
    for (unsigned i = 0, e = ResultRegTypes.size(); i != e; ++i) {
1962
204
      llvm::Value *Tmp = CGF.Builder.CreateExtractValue(&Result, i, "asmresult");
1963
204
      RegResults.push_back(Tmp);
1964
204
    }
1965
672
  }
1966
1.37k
}
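Tying the attribute logic together, a few GNU C asm statements and the attribute each would earn under the rules above (a sketch; the comments state the expected outcome, not verified compiler output):

    void attrs(int *p) {
      int a, b = *p;
      asm ("add %1, %0" : "=r" (a) : "r" (b));     /* no side effects, no
                                                      memory: readnone */
      asm ("mov %1, %0" : "=r" (a) : "m" (b));     /* memory input keeps it
                                                      down to readonly */
      asm volatile ("rdtsc" : "=a" (a) : : "edx"); /* volatile: neither */
      *p = a;
    }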
1967
1968
1.37k
void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
1969
1.37k
  // Assemble the final asm string.
1970
1.37k
  std::string AsmString = S.generateAsmString(getContext());
1971
1.37k
1972
1.37k
  // Get all the output and input constraints together.
1973
1.37k
  SmallVector<TargetInfo::ConstraintInfo, 4> OutputConstraintInfos;
1974
1.37k
  SmallVector<TargetInfo::ConstraintInfo, 4> InputConstraintInfos;
1975
1.37k
1976
2.44k
  for (unsigned i = 0, e = S.getNumOutputs(); i != e; i++) {
1977
1.06k
    StringRef Name;
1978
1.06k
    if (const GCCAsmStmt *GAS = dyn_cast<GCCAsmStmt>(&S))
1979
1.04k
      Name = GAS->getOutputName(i);
1980
1.06k
    TargetInfo::ConstraintInfo Info(S.getOutputConstraint(i), Name);
1981
1.06k
    bool IsValid = getTarget().validateOutputConstraint(Info); (void)IsValid;
1982
1.06k
    assert(IsValid && "Failed to parse output constraint");
1983
1.06k
    OutputConstraintInfos.push_back(Info);
1984
1.06k
  }
1985
1.37k
1986
2.64k
  for (unsigned i = 0, e = S.getNumInputs(); i != e; i++) {
1987
1.27k
    StringRef Name;
1988
1.27k
    if (const GCCAsmStmt *GAS = dyn_cast<GCCAsmStmt>(&S))
1989
1.16k
      Name = GAS->getInputName(i);
1990
1.27k
    TargetInfo::ConstraintInfo Info(S.getInputConstraint(i), Name);
1991
1.27k
    bool IsValid =
1992
1.27k
      getTarget().validateInputConstraint(OutputConstraintInfos, Info);
1993
1.27k
    assert(IsValid && "Failed to parse input constraint"); (void)IsValid;
1994
1.27k
    InputConstraintInfos.push_back(Info);
1995
1.27k
  }
1996
1.37k
1997
1.37k
  std::string Constraints;
1998
1.37k
1999
1.37k
  std::vector<LValue> ResultRegDests;
2000
1.37k
  std::vector<QualType> ResultRegQualTys;
2001
1.37k
  std::vector<llvm::Type *> ResultRegTypes;
2002
1.37k
  std::vector<llvm::Type *> ResultTruncRegTypes;
2003
1.37k
  std::vector<llvm::Type *> ArgTypes;
2004
1.37k
  std::vector<llvm::Value*> Args;
2005
1.37k
  llvm::BitVector ResultTypeRequiresCast;
2006
1.37k
2007
1.37k
  // Keep track of inout constraints.
2008
1.37k
  std::string InOutConstraints;
2009
1.37k
  std::vector<llvm::Value*> InOutArgs;
2010
1.37k
  std::vector<llvm::Type*> InOutArgTypes;
2011
1.37k
2012
1.37k
  // Keep track of out constraints for tied input operand.
2013
1.37k
  std::vector<std::string> OutputConstraints;
2014
1.37k
2015
1.37k
  // An inline asm can be marked readonly if it meets the following conditions:
2016
1.37k
  //  - it doesn't have any side effects
2017
1.37k
  //  - it doesn't clobber memory
2018
1.37k
  //  - it doesn't return a value by-reference
2019
1.37k
  // It can be marked readnone if it doesn't have any input memory constraints
2020
1.37k
  // in addition to meeting the conditions listed above.
2021
1.37k
  bool ReadOnly = true, ReadNone = true;
2022
1.37k
2023
2.44k
  for (unsigned i = 0, e = S.getNumOutputs(); i != e; i++) {
2024
1.06k
    TargetInfo::ConstraintInfo &Info = OutputConstraintInfos[i];
2025
1.06k
2026
1.06k
    // Simplify the output constraint.
2027
1.06k
    std::string OutputConstraint(S.getOutputConstraint(i));
2028
1.06k
    OutputConstraint = SimplifyConstraint(OutputConstraint.c_str() + 1,
2029
1.06k
                                          getTarget(), &OutputConstraintInfos);
2030
1.06k
2031
1.06k
    const Expr *OutExpr = S.getOutputExpr(i);
2032
1.06k
    OutExpr = OutExpr->IgnoreParenNoopCasts(getContext());
2033
1.06k
2034
1.06k
    OutputConstraint = AddVariableConstraints(OutputConstraint, *OutExpr,
2035
1.06k
                                              getTarget(), CGM, S,
2036
1.06k
                                              Info.earlyClobber());
2037
1.06k
    OutputConstraints.push_back(OutputConstraint);
2038
1.06k
    LValue Dest = EmitLValue(OutExpr);
2039
1.06k
    if (!Constraints.empty())
2040
184
      Constraints += ',';
2041
1.06k
2042
1.06k
    // If this is a register output, then make the inline asm return it
2043
1.06k
    // by-value.  If this is a memory result, return the value by-reference.
2044
1.06k
    bool isScalarizableAggregate =
2045
1.06k
        hasAggregateEvaluationKind(OutExpr->getType());
2046
1.06k
    if (!Info.allowsMemory() && (hasScalarEvaluationKind(OutExpr->getType()) ||
2047
885
                                 isScalarizableAggregate)) {
2048
885
      Constraints += "=" + OutputConstraint;
2049
885
      ResultRegQualTys.push_back(OutExpr->getType());
2050
885
      ResultRegDests.push_back(Dest);
2051
885
      ResultTruncRegTypes.push_back(ConvertTypeForMem(OutExpr->getType()));
2052
885
      if (Info.allowsRegister() && isScalarizableAggregate) {
2053
16
        ResultTypeRequiresCast.push_back(true);
2054
16
        unsigned Size = getContext().getTypeSize(OutExpr->getType());
2055
16
        llvm::Type *ConvTy = llvm::IntegerType::get(getLLVMContext(), Size);
2056
16
        ResultRegTypes.push_back(ConvTy);
2057
869
      } else {
2058
869
        ResultTypeRequiresCast.push_back(false);
2059
869
        ResultRegTypes.push_back(ResultTruncRegTypes.back());
2060
869
      }
2061
885
      // If this output is tied to an input, and if the input is larger, then
2062
885
      // we need to set the actual result type of the inline asm node to be the
2063
885
      // same as the input type.
2064
885
      if (Info.hasMatchingInput()) {
2065
35
        unsigned InputNo;
2066
38
        for (InputNo = 0; InputNo != S.getNumInputs(); ++InputNo) {
2067
38
          TargetInfo::ConstraintInfo &Input = InputConstraintInfos[InputNo];
2068
38
          if (Input.hasTiedOperand() && Input.getTiedOperand() == i)
2069
35
            break;
2070
38
        }
2071
35
        assert(InputNo != S.getNumInputs() && "Didn't find matching input!");
2072
35
2073
35
        QualType InputTy = S.getInputExpr(InputNo)->getType();
2074
35
        QualType OutputType = OutExpr->getType();
2075
35
2076
35
        uint64_t InputSize = getContext().getTypeSize(InputTy);
2077
35
        if (getContext().getTypeSize(OutputType) < InputSize) {
2078
4
          // Form the asm to return the value as a larger integer or fp type.
2079
4
          ResultRegTypes.back() = ConvertType(InputTy);
2080
4
        }
2081
35
      }
2082
885
      if (llvm::Type* AdjTy =
2083
885
            getTargetHooks().adjustInlineAsmType(*this, OutputConstraint,
2084
885
                                                 ResultRegTypes.back()))
2085
885
        ResultRegTypes.back() = AdjTy;
2086
0
      else {
2087
0
        CGM.getDiags().Report(S.getAsmLoc(),
2088
0
                              diag::err_asm_invalid_type_in_input)
2089
0
            << OutExpr->getType() << OutputConstraint;
2090
0
      }
2091
885
2092
885
      // Update largest vector width for any vector types.
2093
885
      if (auto *VT = dyn_cast<llvm::VectorType>(ResultRegTypes.back()))
2094
51
        LargestVectorWidth = std::max((uint64_t)LargestVectorWidth,
2095
51
                                   VT->getPrimitiveSizeInBits().getFixedSize());
2096
885
    } else {
2097
180
      ArgTypes.push_back(Dest.getAddress(*this).getType());
2098
180
      Args.push_back(Dest.getPointer(*this));
2099
180
      Constraints += "=*";
2100
180
      Constraints += OutputConstraint;
2101
180
      ReadOnly = ReadNone = false;
2102
180
    }
2103
1.06k
2104
1.06k
    if (Info.isReadWrite()) {
2105
135
      InOutConstraints += ',';
2106
135
2107
135
      const Expr *InputExpr = S.getOutputExpr(i);
2108
135
      llvm::Value *Arg = EmitAsmInputLValue(Info, Dest, InputExpr->getType(),
2109
135
                                            InOutConstraints,
2110
135
                                            InputExpr->getExprLoc());
2111
135
2112
135
      if (llvm::Type* AdjTy =
2113
135
          getTargetHooks().adjustInlineAsmType(*this, OutputConstraint,
2114
135
                                               Arg->getType()))
2115
135
        Arg = Builder.CreateBitCast(Arg, AdjTy);
2116
135
2117
135
      // Update largest vector width for any vector types.
2118
135
      if (auto *VT = dyn_cast<llvm::VectorType>(Arg->getType()))
2119
6
        LargestVectorWidth = std::max((uint64_t)LargestVectorWidth,
2120
6
                                   VT->getPrimitiveSizeInBits().getFixedSize());
2121
135
      if (Info.allowsRegister())
2122
120
        InOutConstraints += llvm::utostr(i);
2123
15
      else
2124
15
        InOutConstraints += OutputConstraint;
2125
135
2126
135
      InOutArgTypes.push_back(Arg->getType());
2127
135
      InOutArgs.push_back(Arg);
2128
135
    }
2129
1.06k
  }
2130
1.37k
2131
1.37k
  // If this is a Microsoft-style asm blob, store the return registers (EAX:EDX)
2132
1.37k
  // to the return value slot. Only do this when returning in registers.
2133
1.37k
  if (isa<MSAsmStmt>(&S)) {
2134
155
    const ABIArgInfo &RetAI = CurFnInfo->getReturnInfo();
2135
155
    if (RetAI.isDirect() || RetAI.isExtend()) {
2136
26
      // Make a fake lvalue for the return value slot.
2137
26
      LValue ReturnSlot = MakeAddrLValue(ReturnValue, FnRetTy);
2138
26
      CGM.getTargetCodeGenInfo().addReturnRegisterOutputs(
2139
26
          *this, ReturnSlot, Constraints, ResultRegTypes, ResultTruncRegTypes,
2140
26
          ResultRegDests, AsmString, S.getNumOutputs());
2141
26
      SawAsmBlock = true;
2142
26
    }
2143
155
  }
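The MS-blob handling above implements the MSVC convention that a value left in EAX (or EAX:EDX) when control falls off the end of the function becomes its return value. Sketch (requires -fasm-blocks or MSVC mode):

    int return_42(void) {
      __asm mov eax, 42
      /* no C return statement: addReturnRegisterOutputs() stores
         EAX into the return slot */
    }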
2144
1.37k
2145
2.64k
  for (unsigned i = 0, e = S.getNumInputs(); i != e; i++) {
2146
1.27k
    const Expr *InputExpr = S.getInputExpr(i);
2147
1.27k
2148
1.27k
    TargetInfo::ConstraintInfo &Info = InputConstraintInfos[i];
2149
1.27k
2150
1.27k
    if (Info.allowsMemory())
2151
582
      ReadNone = false;
2152
1.27k
2153
1.27k
    if (!Constraints.empty())
2154
1.09k
      Constraints += ',';
2155
1.27k
2156
1.27k
    // Simplify the input constraint.
2157
1.27k
    std::string InputConstraint(S.getInputConstraint(i));
2158
1.27k
    InputConstraint = SimplifyConstraint(InputConstraint.c_str(), getTarget(),
2159
1.27k
                                         &OutputConstraintInfos);
2160
1.27k
2161
1.27k
    InputConstraint = AddVariableConstraints(
2162
1.27k
        InputConstraint, *InputExpr->IgnoreParenNoopCasts(getContext()),
2163
1.27k
        getTarget(), CGM, S, false /* No EarlyClobber */);
2164
1.27k
2165
1.27k
    std::string ReplaceConstraint (InputConstraint);
2166
1.27k
    llvm::Value *Arg = EmitAsmInput(Info, InputExpr, Constraints);
2167
1.27k
2168
1.27k
    // If this input argument is tied to a larger output result, extend the
2169
1.27k
    // input to be the same size as the output.  The LLVM backend wants to see
2170
1.27k
    // the input and output of a matching constraint to be the same size.  Note
2171
1.27k
    // that GCC does not define what the top bits are here.  We use zext because
2172
1.27k
    // that is usually cheaper, but LLVM IR should really get an anyext someday.
2173
1.27k
    if (Info.hasTiedOperand()) {
2174
35
      unsigned Output = Info.getTiedOperand();
2175
35
      QualType OutputType = S.getOutputExpr(Output)->getType();
2176
35
      QualType InputTy = InputExpr->getType();
2177
35
2178
35
      if (getContext().getTypeSize(OutputType) >
2179
35
          getContext().getTypeSize(InputTy)) {
2180
7
        // Use ptrtoint as appropriate so that we can do our extension.
2181
7
        if (isa<llvm::PointerType>(Arg->getType()))
2182
0
          Arg = Builder.CreatePtrToInt(Arg, IntPtrTy);
2183
7
        llvm::Type *OutputTy = ConvertType(OutputType);
2184
7
        if (isa<llvm::IntegerType>(OutputTy))
2185
3
          Arg = Builder.CreateZExt(Arg, OutputTy);
2186
4
        else if (isa<llvm::PointerType>(OutputTy))
2187
1
          Arg = Builder.CreateZExt(Arg, IntPtrTy);
2188
3
        else {
2189
3
          assert(OutputTy->isFloatingPointTy() && "Unexpected output type");
2190
3
          Arg = Builder.CreateFPExt(Arg, OutputTy);
2191
3
        }
2192
7
      }
2193
35
      // Deal with the tied operands' constraint code in adjustInlineAsmType.
2194
35
      ReplaceConstraint = OutputConstraints[Output];
2195
35
    }
2196
1.27k
    if (llvm::Type* AdjTy =
2197
1.27k
          getTargetHooks().adjustInlineAsmType(*this, ReplaceConstraint,
2198
1.27k
                                                   Arg->getType()))
2199
1.27k
      Arg = Builder.CreateBitCast(Arg, AdjTy);
2200
0
    else
2201
0
      CGM.getDiags().Report(S.getAsmLoc(), diag::err_asm_invalid_type_in_input)
2202
0
          << InputExpr->getType() << InputConstraint;
2203
1.27k
2204
1.27k
    // Update largest vector width for any vector types.
2205
1.27k
    if (auto *VT = dyn_cast<llvm::VectorType>(Arg->getType()))
2206
47
      LargestVectorWidth = std::max((uint64_t)LargestVectorWidth,
2207
47
                                   VT->getPrimitiveSizeInBits().getFixedSize());
2208
1.27k
2209
1.27k
    ArgTypes.push_back(Arg->getType());
2210
1.27k
    Args.push_back(Arg);
2211
1.27k
    Constraints += InputConstraint;
2212
1.27k
  }
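The zero-extension in the loop above fires when a matching ("0"-style) constraint ties operands of different widths, since both halves must be presented to LLVM with one type. Sketch:

    unsigned long long widen(unsigned int narrow) {
      unsigned long long wide;
      /* "0" ties the input to output operand 0; the 32-bit input is
         zero-extended to the 64-bit output type before the asm call. */
      asm ("" : "=r" (wide) : "0" (narrow));
      return wide;
    }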
2213
1.37k
2214
1.37k
  // Labels
2215
1.37k
  SmallVector<llvm::BasicBlock *, 16> Transfer;
2216
1.37k
  llvm::BasicBlock *Fallthrough = nullptr;
2217
1.37k
  bool IsGCCAsmGoto = false;
2218
1.37k
  if (const auto *GS =  dyn_cast<GCCAsmStmt>(&S)) {
2219
1.22k
    IsGCCAsmGoto = GS->isAsmGoto();
2220
1.22k
    if (IsGCCAsmGoto) {
2221
45
      for (const auto *E : GS->labels()) {
2222
45
        JumpDest Dest = getJumpDestForLabel(E->getLabel());
2223
45
        Transfer.push_back(Dest.getBlock());
2224
45
        llvm::BlockAddress *BA =
2225
45
            llvm::BlockAddress::get(CurFn, Dest.getBlock());
2226
45
        Args.push_back(BA);
2227
45
        ArgTypes.push_back(BA->getType());
2228
45
        if (!Constraints.empty())
2229
40
          Constraints += ',';
2230
45
        Constraints += 'X';
2231
45
      }
2232
26
      Fallthrough = createBasicBlock("asm.fallthrough");
2233
26
    }
2234
1.22k
  }
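This label plumbing supports GNU asm goto, which is lowered to a callbr whose normal destination is the asm.fallthrough block created above. Usage sketch:

    int try_op(void) {
      asm goto ("jmp %l[failed]" : : : : failed);
      return 0;     /* the asm.fallthrough destination */
    failed:
      return 1;     /* one of the 'Transfer' label destinations */
    }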
2235
1.37k
2236
1.37k
  // Append the "input" part of inout constraints last.
2237
1.51k
  for (unsigned i = 0, e = InOutArgs.size(); i != e; i++) {
2238
135
    ArgTypes.push_back(InOutArgTypes[i]);
2239
135
    Args.push_back(InOutArgs[i]);
2240
135
  }
2241
1.37k
  Constraints += InOutConstraints;
2242
1.37k
2243
1.37k
  // Clobbers
2244
2.33k
  for (unsigned i = 0, e = S.getNumClobbers(); i != e; i++) {
2245
963
    StringRef Clobber = S.getClobber(i);
2246
963
2247
963
    if (Clobber == "memory")
2248
67
      ReadOnly = ReadNone = false;
2249
896
    else if (Clobber != "cc") {
2250
854
      Clobber = getTarget().getNormalizedGCCRegisterName(Clobber);
2251
854
      if (CGM.getCodeGenOpts().StackClashProtector &&
2252
854
          getTarget().isSPRegName(Clobber)) {
2253
2
        CGM.getDiags().Report(S.getAsmLoc(),
2254
2
                              diag::warn_stack_clash_protection_inline_asm);
2255
2
      }
2256
854
    }
2257
963
2258
963
    if (!Constraints.empty())
2259
771
      Constraints += ',';
2260
963
2261
963
    Constraints += "~{";
2262
963
    Constraints += Clobber;
2263
963
    Constraints += '}';
2264
963
  }
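A clobber list exercising each branch of that loop (sketch, x86 names assumed): register names are normalized and wrapped as "~{reg}", "cc" passes through un-normalized, and "memory" additionally forces the statement to be treated as reading and writing memory:

    void probe(void) {
      int lo, hi;
      asm ("cpuid"
           : "=a" (lo), "=d" (hi)
           : "a" (0)
           : "ebx", "ecx", "memory", "cc");
    }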
2265
1.37k
2266
1.37k
  // Add machine-specific clobbers.
2267
1.37k
  std::string MachineClobbers = getTarget().getClobbers();
2268
1.37k
  if (!MachineClobbers.empty()) {
2269
907
    if (!Constraints.empty())
2270
813
      Constraints += ',';
2271
907
    Constraints += MachineClobbers;
2272
907
  }
2273
1.37k
2274
1.37k
  llvm::Type *ResultType;
2275
1.37k
  if (ResultRegTypes.empty())
2276
602
    ResultType = VoidTy;
2277
774
  else if (ResultRegTypes.size() == 1)
2278
704
    ResultType = ResultRegTypes[0];
2279
70
  else
2280
70
    ResultType = llvm::StructType::get(getLLVMContext(), ResultRegTypes);
2281
1.37k
2282
1.37k
  llvm::FunctionType *FTy =
2283
1.37k
    llvm::FunctionType::get(ResultType, ArgTypes, false);
2284
1.37k
2285
1.37k
  bool HasSideEffect = S.isVolatile() || S.getNumOutputs() == 0;
2286
1.37k
  llvm::InlineAsm::AsmDialect AsmDialect = isa<MSAsmStmt>(&S) ?
2287
1.22k
    llvm::InlineAsm::AD_Intel : llvm::InlineAsm::AD_ATT;
2288
1.37k
  llvm::InlineAsm *IA =
2289
1.37k
    llvm::InlineAsm::get(FTy, AsmString, Constraints, HasSideEffect,
2290
1.37k
                         /* IsAlignStack */ false, AsmDialect);
2291
1.37k
  std::vector<llvm::Value*> RegResults;
2292
1.37k
  if (IsGCCAsmGoto) {
2293
26
    llvm::CallBrInst *Result =
2294
26
        Builder.CreateCallBr(IA, Fallthrough, Transfer, Args);
2295
26
    EmitBlock(Fallthrough);
2296
26
    UpdateAsmCallInst(cast<llvm::CallBase>(*Result), HasSideEffect, ReadOnly,
2297
26
                      ReadNone, S, ResultRegTypes, *this, RegResults);
2298
1.35k
  } else {
2299
1.35k
    llvm::CallInst *Result =
2300
1.35k
        Builder.CreateCall(IA, Args, getBundlesForFunclet(IA));
2301
1.35k
    UpdateAsmCallInst(cast<llvm::CallBase>(*Result), HasSideEffect, ReadOnly,
2302
1.35k
                      ReadNone, S, ResultRegTypes, *this, RegResults);
2303
1.35k
  }
2304
1.37k
2305
1.37k
  assert(RegResults.size() == ResultRegTypes.size());
2306
1.37k
  assert(RegResults.size() == ResultTruncRegTypes.size());
2307
1.37k
  assert(RegResults.size() == ResultRegDests.size());
2308
1.37k
  // ResultRegDests can also be populated by addReturnRegisterOutputs() above,
2309
1.37k
  // in which case its size may grow.
2310
1.37k
  assert(ResultTypeRequiresCast.size() <= ResultRegDests.size());
2311
2.28k
  for (unsigned i = 0, e = RegResults.size(); i != e; ++i) {
2312
908
    llvm::Value *Tmp = RegResults[i];
2313
908
2314
908
    // If the result type of the LLVM IR asm doesn't match the result type of
2315
908
    // the expression, do the conversion.
2316
908
    if (ResultRegTypes[i] != ResultTruncRegTypes[i]) {
2317
34
      llvm::Type *TruncTy = ResultTruncRegTypes[i];
2318
34
2319
34
      // Truncate the integer result to the right size; note that TruncTy can be
2320
34
      // a pointer.
2321
34
      if (TruncTy->isFloatingPointTy())
2322
1
        Tmp = Builder.CreateFPTrunc(Tmp, TruncTy);
2323
33
      else if (TruncTy->isPointerTy() && Tmp->getType()->isIntegerTy()) {
2324
0
        uint64_t ResSize = CGM.getDataLayout().getTypeSizeInBits(TruncTy);
2325
0
        Tmp = Builder.CreateTrunc(Tmp,
2326
0
                   llvm::IntegerType::get(getLLVMContext(), (unsigned)ResSize));
2327
0
        Tmp = Builder.CreateIntToPtr(Tmp, TruncTy);
2328
33
      } else if (Tmp->getType()->isPointerTy() && TruncTy->isIntegerTy()) {
2329
0
        uint64_t TmpSize =CGM.getDataLayout().getTypeSizeInBits(Tmp->getType());
2330
0
        Tmp = Builder.CreatePtrToInt(Tmp,
2331
0
                   llvm::IntegerType::get(getLLVMContext(), (unsigned)TmpSize));
2332
0
        Tmp = Builder.CreateTrunc(Tmp, TruncTy);
2333
33
      } else if (TruncTy->isIntegerTy()) {
2334
7
        Tmp = Builder.CreateZExtOrTrunc(Tmp, TruncTy);
2335
26
      } else if (TruncTy->isVectorTy()) {
2336
10
        Tmp = Builder.CreateBitCast(Tmp, TruncTy);
2337
10
      }
2338
34
    }
2339
908
2340
908
    LValue Dest = ResultRegDests[i];
2341
908
    // ResultTypeRequiresCast elements correspond to the first
2342
908
    // ResultTypeRequiresCast.size() elements of RegResults.
2343
908
    if ((i < ResultTypeRequiresCast.size()) && ResultTypeRequiresCast[i]) {
2344
16
      unsigned Size = getContext().getTypeSize(ResultRegQualTys[i]);
2345
16
      Address A = Builder.CreateBitCast(Dest.getAddress(*this),
2346
16
                                        ResultRegTypes[i]->getPointerTo());
2347
16
      QualType Ty = getContext().getIntTypeForBitwidth(Size, /*Signed*/ false);
2348
16
      if (Ty.isNull()) {
2349
3
        const Expr *OutExpr = S.getOutputExpr(i);
2350
3
        CGM.Error(
2351
3
            OutExpr->getExprLoc(),
2352
3
            "impossible constraint in asm: can't store value into a register");
2353
3
        return;
2354
3
      }
2355
13
      Dest = MakeAddrLValue(A, Ty);
2356
13
    }
2357
908
    EmitStoreThroughLValue(RValue::get(Tmp), Dest);
2358
905
  }
2359
1.37k
}
2360
2361
662
LValue CodeGenFunction::InitCapturedStruct(const CapturedStmt &S) {
2362
662
  const RecordDecl *RD = S.getCapturedRecordDecl();
2363
662
  QualType RecordTy = getContext().getRecordType(RD);
2364
662
2365
662
  // Initialize the captured struct.
2366
662
  LValue SlotLV =
2367
662
    MakeAddrLValue(CreateMemTemp(RecordTy, "agg.captured"), RecordTy);
2368
662
2369
662
  RecordDecl::field_iterator CurField = RD->field_begin();
2370
662
  for (CapturedStmt::const_capture_init_iterator I = S.capture_init_begin(),
2371
662
                                                 E = S.capture_init_end();
2372
1.77k
       I != E; ++I, ++CurField) {
2373
1.11k
    LValue LV = EmitLValueForFieldInitialization(SlotLV, *CurField);
2374
1.11k
    if (CurField->hasCapturedVLAType()) {
2375
47
      auto VAT = CurField->getCapturedVLAType();
2376
47
      EmitStoreThroughLValue(RValue::get(VLASizeMap[VAT->getSizeExpr()]), LV);
2377
1.06k
    } else {
2378
1.06k
      EmitInitializerForField(*CurField, LV, *I);
2379
1.06k
    }
2380
1.11k
  }
2381
662
2382
662
  return SlotLV;
2383
662
}
2384
2385
/// Generate an outlined function for the body of a CapturedStmt, store any
2386
/// captured variables into the captured struct, and call the outlined function.
2387
llvm::Function *
2388
27
CodeGenFunction::EmitCapturedStmt(const CapturedStmt &S, CapturedRegionKind K) {
2389
27
  LValue CapStruct = InitCapturedStruct(S);
2390
27
2391
27
  // Emit the CapturedDecl
2392
27
  CodeGenFunction CGF(CGM, true);
2393
27
  CGCapturedStmtRAII CapInfoRAII(CGF, new CGCapturedStmtInfo(S, K));
2394
27
  llvm::Function *F = CGF.GenerateCapturedStmtFunction(S);
2395
27
  delete CGF.CapturedStmtInfo;
2396
27
2397
27
  // Emit call to the helper function.
2398
27
  EmitCallOrInvoke(F, CapStruct.getPointer(*this));
2399
27
2400
27
  return F;
2401
27
}
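A CapturedStmt is the AST's representation of a region whose body gets outlined, OpenMP constructs being the main client. Roughly, for a region like the one below, the used variables are packed into agg.captured and the outlined helper receives its address (illustrative; use() is an invented function):

    extern void use(int);

    void user(int x) {
    #pragma omp parallel    /* the body becomes a CapturedStmt */
      {
        use(x);             /* 'x' is read back out of the captured struct */
      }
    }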
2402
2403
635
Address CodeGenFunction::GenerateCapturedStmtArgument(const CapturedStmt &S) {
2404
635
  LValue CapStruct = InitCapturedStruct(S);
2405
635
  return CapStruct.getAddress(*this);
2406
635
}
2407
2408
/// Creates the outlined function for a CapturedStmt.
2409
llvm::Function *
2410
662
CodeGenFunction::GenerateCapturedStmtFunction(const CapturedStmt &S) {
2411
662
  assert(CapturedStmtInfo &&
2412
662
    "CapturedStmtInfo should be set when generating the captured function");
2413
662
  const CapturedDecl *CD = S.getCapturedDecl();
2414
662
  const RecordDecl *RD = S.getCapturedRecordDecl();
2415
662
  SourceLocation Loc = S.getBeginLoc();
2416
662
  assert(CD->hasBody() && "missing CapturedDecl body");
2417
662
2418
662
  // Build the argument list.
2419
662
  ASTContext &Ctx = CGM.getContext();
2420
662
  FunctionArgList Args;
2421
662
  Args.append(CD->param_begin(), CD->param_end());
2422
662
2423
662
  // Create the function declaration.
2424
662
  const CGFunctionInfo &FuncInfo =
2425
662
    CGM.getTypes().arrangeBuiltinFunctionDeclaration(Ctx.VoidTy, Args);
2426
662
  llvm::FunctionType *FuncLLVMTy = CGM.getTypes().GetFunctionType(FuncInfo);
2427
662
2428
662
  llvm::Function *F =
2429
662
    llvm::Function::Create(FuncLLVMTy, llvm::GlobalValue::InternalLinkage,
2430
662
                           CapturedStmtInfo->getHelperName(), &CGM.getModule());
2431
662
  CGM.SetInternalFunctionAttributes(CD, F, FuncInfo);
2432
662
  if (CD->isNothrow())
2433
485
    F->addFnAttr(llvm::Attribute::NoUnwind);
2434
662
2435
662
  // Generate the function.
2436
662
  StartFunction(CD, Ctx.VoidTy, F, FuncInfo, Args, CD->getLocation(),
2437
662
                CD->getBody()->getBeginLoc());
2438
662
  // Set the context parameter in CapturedStmtInfo.
2439
662
  Address DeclPtr = GetAddrOfLocalVar(CD->getContextParam());
2440
662
  CapturedStmtInfo->setContextValue(Builder.CreateLoad(DeclPtr));
2441
662
2442
662
  // Initialize variable-length arrays.
2443
662
  LValue Base = MakeNaturalAlignAddrLValue(CapturedStmtInfo->getContextValue(),
2444
662
                                           Ctx.getTagDeclType(RD));
2445
1.11k
  for (auto *FD : RD->fields()) {
2446
1.11k
    if (FD->hasCapturedVLAType()) {
2447
47
      auto *ExprArg =
2448
47
          EmitLoadOfLValue(EmitLValueForField(Base, FD), S.getBeginLoc())
2449
47
              .getScalarVal();
2450
47
      auto VAT = FD->getCapturedVLAType();
2451
47
      VLASizeMap[VAT->getSizeExpr()] = ExprArg;
2452
47
    }
2453
1.11k
  }
2454
662
2455
662
  // If 'this' is captured, load it into CXXThisValue.
2456
662
  if (CapturedStmtInfo->isCXXThisExprCaptured()) {
2457
24
    FieldDecl *FD = CapturedStmtInfo->getThisFieldDecl();
2458
24
    LValue ThisLValue = EmitLValueForField(Base, FD);
2459
24
    CXXThisValue = EmitLoadOfLValue(ThisLValue, Loc).getScalarVal();
2460
24
  }
2461
662
2462
662
  PGO.assignRegionCounters(GlobalDecl(CD), F);
2463
662
  CapturedStmtInfo->EmitBody(*this, CD->getBody());
2464
662
  FinishFunction(CD->getBodyRBrace());
2465
662
2466
662
  return F;
2467
662
}