Coverage Report

Created: 2020-09-19 12:23

/Users/buildslave/jenkins/workspace/coverage/llvm-project/clang/lib/CodeGen/CGStmt.cpp
//===--- CGStmt.cpp - Emit LLVM Code from Statements ----------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit Stmt nodes as LLVM code.
//
//===----------------------------------------------------------------------===//

#include "CGDebugInfo.h"
#include "CGOpenMPRuntime.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "TargetInfo.h"
#include "clang/AST/Attr.h"
#include "clang/AST/StmtVisitor.h"
#include "clang/Basic/Builtins.h"
#include "clang/Basic/PrettyStackTrace.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/TargetInfo.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/Support/SaveAndRestore.h"
#include "llvm/Transforms/Scalar/LowerExpectIntrinsic.h"

using namespace clang;
using namespace CodeGen;

//===----------------------------------------------------------------------===//
//                              Statement Emission
//===----------------------------------------------------------------------===//

void CodeGenFunction::EmitStopPoint(const Stmt *S) {
  if (CGDebugInfo *DI = getDebugInfo()) {
    SourceLocation Loc;
    Loc = S->getBeginLoc();
    DI->EmitLocation(Builder, Loc);

    LastStopPoint = Loc;
  }
}
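
// Example for EmitStopPoint above (illustrative, not part of CGStmt.cpp):
// when compiling with -g, each statement gets a stop point, so the IR for it
// carries a source location, roughly:
//
//   x = f();   -->   %call = call i32 @f(), !dbg !42
//
// where !42 is a DILocation referring to the statement's begin location.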

void CodeGenFunction::EmitStmt(const Stmt *S, ArrayRef<const Attr *> Attrs) {
  assert(S && "Null statement?");
  PGO.setCurrentStmt(S);

  // These statements have their own debug info handling.
  if (EmitSimpleStmt(S))
    return;

  // Check if we are generating unreachable code.
  if (!HaveInsertPoint()) {
    // If so, and the statement doesn't contain a label, then we do not need to
    // generate actual code. This is safe because (1) the current point is
    // unreachable, so we don't need to execute the code, and (2) we've already
    // handled the statements which update internal data structures (like the
    // local variable map) which could be used by subsequent statements.
    if (!ContainsLabel(S)) {
      // Verify that any decl statements were handled as simple, they may be in
      // scope of subsequent reachable statements.
      assert(!isa<DeclStmt>(*S) && "Unexpected DeclStmt!");
      return;
    }

    // Otherwise, make a new block to hold the code.
    EnsureInsertPoint();
  }

  // Generate a stoppoint if we are emitting debug info.
  EmitStopPoint(S);

  // Ignore all OpenMP directives except for simd if OpenMP with Simd is
  // enabled.
  if (getLangOpts().OpenMP && getLangOpts().OpenMPSimd) {
    if (const auto *D = dyn_cast<OMPExecutableDirective>(S)) {
      EmitSimpleOMPExecutableDirective(*D);
      return;
    }
  }

  switch (S->getStmtClass()) {
  case Stmt::NoStmtClass:
  case Stmt::CXXCatchStmtClass:
  case Stmt::SEHExceptStmtClass:
  case Stmt::SEHFinallyStmtClass:
  case Stmt::MSDependentExistsStmtClass:
    llvm_unreachable("invalid statement class to emit generically");
  case Stmt::NullStmtClass:
  case Stmt::CompoundStmtClass:
  case Stmt::DeclStmtClass:
  case Stmt::LabelStmtClass:
  case Stmt::AttributedStmtClass:
  case Stmt::GotoStmtClass:
  case Stmt::BreakStmtClass:
  case Stmt::ContinueStmtClass:
  case Stmt::DefaultStmtClass:
  case Stmt::CaseStmtClass:
  case Stmt::SEHLeaveStmtClass:
    llvm_unreachable("should have emitted these statements as simple");

#define STMT(Type, Base)
#define ABSTRACT_STMT(Op)
#define EXPR(Type, Base) \
  case Stmt::Type##Class:
#include "clang/AST/StmtNodes.inc"
  {
    // Remember the block we came in on.
    llvm::BasicBlock *incoming = Builder.GetInsertBlock();
    assert(incoming && "expression emission must have an insertion point");

    EmitIgnoredExpr(cast<Expr>(S));

    llvm::BasicBlock *outgoing = Builder.GetInsertBlock();
    assert(outgoing && "expression emission cleared block!");

    // The expression emitters assume (reasonably!) that the insertion
    // point is always set.  To maintain that, the call-emission code
    // for noreturn functions has to enter a new block with no
    // predecessors.  We want to kill that block and mark the current
    // insertion point unreachable in the common case of a call like
    // "exit();".  Since expression emission doesn't otherwise create
    // blocks with no predecessors, we can just test for that.
    // However, we must be careful not to do this to our incoming
    // block, because *statement* emission does sometimes create
    // reachable blocks which will have no predecessors until later in
    // the function.  This occurs with, e.g., labels that are not
    // reachable by fallthrough.
    if (incoming != outgoing && outgoing->use_empty()) {
      outgoing->eraseFromParent();
      Builder.ClearInsertionPoint();
    }
    break;
  }

  case Stmt::IndirectGotoStmtClass:
    EmitIndirectGotoStmt(cast<IndirectGotoStmt>(*S)); break;

  case Stmt::IfStmtClass:      EmitIfStmt(cast<IfStmt>(*S));              break;
  case Stmt::WhileStmtClass:   EmitWhileStmt(cast<WhileStmt>(*S), Attrs); break;
  case Stmt::DoStmtClass:      EmitDoStmt(cast<DoStmt>(*S), Attrs);       break;
  case Stmt::ForStmtClass:     EmitForStmt(cast<ForStmt>(*S), Attrs);     break;

  case Stmt::ReturnStmtClass:  EmitReturnStmt(cast<ReturnStmt>(*S));      break;

  case Stmt::SwitchStmtClass:  EmitSwitchStmt(cast<SwitchStmt>(*S));      break;
  case Stmt::GCCAsmStmtClass:  // Intentional fall-through.
  case Stmt::MSAsmStmtClass:   EmitAsmStmt(cast<AsmStmt>(*S));            break;
  case Stmt::CoroutineBodyStmtClass:
    EmitCoroutineBody(cast<CoroutineBodyStmt>(*S));
    break;
  case Stmt::CoreturnStmtClass:
    EmitCoreturnStmt(cast<CoreturnStmt>(*S));
    break;
  case Stmt::CapturedStmtClass: {
    const CapturedStmt *CS = cast<CapturedStmt>(S);
    EmitCapturedStmt(*CS, CS->getCapturedRegionKind());
    }
    break;
  case Stmt::ObjCAtTryStmtClass:
    EmitObjCAtTryStmt(cast<ObjCAtTryStmt>(*S));
    break;
  case Stmt::ObjCAtCatchStmtClass:
    llvm_unreachable(
                    "@catch statements should be handled by EmitObjCAtTryStmt");
  case Stmt::ObjCAtFinallyStmtClass:
    llvm_unreachable(
                  "@finally statements should be handled by EmitObjCAtTryStmt");
  case Stmt::ObjCAtThrowStmtClass:
    EmitObjCAtThrowStmt(cast<ObjCAtThrowStmt>(*S));
    break;
  case Stmt::ObjCAtSynchronizedStmtClass:
    EmitObjCAtSynchronizedStmt(cast<ObjCAtSynchronizedStmt>(*S));
    break;
  case Stmt::ObjCForCollectionStmtClass:
    EmitObjCForCollectionStmt(cast<ObjCForCollectionStmt>(*S));
    break;
  case Stmt::ObjCAutoreleasePoolStmtClass:
    EmitObjCAutoreleasePoolStmt(cast<ObjCAutoreleasePoolStmt>(*S));
    break;

  case Stmt::CXXTryStmtClass:
    EmitCXXTryStmt(cast<CXXTryStmt>(*S));
    break;
  case Stmt::CXXForRangeStmtClass:
    EmitCXXForRangeStmt(cast<CXXForRangeStmt>(*S), Attrs);
    break;
  case Stmt::SEHTryStmtClass:
    EmitSEHTryStmt(cast<SEHTryStmt>(*S));
    break;
  case Stmt::OMPParallelDirectiveClass:
    EmitOMPParallelDirective(cast<OMPParallelDirective>(*S));
    break;
  case Stmt::OMPSimdDirectiveClass:
    EmitOMPSimdDirective(cast<OMPSimdDirective>(*S));
    break;
  case Stmt::OMPForDirectiveClass:
    EmitOMPForDirective(cast<OMPForDirective>(*S));
    break;
  case Stmt::OMPForSimdDirectiveClass:
    EmitOMPForSimdDirective(cast<OMPForSimdDirective>(*S));
    break;
  case Stmt::OMPSectionsDirectiveClass:
    EmitOMPSectionsDirective(cast<OMPSectionsDirective>(*S));
    break;
  case Stmt::OMPSectionDirectiveClass:
    EmitOMPSectionDirective(cast<OMPSectionDirective>(*S));
    break;
  case Stmt::OMPSingleDirectiveClass:
    EmitOMPSingleDirective(cast<OMPSingleDirective>(*S));
    break;
  case Stmt::OMPMasterDirectiveClass:
    EmitOMPMasterDirective(cast<OMPMasterDirective>(*S));
    break;
  case Stmt::OMPCriticalDirectiveClass:
    EmitOMPCriticalDirective(cast<OMPCriticalDirective>(*S));
    break;
  case Stmt::OMPParallelForDirectiveClass:
    EmitOMPParallelForDirective(cast<OMPParallelForDirective>(*S));
    break;
  case Stmt::OMPParallelForSimdDirectiveClass:
    EmitOMPParallelForSimdDirective(cast<OMPParallelForSimdDirective>(*S));
    break;
  case Stmt::OMPParallelMasterDirectiveClass:
    EmitOMPParallelMasterDirective(cast<OMPParallelMasterDirective>(*S));
    break;
  case Stmt::OMPParallelSectionsDirectiveClass:
    EmitOMPParallelSectionsDirective(cast<OMPParallelSectionsDirective>(*S));
    break;
  case Stmt::OMPTaskDirectiveClass:
    EmitOMPTaskDirective(cast<OMPTaskDirective>(*S));
    break;
  case Stmt::OMPTaskyieldDirectiveClass:
    EmitOMPTaskyieldDirective(cast<OMPTaskyieldDirective>(*S));
    break;
  case Stmt::OMPBarrierDirectiveClass:
    EmitOMPBarrierDirective(cast<OMPBarrierDirective>(*S));
    break;
  case Stmt::OMPTaskwaitDirectiveClass:
    EmitOMPTaskwaitDirective(cast<OMPTaskwaitDirective>(*S));
    break;
  case Stmt::OMPTaskgroupDirectiveClass:
    EmitOMPTaskgroupDirective(cast<OMPTaskgroupDirective>(*S));
    break;
  case Stmt::OMPFlushDirectiveClass:
    EmitOMPFlushDirective(cast<OMPFlushDirective>(*S));
    break;
  case Stmt::OMPDepobjDirectiveClass:
    EmitOMPDepobjDirective(cast<OMPDepobjDirective>(*S));
    break;
  case Stmt::OMPScanDirectiveClass:
    EmitOMPScanDirective(cast<OMPScanDirective>(*S));
    break;
  case Stmt::OMPOrderedDirectiveClass:
    EmitOMPOrderedDirective(cast<OMPOrderedDirective>(*S));
    break;
  case Stmt::OMPAtomicDirectiveClass:
    EmitOMPAtomicDirective(cast<OMPAtomicDirective>(*S));
    break;
  case Stmt::OMPTargetDirectiveClass:
    EmitOMPTargetDirective(cast<OMPTargetDirective>(*S));
    break;
  case Stmt::OMPTeamsDirectiveClass:
    EmitOMPTeamsDirective(cast<OMPTeamsDirective>(*S));
    break;
  case Stmt::OMPCancellationPointDirectiveClass:
    EmitOMPCancellationPointDirective(cast<OMPCancellationPointDirective>(*S));
    break;
  case Stmt::OMPCancelDirectiveClass:
    EmitOMPCancelDirective(cast<OMPCancelDirective>(*S));
    break;
  case Stmt::OMPTargetDataDirectiveClass:
    EmitOMPTargetDataDirective(cast<OMPTargetDataDirective>(*S));
    break;
  case Stmt::OMPTargetEnterDataDirectiveClass:
    EmitOMPTargetEnterDataDirective(cast<OMPTargetEnterDataDirective>(*S));
    break;
  case Stmt::OMPTargetExitDataDirectiveClass:
    EmitOMPTargetExitDataDirective(cast<OMPTargetExitDataDirective>(*S));
    break;
  case Stmt::OMPTargetParallelDirectiveClass:
    EmitOMPTargetParallelDirective(cast<OMPTargetParallelDirective>(*S));
    break;
  case Stmt::OMPTargetParallelForDirectiveClass:
    EmitOMPTargetParallelForDirective(cast<OMPTargetParallelForDirective>(*S));
    break;
  case Stmt::OMPTaskLoopDirectiveClass:
    EmitOMPTaskLoopDirective(cast<OMPTaskLoopDirective>(*S));
    break;
  case Stmt::OMPTaskLoopSimdDirectiveClass:
    EmitOMPTaskLoopSimdDirective(cast<OMPTaskLoopSimdDirective>(*S));
    break;
  case Stmt::OMPMasterTaskLoopDirectiveClass:
    EmitOMPMasterTaskLoopDirective(cast<OMPMasterTaskLoopDirective>(*S));
    break;
  case Stmt::OMPMasterTaskLoopSimdDirectiveClass:
    EmitOMPMasterTaskLoopSimdDirective(
        cast<OMPMasterTaskLoopSimdDirective>(*S));
    break;
  case Stmt::OMPParallelMasterTaskLoopDirectiveClass:
    EmitOMPParallelMasterTaskLoopDirective(
        cast<OMPParallelMasterTaskLoopDirective>(*S));
    break;
  case Stmt::OMPParallelMasterTaskLoopSimdDirectiveClass:
    EmitOMPParallelMasterTaskLoopSimdDirective(
        cast<OMPParallelMasterTaskLoopSimdDirective>(*S));
    break;
  case Stmt::OMPDistributeDirectiveClass:
    EmitOMPDistributeDirective(cast<OMPDistributeDirective>(*S));
    break;
  case Stmt::OMPTargetUpdateDirectiveClass:
    EmitOMPTargetUpdateDirective(cast<OMPTargetUpdateDirective>(*S));
    break;
  case Stmt::OMPDistributeParallelForDirectiveClass:
    EmitOMPDistributeParallelForDirective(
        cast<OMPDistributeParallelForDirective>(*S));
    break;
  case Stmt::OMPDistributeParallelForSimdDirectiveClass:
    EmitOMPDistributeParallelForSimdDirective(
        cast<OMPDistributeParallelForSimdDirective>(*S));
    break;
  case Stmt::OMPDistributeSimdDirectiveClass:
    EmitOMPDistributeSimdDirective(cast<OMPDistributeSimdDirective>(*S));
    break;
  case Stmt::OMPTargetParallelForSimdDirectiveClass:
    EmitOMPTargetParallelForSimdDirective(
        cast<OMPTargetParallelForSimdDirective>(*S));
    break;
  case Stmt::OMPTargetSimdDirectiveClass:
    EmitOMPTargetSimdDirective(cast<OMPTargetSimdDirective>(*S));
    break;
  case Stmt::OMPTeamsDistributeDirectiveClass:
    EmitOMPTeamsDistributeDirective(cast<OMPTeamsDistributeDirective>(*S));
    break;
  case Stmt::OMPTeamsDistributeSimdDirectiveClass:
    EmitOMPTeamsDistributeSimdDirective(
        cast<OMPTeamsDistributeSimdDirective>(*S));
    break;
  case Stmt::OMPTeamsDistributeParallelForSimdDirectiveClass:
    EmitOMPTeamsDistributeParallelForSimdDirective(
        cast<OMPTeamsDistributeParallelForSimdDirective>(*S));
    break;
  case Stmt::OMPTeamsDistributeParallelForDirectiveClass:
    EmitOMPTeamsDistributeParallelForDirective(
        cast<OMPTeamsDistributeParallelForDirective>(*S));
    break;
  case Stmt::OMPTargetTeamsDirectiveClass:
    EmitOMPTargetTeamsDirective(cast<OMPTargetTeamsDirective>(*S));
    break;
  case Stmt::OMPTargetTeamsDistributeDirectiveClass:
    EmitOMPTargetTeamsDistributeDirective(
        cast<OMPTargetTeamsDistributeDirective>(*S));
    break;
  case Stmt::OMPTargetTeamsDistributeParallelForDirectiveClass:
    EmitOMPTargetTeamsDistributeParallelForDirective(
        cast<OMPTargetTeamsDistributeParallelForDirective>(*S));
    break;
  case Stmt::OMPTargetTeamsDistributeParallelForSimdDirectiveClass:
    EmitOMPTargetTeamsDistributeParallelForSimdDirective(
        cast<OMPTargetTeamsDistributeParallelForSimdDirective>(*S));
    break;
  case Stmt::OMPTargetTeamsDistributeSimdDirectiveClass:
    EmitOMPTargetTeamsDistributeSimdDirective(
        cast<OMPTargetTeamsDistributeSimdDirective>(*S));
    break;
  }
}

bool CodeGenFunction::EmitSimpleStmt(const Stmt *S) {
  switch (S->getStmtClass()) {
  default: return false;
  case Stmt::NullStmtClass: break;
  case Stmt::CompoundStmtClass: EmitCompoundStmt(cast<CompoundStmt>(*S)); break;
  case Stmt::DeclStmtClass:     EmitDeclStmt(cast<DeclStmt>(*S));         break;
  case Stmt::LabelStmtClass:    EmitLabelStmt(cast<LabelStmt>(*S));       break;
  case Stmt::AttributedStmtClass:
                            EmitAttributedStmt(cast<AttributedStmt>(*S)); break;
  case Stmt::GotoStmtClass:     EmitGotoStmt(cast<GotoStmt>(*S));         break;
  case Stmt::BreakStmtClass:    EmitBreakStmt(cast<BreakStmt>(*S));       break;
  case Stmt::ContinueStmtClass: EmitContinueStmt(cast<ContinueStmt>(*S)); break;
  case Stmt::DefaultStmtClass:  EmitDefaultStmt(cast<DefaultStmt>(*S));   break;
  case Stmt::CaseStmtClass:     EmitCaseStmt(cast<CaseStmt>(*S));         break;
  case Stmt::SEHLeaveStmtClass: EmitSEHLeaveStmt(cast<SEHLeaveStmt>(*S)); break;
  }

  return true;
}

/// EmitCompoundStmt - Emit a compound statement {..} node.  If GetLast is true,
/// this captures the expression result of the last sub-statement and returns it
/// (for use by the statement expression extension).
Address CodeGenFunction::EmitCompoundStmt(const CompoundStmt &S, bool GetLast,
                                          AggValueSlot AggSlot) {
  PrettyStackTraceLoc CrashInfo(getContext().getSourceManager(),S.getLBracLoc(),
                             "LLVM IR generation of compound statement ('{}')");

  // Keep track of the current cleanup stack depth, including debug scopes.
  LexicalScope Scope(*this, S.getSourceRange());

  return EmitCompoundStmtWithoutScope(S, GetLast, AggSlot);
}

Address
CodeGenFunction::EmitCompoundStmtWithoutScope(const CompoundStmt &S,
                                              bool GetLast,
                                              AggValueSlot AggSlot) {

  const Stmt *ExprResult = S.getStmtExprResult();
  assert((!GetLast || (GetLast && ExprResult)) &&
         "If GetLast is true then the CompoundStmt must have a StmtExprResult");

  Address RetAlloca = Address::invalid();

  for (auto *CurStmt : S.body()) {
    if (GetLast && ExprResult == CurStmt) {
      // We have to special case labels here.  They are statements, but when put
      // at the end of a statement expression, they yield the value of their
      // subexpression.  Handle this by walking through all labels we encounter,
      // emitting them before we evaluate the subexpr.
      // Similar issues arise for attributed statements.
      while (!isa<Expr>(ExprResult)) {
        if (const auto *LS = dyn_cast<LabelStmt>(ExprResult)) {
          EmitLabel(LS->getDecl());
          ExprResult = LS->getSubStmt();
        } else if (const auto *AS = dyn_cast<AttributedStmt>(ExprResult)) {
          // FIXME: Update this if we ever have attributes that affect the
          // semantics of an expression.
          ExprResult = AS->getSubStmt();
        } else {
          llvm_unreachable("unknown value statement");
        }
      }

      EnsureInsertPoint();

      const Expr *E = cast<Expr>(ExprResult);
      QualType ExprTy = E->getType();
      if (hasAggregateEvaluationKind(ExprTy)) {
        EmitAggExpr(E, AggSlot);
      } else {
        // We can't return an RValue here because there might be cleanups at
        // the end of the StmtExpr.  Because of that, we have to emit the result
        // here into a temporary alloca.
        RetAlloca = CreateMemTemp(ExprTy);
        EmitAnyExprToMem(E, RetAlloca, Qualifiers(),
                         /*IsInit*/ false);
      }
    } else {
      EmitStmt(CurStmt);
    }
  }

  return RetAlloca;
}
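
// Example for EmitCompoundStmtWithoutScope above (illustrative): in the GNU
// statement-expression extension, GetLast is true and the value of the last
// sub-statement becomes the value of the whole expression:
//
//   int n = ({ int t = f(); t + 1; });   // RetAlloca holds t + 1
//
// A trailing label or attributed statement is walked through first, so
// ({ ...; out: t; }) still yields t.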

void CodeGenFunction::SimplifyForwardingBlocks(llvm::BasicBlock *BB) {
  llvm::BranchInst *BI = dyn_cast<llvm::BranchInst>(BB->getTerminator());

  // If there is a cleanup stack, then it isn't worth trying to
  // simplify this block (we would need to remove it from the scope map
  // and cleanup entry).
  if (!EHStack.empty())
    return;

  // Can only simplify direct branches.
  if (!BI || !BI->isUnconditional())
    return;

  // Can only simplify empty blocks.
  if (BI->getIterator() != BB->begin())
    return;

  BB->replaceAllUsesWith(BI->getSuccessor(0));
  BI->eraseFromParent();
  BB->eraseFromParent();
}

void CodeGenFunction::EmitBlock(llvm::BasicBlock *BB, bool IsFinished) {
  llvm::BasicBlock *CurBB = Builder.GetInsertBlock();

  // Fall out of the current block (if necessary).
  EmitBranch(BB);

  if (IsFinished && BB->use_empty()) {
    delete BB;
    return;
  }

  // Place the block after the current block, if possible, or else at
  // the end of the function.
  if (CurBB && CurBB->getParent())
    CurFn->getBasicBlockList().insertAfter(CurBB->getIterator(), BB);
  else
    CurFn->getBasicBlockList().push_back(BB);
  Builder.SetInsertPoint(BB);
}

void CodeGenFunction::EmitBranch(llvm::BasicBlock *Target) {
  // Emit a branch from the current block to the target one if this
  // was a real block.  If this was just a fall-through block after a
  // terminator, don't emit it.
  llvm::BasicBlock *CurBB = Builder.GetInsertBlock();

  if (!CurBB || CurBB->getTerminator()) {
    // If there is no insert point or the previous block is already
    // terminated, don't touch it.
  } else {
    // Otherwise, create a fall-through branch.
    Builder.CreateBr(Target);
  }

  Builder.ClearInsertionPoint();
}

void CodeGenFunction::EmitBlockAfterUses(llvm::BasicBlock *block) {
  bool inserted = false;
  for (llvm::User *u : block->users()) {
    if (llvm::Instruction *insn = dyn_cast<llvm::Instruction>(u)) {
      CurFn->getBasicBlockList().insertAfter(insn->getParent()->getIterator(),
                                             block);
      inserted = true;
      break;
    }
  }

  if (!inserted)
    CurFn->getBasicBlockList().push_back(block);

  Builder.SetInsertPoint(block);
}

CodeGenFunction::JumpDest
CodeGenFunction::getJumpDestForLabel(const LabelDecl *D) {
  JumpDest &Dest = LabelMap[D];
  if (Dest.isValid()) return Dest;

  // Create, but don't insert, the new block.
  Dest = JumpDest(createBasicBlock(D->getName()),
                  EHScopeStack::stable_iterator::invalid(),
                  NextCleanupDestIndex++);
  return Dest;
}

void CodeGenFunction::EmitLabel(const LabelDecl *D) {
  // Add this label to the current lexical scope if we're within any
  // normal cleanups.  Jumps "in" to this label --- when permitted by
  // the language --- may need to be routed around such cleanups.
  if (EHStack.hasNormalCleanups() && CurLexicalScope)
    CurLexicalScope->addLabel(D);

  JumpDest &Dest = LabelMap[D];

  // If we didn't need a forward reference to this label, just go
  // ahead and create a destination at the current scope.
  if (!Dest.isValid()) {
    Dest = getJumpDestInCurrentScope(D->getName());

  // Otherwise, we need to give this label a target depth and remove
  // it from the branch-fixups list.
  } else {
    assert(!Dest.getScopeDepth().isValid() && "already emitted label!");
    Dest.setScopeDepth(EHStack.stable_begin());
    ResolveBranchFixups(Dest.getBlock());
  }

  EmitBlock(Dest.getBlock());

  // Emit debug info for labels.
  if (CGDebugInfo *DI = getDebugInfo()) {
    if (CGM.getCodeGenOpts().hasReducedDebugInfo()) {
      DI->setLocation(D->getLocation());
      DI->EmitLabel(D, Builder);
    }
  }

  incrementProfileCounter(D->getStmt());
}

/// Change the cleanup scope of the labels in this lexical scope to
/// match the scope of the enclosing context.
void CodeGenFunction::LexicalScope::rescopeLabels() {
  assert(!Labels.empty());
  EHScopeStack::stable_iterator innermostScope
    = CGF.EHStack.getInnermostNormalCleanup();

  // Change the scope depth of all the labels.
  for (SmallVectorImpl<const LabelDecl*>::const_iterator
         i = Labels.begin(), e = Labels.end(); i != e; ++i) {
    assert(CGF.LabelMap.count(*i));
    JumpDest &dest = CGF.LabelMap.find(*i)->second;
    assert(dest.getScopeDepth().isValid());
    assert(innermostScope.encloses(dest.getScopeDepth()));
    dest.setScopeDepth(innermostScope);
  }

  // Reparent the labels if the new scope also has cleanups.
  if (innermostScope != EHScopeStack::stable_end() && ParentScope) {
    ParentScope->Labels.append(Labels.begin(), Labels.end());
  }
}

void CodeGenFunction::EmitLabelStmt(const LabelStmt &S) {
  EmitLabel(S.getDecl());
  EmitStmt(S.getSubStmt());
}

void CodeGenFunction::EmitAttributedStmt(const AttributedStmt &S) {
  bool nomerge = false;
  for (const auto *A : S.getAttrs())
    if (A->getKind() == attr::NoMerge) {
      nomerge = true;
      break;
    }
  SaveAndRestore<bool> save_nomerge(InNoMergeAttributedStmt, nomerge);
  EmitStmt(S.getSubStmt(), S.getAttrs());
}

void CodeGenFunction::EmitGotoStmt(const GotoStmt &S) {
  // If this code is reachable then emit a stop point (if generating
  // debug info). We have to do this ourselves because we are on the
  // "simple" statement path.
  if (HaveInsertPoint())
    EmitStopPoint(&S);

  EmitBranchThroughCleanup(getJumpDestForLabel(S.getLabel()));
}

void CodeGenFunction::EmitIndirectGotoStmt(const IndirectGotoStmt &S) {
  if (const LabelDecl *Target = S.getConstantTarget()) {
    EmitBranchThroughCleanup(getJumpDestForLabel(Target));
    return;
  }

  // Ensure that we have an i8* for our PHI node.
  llvm::Value *V = Builder.CreateBitCast(EmitScalarExpr(S.getTarget()),
                                         Int8PtrTy, "addr");
  llvm::BasicBlock *CurBB = Builder.GetInsertBlock();

  // Get the basic block for the indirect goto.
  llvm::BasicBlock *IndGotoBB = GetIndirectGotoBlock();

  // The first instruction in the block has to be the PHI for the switch dest,
  // add an entry for this branch.
  cast<llvm::PHINode>(IndGotoBB->begin())->addIncoming(V, CurBB);

  EmitBranch(IndGotoBB);
}
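
// Example for EmitIndirectGotoStmt above (illustrative; names hypothetical):
// GCC's computed-goto extension takes this path, e.g.
//
//   void *tbl[] = { &&a, &&b };
//   goto *tbl[i];
//   a: ...; b: ...;
//
// Each such goto adds an incoming label address to the shared PHI in the
// function's indirect-goto block.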

static Optional<std::pair<uint32_t, uint32_t>>
getLikelihoodWeights(const IfStmt &If) {
  switch (Stmt::getLikelihood(If.getThen(), If.getElse())) {
  case Stmt::LH_Unlikely:
    return std::pair<uint32_t, uint32_t>(llvm::UnlikelyBranchWeight,
                                         llvm::LikelyBranchWeight);
  case Stmt::LH_None:
    return None;
  case Stmt::LH_Likely:
    return std::pair<uint32_t, uint32_t>(llvm::LikelyBranchWeight,
                                         llvm::UnlikelyBranchWeight);
  }
  llvm_unreachable("Unknown Likelihood");
}
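
// Example for getLikelihoodWeights above (illustrative): with the C++20
// likelihood attributes,
//
//   if (x) [[likely]] { ... } else { ... }
//
// the resulting weights are attached as branch_weights (!prof) metadata on
// the conditional branch in EmitIfStmt below -- but only when optimizing,
// and only if PGO hasn't already supplied real counts.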

void CodeGenFunction::EmitIfStmt(const IfStmt &S) {
  // C99 6.8.4.1: The first substatement is executed if the expression compares
  // unequal to 0.  The condition must be a scalar type.
  LexicalScope ConditionScope(*this, S.getCond()->getSourceRange());

  if (S.getInit())
    EmitStmt(S.getInit());

  if (S.getConditionVariable())
    EmitDecl(*S.getConditionVariable());

  // If the condition constant folds and can be elided, try to avoid emitting
  // the condition and the dead arm of the if/else.
  bool CondConstant;
  if (ConstantFoldsToSimpleInteger(S.getCond(), CondConstant,
                                   S.isConstexpr())) {
    // Figure out which block (then or else) is executed.
    const Stmt *Executed = S.getThen();
    const Stmt *Skipped  = S.getElse();
    if (!CondConstant)  // Condition false?
      std::swap(Executed, Skipped);

    // If the skipped block has no labels in it, just emit the executed block.
    // This avoids emitting dead code and simplifies the CFG substantially.
    if (S.isConstexpr() || !ContainsLabel(Skipped)) {
      if (CondConstant)
        incrementProfileCounter(&S);
      if (Executed) {
        RunCleanupsScope ExecutedScope(*this);
        EmitStmt(Executed);
      }
      return;
    }
  }

  // Otherwise, the condition did not fold, or we couldn't elide it.  Just emit
  // the conditional branch.
  llvm::BasicBlock *ThenBlock = createBasicBlock("if.then");
  llvm::BasicBlock *ContBlock = createBasicBlock("if.end");
  llvm::BasicBlock *ElseBlock = ContBlock;
  if (S.getElse())
    ElseBlock = createBasicBlock("if.else");

  // Prefer the PGO based weights over the likelihood attribute.
  // When the build isn't optimized the metadata isn't used, so don't generate
  // it.
  llvm::MDNode *Weights = nullptr;
  uint64_t Count = getProfileCount(S.getThen());
  if (!Count && CGM.getCodeGenOpts().OptimizationLevel) {
    Optional<std::pair<uint32_t, uint32_t>> LHW = getLikelihoodWeights(S);
    if (LHW) {
      llvm::MDBuilder MDHelper(CGM.getLLVMContext());
      Weights = MDHelper.createBranchWeights(LHW->first, LHW->second);
    }
  }

  EmitBranchOnBoolExpr(S.getCond(), ThenBlock, ElseBlock, Count, Weights);

  // Emit the 'then' code.
  EmitBlock(ThenBlock);
  incrementProfileCounter(&S);
  {
    RunCleanupsScope ThenScope(*this);
    EmitStmt(S.getThen());
  }
  EmitBranch(ContBlock);

  // Emit the 'else' code if present.
  if (const Stmt *Else = S.getElse()) {
    {
      // There is no need to emit line number for an unconditional branch.
      auto NL = ApplyDebugLocation::CreateEmpty(*this);
      EmitBlock(ElseBlock);
    }
    {
      RunCleanupsScope ElseScope(*this);
      EmitStmt(Else);
    }
    {
      // There is no need to emit line number for an unconditional branch.
      auto NL = ApplyDebugLocation::CreateEmpty(*this);
      EmitBranch(ContBlock);
    }
  }

  // Emit the continuation block for code after the if.
  EmitBlock(ContBlock, true);
}
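
// Example for EmitIfStmt above (illustrative): a constant condition elides
// the dead arm entirely, provided the skipped arm contains no labels:
//
//   if (sizeof(long) == 8) { a(); } else { b(); }   // only a() is emitted
//
// A label in the skipped arm defeats the elision, since it could still be
// reached as a goto target.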

void CodeGenFunction::EmitWhileStmt(const WhileStmt &S,
                                    ArrayRef<const Attr *> WhileAttrs) {
  // Emit the header for the loop, which will also become
  // the continue target.
  JumpDest LoopHeader = getJumpDestInCurrentScope("while.cond");
  EmitBlock(LoopHeader.getBlock());

  const SourceRange &R = S.getSourceRange();
  LoopStack.push(LoopHeader.getBlock(), CGM.getContext(), CGM.getCodeGenOpts(),
                 WhileAttrs, SourceLocToDebugLoc(R.getBegin()),
                 SourceLocToDebugLoc(R.getEnd()));

  // Create an exit block for when the condition fails, which will
  // also become the break target.
  JumpDest LoopExit = getJumpDestInCurrentScope("while.end");

  // Store the blocks to use for break and continue.
  BreakContinueStack.push_back(BreakContinue(LoopExit, LoopHeader));

  // C++ [stmt.while]p2:
  //   When the condition of a while statement is a declaration, the
  //   scope of the variable that is declared extends from its point
  //   of declaration (3.3.2) to the end of the while statement.
  //   [...]
  //   The object created in a condition is destroyed and created
  //   with each iteration of the loop.
  RunCleanupsScope ConditionScope(*this);

  if (S.getConditionVariable())
    EmitDecl(*S.getConditionVariable());

  // Evaluate the conditional in the while header.  C99 6.8.5.1: The
  // evaluation of the controlling expression takes place before each
  // execution of the loop body.
  llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());

  // while(1) is common, avoid extra exit blocks.  Be sure
  // to correctly handle break/continue though.
  bool EmitBoolCondBranch = true;
  if (llvm::ConstantInt *C = dyn_cast<llvm::ConstantInt>(BoolCondVal))
    if (C->isOne())
      EmitBoolCondBranch = false;

  // As long as the condition is true, go to the loop body.
  llvm::BasicBlock *LoopBody = createBasicBlock("while.body");
  if (EmitBoolCondBranch) {
    llvm::BasicBlock *ExitBlock = LoopExit.getBlock();
    if (ConditionScope.requiresCleanups())
      ExitBlock = createBasicBlock("while.exit");
    Builder.CreateCondBr(
        BoolCondVal, LoopBody, ExitBlock,
        createProfileWeightsForLoop(S.getCond(), getProfileCount(S.getBody())));

    if (ExitBlock != LoopExit.getBlock()) {
      EmitBlock(ExitBlock);
      EmitBranchThroughCleanup(LoopExit);
    }
  }

  // Emit the loop body.  We have to emit this in a cleanup scope
  // because it might be a singleton DeclStmt.
  {
    RunCleanupsScope BodyScope(*this);
    EmitBlock(LoopBody);
    incrementProfileCounter(&S);
    EmitStmt(S.getBody());
  }

  BreakContinueStack.pop_back();

  // Immediately force cleanup.
  ConditionScope.ForceCleanup();

  EmitStopPoint(&S);
  // Branch to the loop header again.
  EmitBranch(LoopHeader.getBlock());

  LoopStack.pop();

  // Emit the exit block.
  EmitBlock(LoopExit.getBlock(), true);

  // The LoopHeader typically is just a branch if we skipped emitting
  // a branch, try to erase it.
  if (!EmitBoolCondBranch)
    SimplifyForwardingBlocks(LoopHeader.getBlock());
}
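
// Example for EmitWhileStmt above (illustrative): for "while (1) { ... }"
// the conditional branch is skipped (EmitBoolCondBranch == false), the body
// loops back to while.cond unconditionally, and the now-trivial forwarding
// header is erased afterwards by SimplifyForwardingBlocks.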

void CodeGenFunction::EmitDoStmt(const DoStmt &S,
                                 ArrayRef<const Attr *> DoAttrs) {
  JumpDest LoopExit = getJumpDestInCurrentScope("do.end");
  JumpDest LoopCond = getJumpDestInCurrentScope("do.cond");

  uint64_t ParentCount = getCurrentProfileCount();

  // Store the blocks to use for break and continue.
  BreakContinueStack.push_back(BreakContinue(LoopExit, LoopCond));

  // Emit the body of the loop.
  llvm::BasicBlock *LoopBody = createBasicBlock("do.body");

  EmitBlockWithFallThrough(LoopBody, &S);
  {
    RunCleanupsScope BodyScope(*this);
    EmitStmt(S.getBody());
  }

  EmitBlock(LoopCond.getBlock());

  const SourceRange &R = S.getSourceRange();
  LoopStack.push(LoopBody, CGM.getContext(), CGM.getCodeGenOpts(), DoAttrs,
                 SourceLocToDebugLoc(R.getBegin()),
                 SourceLocToDebugLoc(R.getEnd()));

  // C99 6.8.5.2: "The evaluation of the controlling expression takes place
  // after each execution of the loop body."

  // Evaluate the conditional in the while header.
  // C99 6.8.5p2/p4: The first substatement is executed if the expression
  // compares unequal to 0.  The condition must be a scalar type.
  llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());

  BreakContinueStack.pop_back();

  // "do {} while (0)" is common in macros, avoid extra blocks.  Be sure
  // to correctly handle break/continue though.
  bool EmitBoolCondBranch = true;
  if (llvm::ConstantInt *C = dyn_cast<llvm::ConstantInt>(BoolCondVal))
    if (C->isZero())
      EmitBoolCondBranch = false;

  // As long as the condition is true, iterate the loop.
  if (EmitBoolCondBranch) {
    uint64_t BackedgeCount = getProfileCount(S.getBody()) - ParentCount;
    Builder.CreateCondBr(
        BoolCondVal, LoopBody, LoopExit.getBlock(),
        createProfileWeightsForLoop(S.getCond(), BackedgeCount));
  }

  LoopStack.pop();

  // Emit the exit block.
  EmitBlock(LoopExit.getBlock());

  // The DoCond block typically is just a branch if we skipped
  // emitting a branch, try to erase it.
  if (!EmitBoolCondBranch)
    SimplifyForwardingBlocks(LoopCond.getBlock());
}
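
// Example for EmitDoStmt above (illustrative; macro name hypothetical):
// the classic macro wrapper
//
//   #define SWAP(a, b) do { int t = (a); (a) = (b); (b) = t; } while (0)
//
// takes the EmitBoolCondBranch == false path: no backedge is emitted, and
// the empty do.cond forwarding block is erased afterwards.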

void CodeGenFunction::EmitForStmt(const ForStmt &S,
                                  ArrayRef<const Attr *> ForAttrs) {
  JumpDest LoopExit = getJumpDestInCurrentScope("for.end");

  LexicalScope ForScope(*this, S.getSourceRange());

  // Evaluate the first part before the loop.
  if (S.getInit())
    EmitStmt(S.getInit());

  // Start the loop with a block that tests the condition.
  // If there's an increment, the continue scope will be overwritten
  // later.
  JumpDest Continue = getJumpDestInCurrentScope("for.cond");
  llvm::BasicBlock *CondBlock = Continue.getBlock();
  EmitBlock(CondBlock);

  const SourceRange &R = S.getSourceRange();
  LoopStack.push(CondBlock, CGM.getContext(), CGM.getCodeGenOpts(), ForAttrs,
                 SourceLocToDebugLoc(R.getBegin()),
                 SourceLocToDebugLoc(R.getEnd()));

  // If the for loop doesn't have an increment we can just use the
  // condition as the continue block.  Otherwise we'll need to create
  // a block for it (in the current scope, i.e. in the scope of the
  // condition), and that will become our continue block.
  if (S.getInc())
    Continue = getJumpDestInCurrentScope("for.inc");

  // Store the blocks to use for break and continue.
  BreakContinueStack.push_back(BreakContinue(LoopExit, Continue));

  // Create a cleanup scope for the condition variable cleanups.
  LexicalScope ConditionScope(*this, S.getSourceRange());

  if (S.getCond()) {
    // If the for statement has a condition scope, emit the local variable
    // declaration.
    if (S.getConditionVariable()) {
      EmitDecl(*S.getConditionVariable());
    }

    llvm::BasicBlock *ExitBlock = LoopExit.getBlock();
    // If there are any cleanups between here and the loop-exit scope,
    // create a block to stage a loop exit along.
    if (ForScope.requiresCleanups())
      ExitBlock = createBasicBlock("for.cond.cleanup");

    // As long as the condition is true, iterate the loop.
    llvm::BasicBlock *ForBody = createBasicBlock("for.body");

    // C99 6.8.5p2/p4: The first substatement is executed if the expression
    // compares unequal to 0.  The condition must be a scalar type.
    llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());
    Builder.CreateCondBr(
        BoolCondVal, ForBody, ExitBlock,
        createProfileWeightsForLoop(S.getCond(), getProfileCount(S.getBody())));

    if (ExitBlock != LoopExit.getBlock()) {
      EmitBlock(ExitBlock);
      EmitBranchThroughCleanup(LoopExit);
    }

    EmitBlock(ForBody);
  } else {
    // Treat it as a non-zero constant.  Don't even create a new block for the
    // body, just fall into it.
  }
  incrementProfileCounter(&S);

  {
    // Create a separate cleanup scope for the body, in case it is not
    // a compound statement.
    RunCleanupsScope BodyScope(*this);
    EmitStmt(S.getBody());
  }

  // If there is an increment, emit it next.
  if (S.getInc()) {
    EmitBlock(Continue.getBlock());
    EmitStmt(S.getInc());
  }

  BreakContinueStack.pop_back();

  ConditionScope.ForceCleanup();

  EmitStopPoint(&S);
  EmitBranch(CondBlock);

  ForScope.ForceCleanup();

  LoopStack.pop();

  // Emit the fall-through block.
  EmitBlock(LoopExit.getBlock(), true);
}

void
CodeGenFunction::EmitCXXForRangeStmt(const CXXForRangeStmt &S,
                                     ArrayRef<const Attr *> ForAttrs) {
  JumpDest LoopExit = getJumpDestInCurrentScope("for.end");

  LexicalScope ForScope(*this, S.getSourceRange());

  // Evaluate the first pieces before the loop.
  if (S.getInit())
    EmitStmt(S.getInit());
  EmitStmt(S.getRangeStmt());
  EmitStmt(S.getBeginStmt());
  EmitStmt(S.getEndStmt());

  // Start the loop with a block that tests the condition.
  // If there's an increment, the continue scope will be overwritten
  // later.
  llvm::BasicBlock *CondBlock = createBasicBlock("for.cond");
  EmitBlock(CondBlock);

  const SourceRange &R = S.getSourceRange();
  LoopStack.push(CondBlock, CGM.getContext(), CGM.getCodeGenOpts(), ForAttrs,
                 SourceLocToDebugLoc(R.getBegin()),
                 SourceLocToDebugLoc(R.getEnd()));

  // If there are any cleanups between here and the loop-exit scope,
  // create a block to stage a loop exit along.
  llvm::BasicBlock *ExitBlock = LoopExit.getBlock();
  if (ForScope.requiresCleanups())
    ExitBlock = createBasicBlock("for.cond.cleanup");

  // The loop body, consisting of the specified body and the loop variable.
  llvm::BasicBlock *ForBody = createBasicBlock("for.body");

  // The body is executed if the expression, contextually converted
  // to bool, is true.
  llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());
  Builder.CreateCondBr(
      BoolCondVal, ForBody, ExitBlock,
      createProfileWeightsForLoop(S.getCond(), getProfileCount(S.getBody())));

  if (ExitBlock != LoopExit.getBlock()) {
    EmitBlock(ExitBlock);
    EmitBranchThroughCleanup(LoopExit);
  }

  EmitBlock(ForBody);
  incrementProfileCounter(&S);

  // Create a block for the increment. In case of a 'continue', we jump there.
  JumpDest Continue = getJumpDestInCurrentScope("for.inc");

  // Store the blocks to use for break and continue.
  BreakContinueStack.push_back(BreakContinue(LoopExit, Continue));

  {
    // Create a separate cleanup scope for the loop variable and body.
    LexicalScope BodyScope(*this, S.getSourceRange());
    EmitStmt(S.getLoopVarStmt());
    EmitStmt(S.getBody());
  }

  EmitStopPoint(&S);
  // If there is an increment, emit it next.
  EmitBlock(Continue.getBlock());
  EmitStmt(S.getInc());

  BreakContinueStack.pop_back();

  EmitBranch(CondBlock);

  ForScope.ForceCleanup();

  LoopStack.pop();

  // Emit the fall-through block.
  EmitBlock(LoopExit.getBlock(), true);
}
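
// Example for EmitCXXForRangeStmt above (illustrative): by this point Sema
// has already desugared
//
//   for (auto x : v) use(x);
//
// into the range/begin/end statements consumed here, roughly:
//
//   auto &&__range = v;
//   auto __begin = __range.begin();   // or ADL begin(__range), etc.
//   auto __end = __range.end();
//   for (; __begin != __end; ++__begin) { auto x = *__begin; use(x); }
//
// so codegen just emits those pieces around the usual loop skeleton.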

void CodeGenFunction::EmitReturnOfRValue(RValue RV, QualType Ty) {
  if (RV.isScalar()) {
    Builder.CreateStore(RV.getScalarVal(), ReturnValue);
  } else if (RV.isAggregate()) {
    LValue Dest = MakeAddrLValue(ReturnValue, Ty);
    LValue Src = MakeAddrLValue(RV.getAggregateAddress(), Ty);
    EmitAggregateCopy(Dest, Src, Ty, getOverlapForReturnValue());
  } else {
    EmitStoreOfComplex(RV.getComplexVal(), MakeAddrLValue(ReturnValue, Ty),
                       /*init*/ true);
  }
  EmitBranchThroughCleanup(ReturnBlock);
}

namespace {
// RAII struct used to save and restore a return statement's result expression.
struct SaveRetExprRAII {
  SaveRetExprRAII(const Expr *RetExpr, CodeGenFunction &CGF)
      : OldRetExpr(CGF.RetExpr), CGF(CGF) {
    CGF.RetExpr = RetExpr;
  }
  ~SaveRetExprRAII() { CGF.RetExpr = OldRetExpr; }
  const Expr *OldRetExpr;
  CodeGenFunction &CGF;
};
} // namespace

/// EmitReturnStmt - Note that due to GCC extensions, this can have an operand
/// if the function returns void, or may be missing one if the function returns
/// non-void.  Fun stuff :).
void CodeGenFunction::EmitReturnStmt(const ReturnStmt &S) {
  if (requiresReturnValueCheck()) {
    llvm::Constant *SLoc = EmitCheckSourceLocation(S.getBeginLoc());
    auto *SLocPtr =
        new llvm::GlobalVariable(CGM.getModule(), SLoc->getType(), false,
                                 llvm::GlobalVariable::PrivateLinkage, SLoc);
    SLocPtr->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
    CGM.getSanitizerMetadata()->disableSanitizerForGlobal(SLocPtr);
    assert(ReturnLocation.isValid() && "No valid return location");
    Builder.CreateStore(Builder.CreateBitCast(SLocPtr, Int8PtrTy),
                        ReturnLocation);
  }

  // Returning from an outlined SEH helper is UB, and we already warn on it.
  if (IsOutlinedSEHHelper) {
    Builder.CreateUnreachable();
    Builder.ClearInsertionPoint();
  }

  // Emit the result value, even if unused, to evaluate the side effects.
  const Expr *RV = S.getRetValue();

  // Record the result expression of the return statement. The recorded
  // expression is used to determine whether a block capture's lifetime should
  // end at the end of the full expression as opposed to the end of the scope
  // enclosing the block expression.
  //
  // This permits a small, easily-implemented exception to our over-conservative
  // rules about not jumping to statements following block literals with
  // non-trivial cleanups.
  SaveRetExprRAII SaveRetExpr(RV, *this);

  RunCleanupsScope cleanupScope(*this);
  if (const auto *EWC = dyn_cast_or_null<ExprWithCleanups>(RV))
    RV = EWC->getSubExpr();
  // FIXME: Clean this up by using an LValue for ReturnTemp,
  // EmitStoreThroughLValue, and EmitAnyExpr.
  // Check if the NRVO candidate was not globalized in OpenMP mode.
  if (getLangOpts().ElideConstructors && S.getNRVOCandidate() &&
      S.getNRVOCandidate()->isNRVOVariable() &&
      (!getLangOpts().OpenMP ||
       !CGM.getOpenMPRuntime()
            .getAddressOfLocalVariable(*this, S.getNRVOCandidate())
            .isValid())) {
    // Apply the named return value optimization for this return statement,
    // which means doing nothing: the appropriate result has already been
    // constructed into the NRVO variable.

    // If there is an NRVO flag for this variable, set it to 1 to indicate
    // that the cleanup code should not destroy the variable.
    if (llvm::Value *NRVOFlag = NRVOFlags[S.getNRVOCandidate()])
      Builder.CreateFlagStore(Builder.getTrue(), NRVOFlag);
  } else if (!ReturnValue.isValid() || (RV && RV->getType()->isVoidType())) {
    // Make sure not to return anything, but evaluate the expression
    // for side effects.
    if (RV)
      EmitAnyExpr(RV);
  } else if (!RV) {
    // Do nothing (return value is left uninitialized)
  } else if (FnRetTy->isReferenceType()) {
    // If this function returns a reference, take the address of the expression
    // rather than the value.
    RValue Result = EmitReferenceBindingToExpr(RV);
    Builder.CreateStore(Result.getScalarVal(), ReturnValue);
  } else {
    switch (getEvaluationKind(RV->getType())) {
    case TEK_Scalar:
      Builder.CreateStore(EmitScalarExpr(RV), ReturnValue);
      break;
    case TEK_Complex:
      EmitComplexExprIntoLValue(RV, MakeAddrLValue(ReturnValue, RV->getType()),
                                /*isInit*/ true);
      break;
    case TEK_Aggregate:
      EmitAggExpr(RV, AggValueSlot::forAddr(
                          ReturnValue, Qualifiers(),
                          AggValueSlot::IsDestructed,
                          AggValueSlot::DoesNotNeedGCBarriers,
                          AggValueSlot::IsNotAliased,
                          getOverlapForReturnValue()));
      break;
    }
  }

  ++NumReturnExprs;
  if (!RV || RV->isEvaluatable(getContext()))
    ++NumSimpleReturnExprs;

  cleanupScope.ForceCleanup();
  EmitBranchThroughCleanup(ReturnBlock);
}
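
// Example for EmitReturnStmt above (illustrative; Widget is hypothetical):
// the NRVO path "does nothing" because the local was already constructed in
// the caller's return slot:
//
//   Widget make() {
//     Widget w;   // constructed directly in the return slot
//     w.fill();
//     return w;   // no copy/move; at most the NRVO flag is set to skip
//   }             // the destructor cleanup for w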

void CodeGenFunction::EmitDeclStmt(const DeclStmt &S) {
  // As long as debug info is modeled with instructions, we have to ensure we
  // have a place to insert here and write the stop point here.
  if (HaveInsertPoint())
    EmitStopPoint(&S);

  for (const auto *I : S.decls())
    EmitDecl(*I);
}

void CodeGenFunction::EmitBreakStmt(const BreakStmt &S) {
  assert(!BreakContinueStack.empty() && "break stmt not in a loop or switch!");

  // If this code is reachable then emit a stop point (if generating
  // debug info). We have to do this ourselves because we are on the
  // "simple" statement path.
  if (HaveInsertPoint())
    EmitStopPoint(&S);

  EmitBranchThroughCleanup(BreakContinueStack.back().BreakBlock);
}

void CodeGenFunction::EmitContinueStmt(const ContinueStmt &S) {
  assert(!BreakContinueStack.empty() && "continue stmt not in a loop!");

  // If this code is reachable then emit a stop point (if generating
  // debug info). We have to do this ourselves because we are on the
  // "simple" statement path.
  if (HaveInsertPoint())
    EmitStopPoint(&S);

  EmitBranchThroughCleanup(BreakContinueStack.back().ContinueBlock);
}
1241
1242
/// EmitCaseStmtRange - If case statement range is not too big then
1243
/// add multiple cases to switch instruction, one for each value within
1244
/// the range. If range is too big then emit "if" condition check.
1245
39
void CodeGenFunction::EmitCaseStmtRange(const CaseStmt &S) {
1246
39
  assert(S.getRHS() && "Expected RHS value in CaseStmt");
1247
39
1248
39
  llvm::APSInt LHS = S.getLHS()->EvaluateKnownConstInt(getContext());
1249
39
  llvm::APSInt RHS = S.getRHS()->EvaluateKnownConstInt(getContext());
1250
39
1251
  // Emit the code for this case. We do this first to make sure it is
1252
  // properly chained from our predecessor before generating the
1253
  // switch machinery to enter this block.
1254
39
  llvm::BasicBlock *CaseDest = createBasicBlock("sw.bb");
1255
39
  EmitBlockWithFallThrough(CaseDest, &S);
1256
39
  EmitStmt(S.getSubStmt());
1257
39
1258
  // If range is empty, do nothing.
1259
39
  if (LHS.isSigned() ? 
RHS.slt(LHS)35
:
RHS.ult(LHS)4
)
1260
4
    return;
1261
35
1262
35
  llvm::APInt Range = RHS - LHS;
1263
  // FIXME: parameters such as this should not be hardcoded.
1264
35
  if (Range.ult(llvm::APInt(Range.getBitWidth(), 64))) {
1265
    // Range is small enough to add multiple switch instruction cases.
1266
20
    uint64_t Total = getProfileCount(&S);
1267
20
    unsigned NCases = Range.getZExtValue() + 1;
1268
    // We only have one region counter for the entire set of cases here, so we
1269
    // need to divide the weights evenly between the generated cases, ensuring
1270
    // that the total weight is preserved. E.g., a weight of 5 over three cases
1271
    // will be distributed as weights of 2, 2, and 1.
1272
20
    uint64_t Weight = Total / NCases, Rem = Total % NCases;
1273
91
    for (unsigned I = 0; I != NCases; 
++I71
) {
1274
71
      if (SwitchWeights)
1275
24
        SwitchWeights->push_back(Weight + (Rem ? 
14
:
020
));
1276
71
      if (Rem)
1277
4
        Rem--;
1278
71
      SwitchInsn->addCase(Builder.getInt(LHS), CaseDest);
1279
71
      ++LHS;
1280
71
    }
1281
20
    return;
1282
20
  }
1283
15
1284
  // The range is too big. Emit "if" condition into a new block,
1285
  // making sure to save and restore the current insertion point.
1286
15
  llvm::BasicBlock *RestoreBB = Builder.GetInsertBlock();
1287
15
1288
  // Push this test onto the chain of range checks (which terminates
1289
  // in the default basic block). The switch's default will be changed
1290
  // to the top of this chain after switch emission is complete.
1291
15
  llvm::BasicBlock *FalseDest = CaseRangeBlock;
1292
15
  CaseRangeBlock = createBasicBlock("sw.caserange");
1293
15
1294
15
  CurFn->getBasicBlockList().push_back(CaseRangeBlock);
1295
15
  Builder.SetInsertPoint(CaseRangeBlock);
1296
15
1297
  // Emit range check.
1298
15
  llvm::Value *Diff =
1299
15
    Builder.CreateSub(SwitchInsn->getCondition(), Builder.getInt(LHS));
1300
15
  llvm::Value *Cond =
1301
15
    Builder.CreateICmpULE(Diff, Builder.getInt(Range), "inbounds");
1302
15
1303
15
  llvm::MDNode *Weights = nullptr;
1304
15
  if (SwitchWeights) {
1305
8
    uint64_t ThisCount = getProfileCount(&S);
1306
8
    uint64_t DefaultCount = (*SwitchWeights)[0];
1307
8
    Weights = createProfileWeights(ThisCount, DefaultCount);
1308
8
1309
    // Since we're chaining the switch default through each large case range, we
1310
    // need to update the weight for the default, i.e., the first case, to include
1311
    // this case.
1312
8
    (*SwitchWeights)[0] += ThisCount;
1313
8
  }
1314
15
  Builder.CreateCondBr(Cond, CaseDest, FalseDest, Weights);
1315
15
1316
  // Restore the appropriate insertion point.
1317
15
  if (RestoreBB)
1318
13
    Builder.SetInsertPoint(RestoreBB);
1319
2
  else
1320
2
    Builder.ClearInsertionPoint();
1321
15
}
1322
1323
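Two sketches of the range lowering above (hypothetical source; the 64-value threshold is the hardcoded parameter flagged by the FIXME). When profile data is present, the single region count is split across the expanded cases as the comment describes, e.g. a count of 5 over three cases yields weights 2, 2, 1.

    void small(void);
    void big(void);

    void dispatch(unsigned n) {
      switch (n) {
      case 0 ... 9:        /* Range = 9 < 64: expanded into ten individual */
        small();           /* SwitchInst cases, all targeting one sw.bb    */
        break;
      case 100 ... 9999:   /* Range = 9899 >= 64: becomes a chained check, */
        big();             /* roughly if ((unsigned)(n - 100) <= 9899),    */
        break;             /* placed in front of the switch's default      */
      }
    }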
934
void CodeGenFunction::EmitCaseStmt(const CaseStmt &S) {
1324
  // If there is no enclosing switch instance that we're aware of, then this
1325
  // case statement and its block can be elided.  This situation only happens
1326
  // when we've constant-folded the switch, are emitting the constant case,
1327
  // and part of the constant case includes another case statement.  For
1328
  // instance: switch (4) { case 4: do { case 5: } while (1); }
1329
934
  if (!SwitchInsn) {
1330
2
    EmitStmt(S.getSubStmt());
1331
2
    return;
1332
2
  }
1333
932
1334
  // Handle case ranges.
1335
932
  if (S.getRHS()) {
1336
39
    EmitCaseStmtRange(S);
1337
39
    return;
1338
39
  }
1339
893
1340
893
  llvm::ConstantInt *CaseVal =
1341
893
    Builder.getInt(S.getLHS()->EvaluateKnownConstInt(getContext()));
1342
893
1343
  // If the body of the case is just a 'break', try to not emit an empty block.
1344
  // If we're profiling or we're not optimizing, leave the block in for better
1345
  // debug and coverage analysis.
1346
893
  if (!CGM.getCodeGenOpts().hasProfileClangInstr() &&
1347
863
      CGM.getCodeGenOpts().OptimizationLevel > 0 &&
1348
90
      isa<BreakStmt>(S.getSubStmt())) {
1349
0
    JumpDest Block = BreakContinueStack.back().BreakBlock;
1350
0
1351
    // Only do this optimization if there are no cleanups that need emitting.
1352
0
    if (isObviouslyBranchWithoutCleanups(Block)) {
1353
0
      if (SwitchWeights)
1354
0
        SwitchWeights->push_back(getProfileCount(&S));
1355
0
      SwitchInsn->addCase(CaseVal, Block.getBlock());
1356
0
1357
      // If there was a fallthrough into this case, make sure to redirect it to
1358
      // the end of the switch as well.
1359
0
      if (Builder.GetInsertBlock()) {
1360
0
        Builder.CreateBr(Block.getBlock());
1361
0
        Builder.ClearInsertionPoint();
1362
0
      }
1363
0
      return;
1364
0
    }
1365
893
  }
1366
893
1367
893
  llvm::BasicBlock *CaseDest = createBasicBlock("sw.bb");
1368
893
  EmitBlockWithFallThrough(CaseDest, &S);
1369
893
  if (SwitchWeights)
1370
54
    SwitchWeights->push_back(getProfileCount(&S));
1371
893
  SwitchInsn->addCase(CaseVal, CaseDest);
1372
893
1373
  // Recursively emitting the statement is acceptable, but is not wonderful for
1374
  // code where we have many case statements nested together, i.e.:
1375
  //  case 1:
1376
  //    case 2:
1377
  //      case 3: etc.
1378
  // Handling this recursively will create a new block for each case statement
1379
  // that falls through to the next case, which is IR intensive.  It also causes
1380
  // deep recursion which can run into stack depth limitations.  Handle
1381
  // sequential non-range case statements specially.
1382
893
  const CaseStmt *CurCase = &S;
1383
893
  const CaseStmt *NextCase = dyn_cast<CaseStmt>(S.getSubStmt());
1384
893
1385
  // Otherwise, iteratively add consecutive cases to this switch stmt.
1386
1.00k
  while (NextCase && NextCase->getRHS() == nullptr) {
1387
109
    CurCase = NextCase;
1388
109
    llvm::ConstantInt *CaseVal =
1389
109
      Builder.getInt(CurCase->getLHS()->EvaluateKnownConstInt(getContext()));
1390
109
1391
109
    if (SwitchWeights)
1392
7
      SwitchWeights->push_back(getProfileCount(NextCase));
1393
109
    if (CGM.getCodeGenOpts().hasProfileClangInstr()) {
1394
3
      CaseDest = createBasicBlock("sw.bb");
1395
3
      EmitBlockWithFallThrough(CaseDest, &S);
1396
3
    }
1397
109
1398
109
    SwitchInsn->addCase(CaseVal, CaseDest);
1399
109
    NextCase = dyn_cast<CaseStmt>(CurCase->getSubStmt());
1400
109
  }
1401
893
1402
  // Normal default recursion for non-cases.
1403
893
  EmitStmt(CurCase->getSubStmt());
1404
893
}
1405
1406
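A sketch of the sequential-case fast path above: consecutive non-range labels are attached to one destination block iteratively, so no extra blocks or deep recursion are needed until the final substatement.

    void work(void);

    void run(int n) {
      switch (n) {
      case 1:     /* the while loop adds 1, 2, and 3 to the SwitchInst, */
      case 2:     /* all pointing at the same sw.bb block; only work()  */
      case 3:     /* is then emitted, via EmitStmt on the last substmt  */
        work();
        break;
      }
    }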
178
void CodeGenFunction::EmitDefaultStmt(const DefaultStmt &S) {
1407
  // If there is no enclosing switch instance that we're aware of, then this
1408
  // default statement can be elided. This situation only happens when we've
1409
  // constant-folded the switch.
1410
178
  if (!SwitchInsn) {
1411
1
    EmitStmt(S.getSubStmt());
1412
1
    return;
1413
1
  }
1414
177
1415
177
  llvm::BasicBlock *DefaultBlock = SwitchInsn->getDefaultDest();
1416
177
  assert(DefaultBlock->empty() &&
1417
177
         "EmitDefaultStmt: Default block already defined?");
1418
177
1419
177
  EmitBlockWithFallThrough(DefaultBlock, &S);
1420
177
1421
177
  EmitStmt(S.getSubStmt());
1422
177
}
1423
1424
/// CollectStatementsForCase - Given the body of a 'switch' statement and a
1425
/// constant value that is being switched on, see if we can dead code eliminate
1426
/// the body of the switch to a simple series of statements to emit.  Basically,
1427
/// on a switch (5) we want to find these statements:
1428
///    case 5:
1429
///      printf(...);    <--
1430
///      ++i;            <--
1431
///      break;
1432
///
1433
/// and add them to the ResultStmts vector.  If it is unsafe to do this
1434
/// transformation (for example, one of the elided statements contains a label
1435
/// that might be jumped to), return CSFC_Failure.  If we handled it and 'S'
1436
/// should include statements after it (e.g. the printf() line is a substmt of
1437
/// the case) then return CSFC_FallThrough.  If we handled it and found a break
1438
/// statement, then return CSFC_Success.
1439
///
1440
/// If Case is non-null, then we are looking for the specified case, checking
1441
/// that nothing we jump over contains labels.  If Case is null, then we found
1442
/// the case and are looking for the break.
1443
///
1444
/// If the recursive walk actually finds our Case, then we set FoundCase to
1445
/// true.
1446
///
1447
enum CSFC_Result { CSFC_Failure, CSFC_FallThrough, CSFC_Success };
1448
static CSFC_Result CollectStatementsForCase(const Stmt *S,
1449
                                            const SwitchCase *Case,
1450
                                            bool &FoundCase,
1451
497
                              SmallVectorImpl<const Stmt*> &ResultStmts) {
1452
  // If this is a null statement, just succeed.
1453
497
  if (!S)
1454
0
    return Case ? CSFC_Success : CSFC_FallThrough;
1455
497
1456
  // If this is the switchcase (case 4: or default) that we're looking for, then
1457
  // we're in business.  Just add the substatement.
1458
497
  if (const SwitchCase *SC = dyn_cast<SwitchCase>(S)) {
1459
146
    if (S == Case) {
1460
78
      FoundCase = true;
1461
78
      return CollectStatementsForCase(SC->getSubStmt(), nullptr, FoundCase,
1462
78
                                      ResultStmts);
1463
78
    }
1464
68
1465
    // Otherwise, this is some other case or default statement, just ignore it.
1466
68
    return CollectStatementsForCase(SC->getSubStmt(), Case, FoundCase,
1467
68
                                    ResultStmts);
1468
68
  }
1469
351
1470
  // If we are in the live part of the code and we found our break statement,
1471
  // return a success!
1472
351
  if (!Case && isa<BreakStmt>(S))
1473
60
    return CSFC_Success;
1474
291
1475
  // If this is a compound statement, then it might contain the SwitchCase, the
1476
  // break, or neither.
1477
291
  if (const CompoundStmt *CS = dyn_cast<CompoundStmt>(S)) {
1478
    // Handle this as two cases: we might be looking for the SwitchCase (if so
1479
    // the skipped statements must be skippable) or we might already have it.
1480
83
    CompoundStmt::const_body_iterator I = CS->body_begin(), E = CS->body_end();
1481
83
    bool StartedInLiveCode = FoundCase;
1482
83
    unsigned StartSize = ResultStmts.size();
1483
83
1484
    // If we've not found the case yet, scan through looking for it.
1485
83
    if (Case) {
1486
      // Keep track of whether we see a skipped declaration.  The code could be
1487
      // using the declaration even if it is skipped, so we can't optimize out
1488
      // the decl if the kept statements might refer to it.
1489
76
      bool HadSkippedDecl = false;
1490
76
1491
      // If we're looking for the case, just see if we can skip each of the
1492
      // substatements.
1493
268
      for (; Case && I != E; ++I) {
1494
205
        HadSkippedDecl |= CodeGenFunction::mightAddDeclToScope(*I);
1495
205
1496
205
        switch (CollectStatementsForCase(*I, Case, FoundCase, ResultStmts)) {
1497
2
        case CSFC_Failure: return CSFC_Failure;
1498
138
        case CSFC_Success:
1499
          // A successful result means that either 1) the statement doesn't
1500
          // have the case and is skippable, or 2) does contain the case value
1501
          // and also contains the break to exit the switch.  In the latter case,
1502
          // we just verify the rest of the statements are elidable.
1503
138
          if (FoundCase) {
1504
            // If we found the case and skipped declarations, we can't do the
1505
            // optimization.
1506
8
            if (HadSkippedDecl)
1507
0
              return CSFC_Failure;
1508
8
1509
18
            for (++I; I != E; ++I)
1510
10
              if (CodeGenFunction::ContainsLabel(*I, true))
1511
0
                return CSFC_Failure;
1512
8
            return CSFC_Success;
1513
130
          }
1514
130
          break;
1515
65
        case CSFC_FallThrough:
1516
          // If we have a fallthrough condition, then we must have found the
1517
          // case and started to include statements.  Consider the rest of the
1518
          // statements in the compound statement as candidates for inclusion.
1519
65
          assert(FoundCase && "Didn't find case but returned fallthrough?");
1520
          // We recursively found Case, so we're not looking for it anymore.
1521
65
          Case = nullptr;
1522
65
1523
          // If we found the case and skipped declarations, we can't do the
1524
          // optimization.
1525
65
          if (HadSkippedDecl)
1526
3
            return CSFC_Failure;
1527
62
          break;
1528
205
        }
1529
205
      }
1530
76
1531
63
      if (!FoundCase)
1532
1
        return CSFC_Success;
1533
62
1534
62
      assert(!HadSkippedDecl && "fallthrough after skipping decl");
1535
62
    }
1536
83
1537
    // If we have statements in our range, then we know that the statements are
1538
    // live and need to be added to the set of statements we're tracking.
1539
69
    bool AnyDecls = false;
1540
84
    for (; I != E; ++I) {
1541
68
      AnyDecls |= CodeGenFunction::mightAddDeclToScope(*I);
1542
68
1543
68
      switch (CollectStatementsForCase(*I, nullptr, FoundCase, ResultStmts)) {
1544
0
      case CSFC_Failure: return CSFC_Failure;
1545
15
      case CSFC_FallThrough:
1546
        // A fallthrough result means that the statement was simple and just
1547
        // included in ResultStmt, keep adding them afterwards.
1548
15
        break;
1549
53
      case CSFC_Success:
1550
        // A successful result means that we found the break statement and
1551
        // stopped statement inclusion.  We just ensure that any leftover stmts
1552
        // are skippable and return success ourselves.
1553
165
        for (++I; I != E; ++I)
1554
112
          if (CodeGenFunction::ContainsLabel(*I, true))
1555
0
            return CSFC_Failure;
1556
53
        return CSFC_Success;
1557
68
      }
1558
68
    }
1559
69
1560
    // If we're about to fall out of a scope without hitting a 'break;', we
1561
    // can't perform the optimization if there were any decls in that scope
1562
    // (we'd lose their end-of-lifetime).
1563
16
    if (AnyDecls) {
1564
      // If the entire compound statement was live, there's one more thing we
1565
      // can try before giving up: emit the whole thing as a single statement.
1566
      // We can do that unless the statement contains a 'break;'.
1567
      // FIXME: Such a break must be at the end of a construct within this one.
1568
      // We could emit this by just ignoring the BreakStmts entirely.
1569
3
      if (StartedInLiveCode && !CodeGenFunction::containsBreak(S)) {
1570
3
        ResultStmts.resize(StartSize);
1571
3
        ResultStmts.push_back(S);
1572
0
      } else {
1573
0
        return CSFC_Failure;
1574
0
      }
1575
16
    }
1576
16
1577
16
    return CSFC_FallThrough;
1578
16
  }
1579
208
1580
  // Okay, this is some other statement that we don't handle explicitly, like a
1581
  // for statement or increment etc.  If we are skipping over this statement,
1582
  // just verify it doesn't have labels, which would make it invalid to elide.
1583
208
  if (Case) {
1584
129
    if (CodeGenFunction::ContainsLabel(S, true))
1585
0
      return CSFC_Failure;
1586
129
    return CSFC_Success;
1587
129
  }
1588
79
1589
  // Otherwise, we want to include this statement.  Everything is cool with that
1590
  // so long as it doesn't contain a break out of the switch we're in.
1591
79
  if (CodeGenFunction::containsBreak(S)) return CSFC_Failure;
1592
78
1593
  // Otherwise, everything is great.  Include the statement and tell the caller
1594
  // that we fall through and include the next statement as well.
1595
78
  ResultStmts.push_back(S);
1596
78
  return CSFC_FallThrough;
1597
78
}
1598
1599
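Two sketches of how this collection behaves (hypothetical inputs). In the first, the walk finds the case, gathers statements until the break, and succeeds; in the second, a label inside a skipped region forces CSFC_Failure, so the switch is emitted normally.

    #include <stdio.h>
    int i;
    void other(void);

    void constant_switch(void) {
      switch (5) {
      case 5:
        printf("hi");   /* collected (CSFC_FallThrough) */
        ++i;            /* collected (CSFC_FallThrough) */
        break;          /* CSFC_Success: collection stops here */
      default:
        other();        /* never reached; only checked for labels */
      }
    }

    void work(void);
    void done(void);

    void blocked_by_label(void) {
      switch (2) {
      case 1:
      L:  work();       /* the skipped region contains label 'L', which a */
      case 2:           /* goto could still target, so ContainsLabel()    */
        done();         /* forces CSFC_Failure and normal switch emission */
        break;
      }
    }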
/// FindCaseStatementsForValue - Find the case statement being jumped to and
1600
/// then invoke CollectStatementsForCase to find the list of statements to emit
1601
/// for a switch on constant.  See the comment above CollectStatementsForCase
1602
/// for more details.
1603
static bool FindCaseStatementsForValue(const SwitchStmt &S,
1604
                                       const llvm::APSInt &ConstantCondValue,
1605
                                SmallVectorImpl<const Stmt*> &ResultStmts,
1606
                                       ASTContext &C,
1607
94
                                       const SwitchCase *&ResultCase) {
1608
  // First step, find the switch case that is being branched to.  We can do this
1609
  // efficiently by scanning the SwitchCase list.
1610
94
  const SwitchCase *Case = S.getSwitchCaseList();
1611
94
  const DefaultStmt *DefaultCase = nullptr;
1612
94
1613
234
  for (; Case; Case = Case->getNextSwitchCase()) {
1614
    // It's either a default or case.  Just remember the default statement in
1615
    // case we're not jumping to any numbered cases.
1616
198
    if (const DefaultStmt *DS = dyn_cast<DefaultStmt>(Case)) {
1617
66
      DefaultCase = DS;
1618
66
      continue;
1619
66
    }
1620
132
1621
    // Check to see if this case is the one we're looking for.
1622
132
    const CaseStmt *CS = cast<CaseStmt>(Case);
1623
    // Don't handle case ranges yet.
1624
132
    if (CS->getRHS()) return false;
1625
125
1626
    // If we found our case, remember it as 'case'.
1627
125
    if (CS->getLHS()->EvaluateKnownConstInt(C) == ConstantCondValue)
1628
51
      break;
1629
125
  }
1630
94
1631
  // If we didn't find a matching case, we use a default if it exists, or we
1632
  // elide the whole switch body!
1633
87
  if (!Case) {
1634
    // It is safe to elide the body of the switch if it doesn't contain labels
1635
    // etc.  If it is safe, return successfully with an empty ResultStmts list.
1636
36
    if (!DefaultCase)
1637
9
      return !CodeGenFunction::ContainsLabel(&S);
1638
27
    Case = DefaultCase;
1639
27
  }
1640
87
1641
  // Ok, we know which case is being jumped to, try to collect all the
1642
  // statements that follow it.  This can fail for a variety of reasons.  Also,
1643
  // check to see that the recursive walk actually found our case statement.
1644
  // Insane cases like this can fail to find it in the recursive walk since we
1645
  // don't handle every stmt kind:
1646
  // switch (4) {
1647
  //   while (1) {
1648
  //     case 4: ...
1649
78
  bool FoundCase = false;
1650
78
  ResultCase = Case;
1651
78
  return CollectStatementsForCase(S.getBody(), Case, FoundCase,
1652
78
                                  ResultStmts) != CSFC_Failure &&
1653
74
         FoundCase;
1654
87
}
1655
1656
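And if the constant matches no case and there is no default, the whole body can be dropped, again provided it contains no labels (a sketch):

    void f(void);

    void elided(void) {
      switch (7) {          /* constant condition, matches nothing */
      case 1: f(); break;   /* no default: ResultStmts stays empty */
      }                     /* and the entire body is elided       */
    }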
467
void CodeGenFunction::EmitSwitchStmt(const SwitchStmt &S) {
1657
  // Handle nested switch statements.
1658
467
  llvm::SwitchInst *SavedSwitchInsn = SwitchInsn;
1659
467
  SmallVector<uint64_t, 16> *SavedSwitchWeights = SwitchWeights;
1660
467
  llvm::BasicBlock *SavedCRBlock = CaseRangeBlock;
1661
467
1662
  // See if we can constant fold the condition of the switch and therefore only
1663
  // emit the live case statement (if any) of the switch.
1664
467
  llvm::APSInt ConstantCondValue;
1665
467
  if (ConstantFoldsToSimpleInteger(S.getCond(), ConstantCondValue)) {
1666
94
    SmallVector<const Stmt*, 4> CaseStmts;
1667
94
    const SwitchCase *Case = nullptr;
1668
94
    if (FindCaseStatementsForValue(S, ConstantCondValue, CaseStmts,
1669
83
                                   getContext(), Case)) {
1670
83
      if (Case)
1671
74
        incrementProfileCounter(Case);
1672
83
      RunCleanupsScope ExecutedScope(*this);
1673
83
1674
83
      if (S.getInit())
1675
0
        EmitStmt(S.getInit());
1676
83
1677
      // Emit the condition variable if needed inside the entire cleanup scope
1678
      // used by this special case for constant folded switches.
1679
83
      if (S.getConditionVariable())
1680
0
        EmitDecl(*S.getConditionVariable());
1681
83
1682
      // At this point, we are no longer "within" a switch instance, so
1683
      // we can temporarily enforce this to ensure that any embedded case
1684
      // statements are not emitted.
1685
83
      SwitchInsn = nullptr;
1686
83
1687
      // Okay, we can dead code eliminate everything except this case.  Emit the
1688
      // specified series of statements and we're good.
1689
155
      for (unsigned i = 0, e = CaseStmts.size(); i != e; ++i)
1690
72
        EmitStmt(CaseStmts[i]);
1691
83
      incrementProfileCounter(&S);
1692
83
1693
      // Now we want to restore the saved switch instance so that nested
1694
      // switches continue to function properly.
1695
83
      SwitchInsn = SavedSwitchInsn;
1696
83
1697
83
      return;
1698
83
    }
1699
384
  }
1700
384
1701
384
  JumpDest SwitchExit = getJumpDestInCurrentScope("sw.epilog");
1702
384
1703
384
  RunCleanupsScope ConditionScope(*this);
1704
384
1705
384
  if (S.getInit())
1706
7
    EmitStmt(S.getInit());
1707
384
1708
384
  if (S.getConditionVariable())
1709
4
    EmitDecl(*S.getConditionVariable());
1710
384
  llvm::Value *CondV = EmitScalarExpr(S.getCond());
1711
384
1712
  // Create basic block to hold stuff that comes after switch
1713
  // statement. We also need to create a default block now so that
1714
  // explicit case range tests can have a place to jump to on
1715
  // failure.
1716
384
  llvm::BasicBlock *DefaultBlock = createBasicBlock("sw.default");
1717
384
  SwitchInsn = Builder.CreateSwitch(CondV, DefaultBlock);
1718
384
  if (PGO.haveRegionCounts()) {
1719
    // Walk the SwitchCase list to find how many there are.
1720
27
    uint64_t DefaultCount = 0;
1721
27
    unsigned NumCases = 0;
1722
27
    for (const SwitchCase *Case = S.getSwitchCaseList();
1723
123
         Case;
1724
96
         Case = Case->getNextSwitchCase()) {
1725
96
      if (isa<DefaultStmt>(Case))
1726
19
        DefaultCount = getProfileCount(Case);
1727
96
      NumCases += 1;
1728
96
    }
1729
27
    SwitchWeights = new SmallVector<uint64_t, 16>();
1730
27
    SwitchWeights->reserve(NumCases);
1731
    // The default needs to be first. We store the edge count, so we already
1732
    // know the right weight.
1733
27
    SwitchWeights->push_back(DefaultCount);
1734
27
  }
1735
384
  CaseRangeBlock = DefaultBlock;
1736
384
1737
  // Clear the insertion point to indicate we are in unreachable code.
1738
384
  Builder.ClearInsertionPoint();
1739
384
1740
  // All break statements jump to SwitchExit. If BreakContinueStack is non-empty
1741
  // then reuse last ContinueBlock.
1742
384
  JumpDest OuterContinue;
1743
384
  if (!BreakContinueStack.empty())
1744
48
    OuterContinue = BreakContinueStack.back().ContinueBlock;
1745
384
1746
384
  BreakContinueStack.push_back(BreakContinue(SwitchExit, OuterContinue));
1747
384
1748
  // Emit switch body.
1749
384
  EmitStmt(S.getBody());
1750
384
1751
384
  BreakContinueStack.pop_back();
1752
384
1753
  // Update the default block in case explicit case range tests have
1754
  // been chained on top.
1755
384
  SwitchInsn->setDefaultDest(CaseRangeBlock);
1756
384
1757
  // If a default was never emitted:
1758
384
  if (!DefaultBlock->getParent()) {
1759
    // If we have cleanups, emit the default block so that there's a
1760
    // place to jump through the cleanups from.
1761
207
    if (ConditionScope.requiresCleanups()) {
1762
0
      EmitBlock(DefaultBlock);
1763
0
1764
    // Otherwise, just forward the default block to the switch end.
1765
207
    } else {
1766
207
      DefaultBlock->replaceAllUsesWith(SwitchExit.getBlock());
1767
207
      delete DefaultBlock;
1768
207
    }
1769
207
  }
1770
384
1771
384
  ConditionScope.ForceCleanup();
1772
384
1773
  // Emit continuation.
1774
384
  EmitBlock(SwitchExit.getBlock(), true);
1775
384
  incrementProfileCounter(&S);
1776
384
1777
  // If the switch has a condition wrapped by __builtin_unpredictable,
1778
  // create metadata that specifies that the switch is unpredictable.
1779
  // Don't bother if not optimizing because that metadata would not be used.
1780
384
  auto *Call = dyn_cast<CallExpr>(S.getCond());
1781
384
  if (Call && CGM.getCodeGenOpts().OptimizationLevel != 0) {
1782
12
    auto *FD = dyn_cast_or_null<FunctionDecl>(Call->getCalleeDecl());
1783
12
    if (FD && FD->getBuiltinID() == Builtin::BI__builtin_unpredictable) {
1784
2
      llvm::MDBuilder MDHelper(getLLVMContext());
1785
2
      SwitchInsn->setMetadata(llvm::LLVMContext::MD_unpredictable,
1786
2
                              MDHelper.createUnpredictable());
1787
2
    }
1788
12
  }
1789
384
1790
384
  if (SwitchWeights) {
1791
27
    assert(SwitchWeights->size() == 1 + SwitchInsn->getNumCases() &&
1792
27
           "switch weights do not match switch cases");
1793
    // If there's only one jump destination there's no sense weighting it.
1794
27
    if (SwitchWeights->size() > 1)
1795
22
      SwitchInsn->setMetadata(llvm::LLVMContext::MD_prof,
1796
22
                              createProfileWeights(*SwitchWeights));
1797
27
    delete SwitchWeights;
1798
27
  }
1799
384
  SwitchInsn = SavedSwitchInsn;
1800
384
  SwitchWeights = SavedSwitchWeights;
1801
384
  CaseRangeBlock = SavedCRBlock;
1802
384
}
1803
1804
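A sketch of the unpredictable-switch handling at the end of EmitSwitchStmt: at -O1 and above, the generated switch instruction is tagged so the backend will not try to predict it.

    int classify(int k) {
      switch (__builtin_unpredictable(k)) {  /* the emitted switch gets   */
      case 0:  return 10;                    /* !unpredictable metadata   */
      case 1:  return 20;                    /* when optimizing           */
      default: return 0;
      }
    }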
static std::string
1805
SimplifyConstraint(const char *Constraint, const TargetInfo &Target,
1806
2.80k
                 SmallVectorImpl<TargetInfo::ConstraintInfo> *OutCons=nullptr) {
1807
2.80k
  std::string Result;
1808
2.80k
1809
6.99k
  while (*Constraint) {
1810
4.18k
    switch (*Constraint) {
1811
3.39k
    default:
1812
3.39k
      Result += Target.convertConstraint(Constraint);
1813
3.39k
      break;
1814
    // Ignore these
1815
3
    case '*':
1816
3
    case '?':
1817
3
    case '!':
1818
3
    case '=': // Will see this and the following in multi-alternative constraints.
1819
3
    case '+':
1820
3
      break;
1821
1
    case '#': // Ignore the rest of the constraint alternative.
1822
3
      while (Constraint[1] && Constraint[1] != ',')
1823
2
        Constraint++;
1824
1
      break;
1825
16
    case '&':
1826
16
    case '%':
1827
16
      Result += *Constraint;
1828
18
      while (Constraint[1] && Constraint[1] == *Constraint)
1829
2
        Constraint++;
1830
16
      break;
1831
621
    case ',':
1832
621
      Result += "|";
1833
621
      break;
1834
143
    case 'g':
1835
143
      Result += "imr";
1836
143
      break;
1837
6
    case '[': {
1838
6
      assert(OutCons &&
1839
6
             "Must pass output names to constraints with a symbolic name");
1840
6
      unsigned Index;
1841
6
      bool result = Target.resolveSymbolicName(Constraint, *OutCons, Index);
1842
6
      assert(result && "Could not resolve symbolic name"); (void)result;
1843
6
      Result += llvm::utostr(Index);
1844
6
      break;
1845
4.18k
    }
1846
4.18k
    }
1847
4.18k
1848
4.18k
    Constraint++;
1849
4.18k
  }
1850
2.80k
1851
2.80k
  return Result;
1852
2.80k
}
1853
1854
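For illustration (a sketch; the exact clobber list is target-dependent): 'g' expands to "imr", a ',' separating multi-alternative constraints becomes '|', and '#...' alternatives are dropped.

    int dst, src = 1;

    void demo(void) {
      asm("mov %1, %0" : "=g"(dst) : "g"(src));
      /* "g" is rewritten to "imr", so the LLVM constraint string becomes */
      /* "=imr,imr" plus machine clobbers, e.g.                           */
      /* "=imr,imr,~{dirflag},~{fpsr},~{flags}" on x86 targets            */
    }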
/// AddVariableConstraints - Look at AsmExpr and, if it is a variable declared
1855
/// as using a particular register, add that as a constraint that will be used
1856
/// in this asm stmt.
1857
static std::string
1858
AddVariableConstraints(const std::string &Constraint, const Expr &AsmExpr,
1859
                       const TargetInfo &Target, CodeGenModule &CGM,
1860
2.80k
                       const AsmStmt &Stmt, const bool EarlyClobber) {
1861
2.80k
  const DeclRefExpr *AsmDeclRef = dyn_cast<DeclRefExpr>(&AsmExpr);
1862
2.80k
  if (!AsmDeclRef)
1863
800
    return Constraint;
1864
2.00k
  const ValueDecl &Value = *AsmDeclRef->getDecl();
1865
2.00k
  const VarDecl *Variable = dyn_cast<VarDecl>(&Value);
1866
2.00k
  if (!Variable)
1867
11
    return Constraint;
1868
1.99k
  if (Variable->getStorageClass() != SC_Register)
1869
1.36k
    return Constraint;
1870
637
  AsmLabelAttr *Attr = Variable->getAttr<AsmLabelAttr>();
1871
637
  if (!Attr)
1872
576
    return Constraint;
1873
61
  StringRef Register = Attr->getLabel();
1874
61
  assert(Target.isValidGCCRegisterName(Register));
1875
  // We're using validateOutputConstraint here because we only care if
1876
  // this is a register constraint.
1877
61
  TargetInfo::ConstraintInfo Info(Constraint, "");
1878
61
  if (Target.validateOutputConstraint(Info) &&
1879
0
      !Info.allowsRegister()) {
1880
0
    CGM.ErrorUnsupported(&Stmt, "__asm__");
1881
0
    return Constraint;
1882
0
  }
1883
  // Canonicalize the register here before returning it.
1884
61
  Register = Target.getNormalizedGCCRegisterName(Register);
1885
57
  return (EarlyClobber ? "&{" : "{") + Register.str() + "}";
1886
61
}
1887
1888
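A sketch of the register-variable case handled above (x86-specific, hypothetical variable):

    void zero_ebx(void) {
      register int idx asm("ebx");   /* GNU explicit-register variable */
      asm("xorl %0, %0" : "=r"(idx));
      /* "r" is replaced by the named register, so the output constraint  */
      /* becomes "={ebx}"; with an early clobber "=&r" it would become    */
      /* "=&{ebx}"                                                        */
    }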
llvm::Value*
1889
CodeGenFunction::EmitAsmInputLValue(const TargetInfo::ConstraintInfo &Info,
1890
                                    LValue InputValue, QualType InputType,
1891
                                    std::string &ConstraintStr,
1892
284
                                    SourceLocation Loc) {
1893
284
  llvm::Value *Arg;
1894
284
  if (Info.allowsRegister() || !Info.allowsMemory()) {
1895
122
    if (CodeGenFunction::hasScalarEvaluationKind(InputType)) {
1896
119
      Arg = EmitLoadOfLValue(InputValue, Loc).getScalarVal();
1897
3
    } else {
1898
3
      llvm::Type *Ty = ConvertType(InputType);
1899
3
      uint64_t Size = CGM.getDataLayout().getTypeSizeInBits(Ty);
1900
3
      if (Size <= 64 && llvm::isPowerOf2_64(Size)) {
1901
3
        Ty = llvm::IntegerType::get(getLLVMContext(), Size);
1902
3
        Ty = llvm::PointerType::getUnqual(Ty);
1903
3
1904
3
        Arg = Builder.CreateLoad(
1905
3
            Builder.CreateBitCast(InputValue.getAddress(*this), Ty));
1906
0
      } else {
1907
0
        Arg = InputValue.getPointer(*this);
1908
0
        ConstraintStr += '*';
1909
0
      }
1910
3
    }
1911
162
  } else {
1912
162
    Arg = InputValue.getPointer(*this);
1913
162
    ConstraintStr += '*';
1914
162
  }
1915
284
1916
284
  return Arg;
1917
284
}
1918
1919
llvm::Value* CodeGenFunction::EmitAsmInput(
1920
                                         const TargetInfo::ConstraintInfo &Info,
1921
                                           const Expr *InputExpr,
1922
1.66k
                                           std::string &ConstraintStr) {
1923
  // If this can't be a register or memory, i.e., has to be a constant
1924
  // (immediate or symbolic), try to emit it as such.
1925
1.66k
  if (!Info.allowsRegister() && !Info.allowsMemory()) {
1926
276
    if (Info.requiresImmediateConstant()) {
1927
68
      Expr::EvalResult EVResult;
1928
68
      InputExpr->EvaluateAsRValue(EVResult, getContext(), true);
1929
68
1930
68
      llvm::APSInt IntResult;
1931
68
      if (EVResult.Val.toIntegralConstant(IntResult, InputExpr->getType(),
1932
68
                                          getContext()))
1933
67
        return llvm::ConstantInt::get(getLLVMContext(), IntResult);
1934
209
    }
1935
209
1936
209
    Expr::EvalResult Result;
1937
209
    if (InputExpr->EvaluateAsInt(Result, getContext()))
1938
160
      return llvm::ConstantInt::get(getLLVMContext(), Result.Val.getInt());
1939
1.43k
  }
1940
1.43k
1941
1.43k
  if (Info.allowsRegister() || !Info.allowsMemory())
1942
1.28k
    if (CodeGenFunction::hasScalarEvaluationKind(InputExpr->getType()))
1943
1.28k
      return EmitScalarExpr(InputExpr);
1944
153
  if (InputExpr->getStmtClass() == Expr::CXXThisExprClass)
1945
3
    return EmitScalarExpr(InputExpr);
1946
150
  InputExpr = InputExpr->IgnoreParenNoopCasts(getContext());
1947
150
  LValue Dest = EmitLValue(InputExpr);
1948
150
  return EmitAsmInputLValue(Info, Dest, InputExpr->getType(), ConstraintStr,
1949
150
                            InputExpr->getExprLoc());
1950
150
}
1951
1952
/// getAsmSrcLocInfo - Return the !srcloc metadata node to attach to an inline
1953
/// asm call instruction.  The !srcloc MDNode contains a list of constant
1954
/// integers which are the source locations of the start of each line in the
1955
/// asm.
1956
static llvm::MDNode *getAsmSrcLocInfo(const StringLiteral *Str,
1957
1.43k
                                      CodeGenFunction &CGF) {
1958
1.43k
  SmallVector<llvm::Metadata *, 8> Locs;
1959
  // Add the location of the first line to the MDNode.
1960
1.43k
  Locs.push_back(llvm::ConstantAsMetadata::get(llvm::ConstantInt::get(
1961
1.43k
      CGF.Int32Ty, Str->getBeginLoc().getRawEncoding())));
1962
1.43k
  StringRef StrVal = Str->getString();
1963
1.43k
  if (!StrVal.empty()) {
1964
1.37k
    const SourceManager &SM = CGF.CGM.getContext().getSourceManager();
1965
1.37k
    const LangOptions &LangOpts = CGF.CGM.getLangOpts();
1966
1.37k
    unsigned StartToken = 0;
1967
1.37k
    unsigned ByteOffset = 0;
1968
1.37k
1969
    // Add the location of the start of each subsequent line of the asm to the
1970
    // MDNode.
1971
26.1k
    for (unsigned i = 0, e = StrVal.size() - 1; i != e; ++i) {
1972
24.8k
      if (StrVal[i] != '\n') continue;
1973
572
      SourceLocation LineLoc = Str->getLocationOfByte(
1974
572
          i + 1, SM, LangOpts, CGF.getTarget(), &StartToken, &ByteOffset);
1975
572
      Locs.push_back(llvm::ConstantAsMetadata::get(
1976
572
          llvm::ConstantInt::get(CGF.Int32Ty, LineLoc.getRawEncoding())));
1977
572
    }
1978
1.37k
  }
1979
1.43k
1980
1.43k
  return llvm::MDNode::get(CGF.getLLVMContext(), Locs);
1981
1.43k
}
1982
1983
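A sketch of the per-line encoding built above: one i32 raw source location per line of the asm string, so backend diagnostics can point at the offending line.

    void two_line_asm(void) {
      asm("movl %%eax, %%ebx\n\t"    /* location of the literal's start     */
          "movl %%ebx, %%ecx"        /* location of the byte after the '\n' */
          ::: "ebx", "ecx");
      /* the call gets !srcloc !{i32 <loc1>, i32 <loc2>}, one per line */
    }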
static void UpdateAsmCallInst(llvm::CallBase &Result, bool HasSideEffect,
1984
                              bool ReadOnly, bool ReadNone, bool NoMerge,
1985
                              const AsmStmt &S,
1986
                              const std::vector<llvm::Type *> &ResultRegTypes,
1987
                              CodeGenFunction &CGF,
1988
1.58k
                              std::vector<llvm::Value *> &RegResults) {
1989
1.58k
  Result.addAttribute(llvm::AttributeList::FunctionIndex,
1990
1.58k
                      llvm::Attribute::NoUnwind);
1991
1.58k
  if (NoMerge)
1992
1
    Result.addAttribute(llvm::AttributeList::FunctionIndex,
1993
1
                        llvm::Attribute::NoMerge);
1994
  // Attach readnone and readonly attributes.
1995
1.58k
  if (!HasSideEffect) {
1996
761
    if (ReadNone)
1997
301
      Result.addAttribute(llvm::AttributeList::FunctionIndex,
1998
301
                          llvm::Attribute::ReadNone);
1999
460
    else if (ReadOnly)
2000
319
      Result.addAttribute(llvm::AttributeList::FunctionIndex,
2001
319
                          llvm::Attribute::ReadOnly);
2002
761
  }
2003
1.58k
2004
  // Slap the source location of the inline asm into a !srcloc metadata on the
2005
  // call.
2006
1.58k
  if (const auto *gccAsmStmt = dyn_cast<GCCAsmStmt>(&S))
2007
1.43k
    Result.setMetadata("srcloc",
2008
1.43k
                       getAsmSrcLocInfo(gccAsmStmt->getAsmString(), CGF));
2009
154
  else {
2010
    // At least put the line number on MS inline asm blobs.
2011
154
    llvm::Constant *Loc = llvm::ConstantInt::get(CGF.Int32Ty,
2012
154
                                        S.getAsmLoc().getRawEncoding());
2013
154
    Result.setMetadata("srcloc",
2014
154
                       llvm::MDNode::get(CGF.getLLVMContext(),
2015
154
                                         llvm::ConstantAsMetadata::get(Loc)));
2016
154
  }
2017
1.58k
2018
1.58k
  if (CGF.getLangOpts().assumeFunctionsAreConvergent())
2019
    // Conservatively, mark all inline asm blocks in CUDA or OpenCL as
2020
    // convergent (meaning, they may call an intrinsically convergent op, such
2021
    // as bar.sync, and so can't have certain optimizations applied around
2022
    // them).
2023
13
    Result.addAttribute(llvm::AttributeList::FunctionIndex,
2024
13
                        llvm::Attribute::Convergent);
2025
  // Extract all of the register value results from the asm.
2026
1.58k
  if (ResultRegTypes.size() == 1) {
2027
790
    RegResults.push_back(&Result);
2028
798
  } else {
2029
1.00k
    for (unsigned i = 0, e = ResultRegTypes.size(); i != e; 
++i202
) {
2030
202
      llvm::Value *Tmp = CGF.Builder.CreateExtractValue(&Result, i, "asmresult");
2031
202
      RegResults.push_back(Tmp);
2032
202
    }
2033
798
  }
2034
1.58k
}
2035
2036
1.58k
void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
2037
  // Assemble the final asm string.
2038
1.58k
  std::string AsmString = S.generateAsmString(getContext());
2039
1.58k
2040
  // Get all the output and input constraints together.
2041
1.58k
  SmallVector<TargetInfo::ConstraintInfo, 4> OutputConstraintInfos;
2042
1.58k
  SmallVector<TargetInfo::ConstraintInfo, 4> InputConstraintInfos;
2043
1.58k
2044
2.73k
  for (unsigned i = 0, e = S.getNumOutputs(); i != e; i++) {
2045
1.14k
    StringRef Name;
2046
1.14k
    if (const GCCAsmStmt *GAS = dyn_cast<GCCAsmStmt>(&S))
2047
1.12k
      Name = GAS->getOutputName(i);
2048
1.14k
    TargetInfo::ConstraintInfo Info(S.getOutputConstraint(i), Name);
2049
1.14k
    bool IsValid = getTarget().validateOutputConstraint(Info); (void)IsValid;
2050
1.14k
    assert(IsValid && "Failed to parse output constraint");
2051
1.14k
    OutputConstraintInfos.push_back(Info);
2052
1.14k
  }
2053
1.58k
2054
3.24k
  for (unsigned i = 0, e = S.getNumInputs(); i != e; i++) {
2055
1.66k
    StringRef Name;
2056
1.66k
    if (const GCCAsmStmt *GAS = dyn_cast<GCCAsmStmt>(&S))
2057
1.55k
      Name = GAS->getInputName(i);
2058
1.66k
    TargetInfo::ConstraintInfo Info(S.getInputConstraint(i), Name);
2059
1.66k
    bool IsValid =
2060
1.66k
      getTarget().validateInputConstraint(OutputConstraintInfos, Info);
2061
1.66k
    assert(IsValid && "Failed to parse input constraint"); (void)IsValid;
2062
1.66k
    InputConstraintInfos.push_back(Info);
2063
1.66k
  }
2064
1.58k
2065
1.58k
  std::string Constraints;
2066
1.58k
2067
1.58k
  std::vector<LValue> ResultRegDests;
2068
1.58k
  std::vector<QualType> ResultRegQualTys;
2069
1.58k
  std::vector<llvm::Type *> ResultRegTypes;
2070
1.58k
  std::vector<llvm::Type *> ResultTruncRegTypes;
2071
1.58k
  std::vector<llvm::Type *> ArgTypes;
2072
1.58k
  std::vector<llvm::Value*> Args;
2073
1.58k
  llvm::BitVector ResultTypeRequiresCast;
2074
1.58k
2075
  // Keep track of inout constraints.
2076
1.58k
  std::string InOutConstraints;
2077
1.58k
  std::vector<llvm::Value*> InOutArgs;
2078
1.58k
  std::vector<llvm::Type*> InOutArgTypes;
2079
1.58k
2080
  // Keep track of out constraints for tied input operand.
2081
1.58k
  std::vector<std::string> OutputConstraints;
2082
1.58k
2083
  // An inline asm can be marked readonly if it meets the following conditions:
2084
  //  - it doesn't have any side effects
2085
  //  - it doesn't clobber memory
2086
  //  - it doesn't return a value by-reference
2087
  // It can be marked readnone if it doesn't have any input memory constraints
2088
  // in addition to meeting the conditions listed above.
2089
1.58k
  bool ReadOnly = true, ReadNone = true;
2090
1.58k
2091
2.73k
  for (unsigned i = 0, e = S.getNumOutputs(); i != e; i++) {
2092
1.14k
    TargetInfo::ConstraintInfo &Info = OutputConstraintInfos[i];
2093
1.14k
2094
    // Simplify the output constraint.
2095
1.14k
    std::string OutputConstraint(S.getOutputConstraint(i));
2096
1.14k
    OutputConstraint = SimplifyConstraint(OutputConstraint.c_str() + 1,
2097
1.14k
                                          getTarget(), &OutputConstraintInfos);
2098
1.14k
2099
1.14k
    const Expr *OutExpr = S.getOutputExpr(i);
2100
1.14k
    OutExpr = OutExpr->IgnoreParenNoopCasts(getContext());
2101
1.14k
2102
1.14k
    OutputConstraint = AddVariableConstraints(OutputConstraint, *OutExpr,
2103
1.14k
                                              getTarget(), CGM, S,
2104
1.14k
                                              Info.earlyClobber());
2105
1.14k
    OutputConstraints.push_back(OutputConstraint);
2106
1.14k
    LValue Dest = EmitLValue(OutExpr);
2107
1.14k
    if (!Constraints.empty())
2108
183
      Constraints += ',';
2109
1.14k
2110
    // If this is a register output, then make the inline asm return it
2111
    // by-value.  If this is a memory result, return the value by-reference.
2112
1.14k
    bool isScalarizableAggregate =
2113
1.14k
        hasAggregateEvaluationKind(OutExpr->getType());
2114
1.14k
    if (!Info.allowsMemory() && (hasScalarEvaluationKind(OutExpr->getType()) ||
2115
969
                                 isScalarizableAggregate)) {
2116
969
      Constraints += "=" + OutputConstraint;
2117
969
      ResultRegQualTys.push_back(OutExpr->getType());
2118
969
      ResultRegDests.push_back(Dest);
2119
969
      ResultTruncRegTypes.push_back(ConvertTypeForMem(OutExpr->getType()));
2120
969
      if (Info.allowsRegister() && isScalarizableAggregate) {
2121
16
        ResultTypeRequiresCast.push_back(true);
2122
16
        unsigned Size = getContext().getTypeSize(OutExpr->getType());
2123
16
        llvm::Type *ConvTy = llvm::IntegerType::get(getLLVMContext(), Size);
2124
16
        ResultRegTypes.push_back(ConvTy);
2125
953
      } else {
2126
953
        ResultTypeRequiresCast.push_back(false);
2127
953
        ResultRegTypes.push_back(ResultTruncRegTypes.back());
2128
953
      }
2129
      // If this output is tied to an input, and if the input is larger, then
2130
      // we need to set the actual result type of the inline asm node to be the
2131
      // same as the input type.
2132
969
      if (Info.hasMatchingInput()) {
2133
41
        unsigned InputNo;
2134
44
        for (InputNo = 0; InputNo != S.getNumInputs(); ++InputNo) {
2135
44
          TargetInfo::ConstraintInfo &Input = InputConstraintInfos[InputNo];
2136
44
          if (Input.hasTiedOperand() && Input.getTiedOperand() == i)
2137
41
            break;
2138
44
        }
2139
41
        assert(InputNo != S.getNumInputs() && "Didn't find matching input!");
2140
41
2141
41
        QualType InputTy = S.getInputExpr(InputNo)->getType();
2142
41
        QualType OutputType = OutExpr->getType();
2143
41
2144
41
        uint64_t InputSize = getContext().getTypeSize(InputTy);
2145
41
        if (getContext().getTypeSize(OutputType) < InputSize) {
2146
          // Form the asm to return the value as a larger integer or fp type.
2147
4
          ResultRegTypes.back() = ConvertType(InputTy);
2148
4
        }
2149
41
      }
2150
969
      if (llvm::Type* AdjTy =
2151
969
            getTargetHooks().adjustInlineAsmType(*this, OutputConstraint,
2152
969
                                                 ResultRegTypes.back()))
2153
969
        ResultRegTypes.back() = AdjTy;
2154
0
      else {
2155
0
        CGM.getDiags().Report(S.getAsmLoc(),
2156
0
                              diag::err_asm_invalid_type_in_input)
2157
0
            << OutExpr->getType() << OutputConstraint;
2158
0
      }
2159
969
2160
      // Update largest vector width for any vector types.
2161
969
      if (auto *VT = dyn_cast<llvm::VectorType>(ResultRegTypes.back()))
2162
131
        LargestVectorWidth =
2163
131
            std::max((uint64_t)LargestVectorWidth,
2164
131
                     VT->getPrimitiveSizeInBits().getKnownMinSize());
2165
180
    } else {
2166
180
      ArgTypes.push_back(Dest.getAddress(*this).getType());
2167
180
      Args.push_back(Dest.getPointer(*this));
2168
180
      Constraints += "=*";
2169
180
      Constraints += OutputConstraint;
2170
180
      ReadOnly = ReadNone = false;
2171
180
    }
2172
1.14k
2173
1.14k
    if (Info.isReadWrite()) {
2174
134
      InOutConstraints += ',';
2175
134
2176
134
      const Expr *InputExpr = S.getOutputExpr(i);
2177
134
      llvm::Value *Arg = EmitAsmInputLValue(Info, Dest, InputExpr->getType(),
2178
134
                                            InOutConstraints,
2179
134
                                            InputExpr->getExprLoc());
2180
134
2181
134
      if (llvm::Type* AdjTy =
2182
134
          getTargetHooks().adjustInlineAsmType(*this, OutputConstraint,
2183
134
                                               Arg->getType()))
2184
134
        Arg = Builder.CreateBitCast(Arg, AdjTy);
2185
134
2186
      // Update largest vector width for any vector types.
2187
134
      if (auto *VT = dyn_cast<llvm::VectorType>(Arg->getType()))
2188
5
        LargestVectorWidth =
2189
5
            std::max((uint64_t)LargestVectorWidth,
2190
5
                     VT->getPrimitiveSizeInBits().getKnownMinSize());
2191
134
      if (Info.allowsRegister())
2192
119
        InOutConstraints += llvm::utostr(i);
2193
15
      else
2194
15
        InOutConstraints += OutputConstraint;
2195
134
2196
134
      InOutArgTypes.push_back(Arg->getType());
2197
134
      InOutArgs.push_back(Arg);
2198
134
    }
2199
1.14k
  }
2200
1.58k
2201
  // If this is a Microsoft-style asm blob, store the return registers (EAX:EDX)
2202
  // to the return value slot. Only do this when returning in registers.
2203
1.58k
  if (isa<MSAsmStmt>(&S)) {
2204
154
    const ABIArgInfo &RetAI = CurFnInfo->getReturnInfo();
2205
154
    if (RetAI.isDirect() || RetAI.isExtend()) {
2206
      // Make a fake lvalue for the return value slot.
2207
26
      LValue ReturnSlot = MakeAddrLValue(ReturnValue, FnRetTy);
2208
26
      CGM.getTargetCodeGenInfo().addReturnRegisterOutputs(
2209
26
          *this, ReturnSlot, Constraints, ResultRegTypes, ResultTruncRegTypes,
2210
26
          ResultRegDests, AsmString, S.getNumOutputs());
2211
26
      SawAsmBlock = true;
2212
26
    }
2213
154
  }
2214
1.58k
2215
3.24k
  for (unsigned i = 0, e = S.getNumInputs(); i != e; 
i++1.66k
) {
2216
1.66k
    const Expr *InputExpr = S.getInputExpr(i);
2217
1.66k
2218
1.66k
    TargetInfo::ConstraintInfo &Info = InputConstraintInfos[i];
2219
1.66k
2220
1.66k
    if (Info.allowsMemory())
2221
585
      ReadNone = false;
2222
1.66k
2223
1.66k
    if (!Constraints.empty())
2224
1.39k
      Constraints += ',';
2225
1.66k
2226
    // Simplify the input constraint.
2227
1.66k
    std::string InputConstraint(S.getInputConstraint(i));
2228
1.66k
    InputConstraint = SimplifyConstraint(InputConstraint.c_str(), getTarget(),
2229
1.66k
                                         &OutputConstraintInfos);
2230
1.66k
2231
1.66k
    InputConstraint = AddVariableConstraints(
2232
1.66k
        InputConstraint, *InputExpr->IgnoreParenNoopCasts(getContext()),
2233
1.66k
        getTarget(), CGM, S, false /* No EarlyClobber */);
2234
1.66k
2235
1.66k
    std::string ReplaceConstraint (InputConstraint);
2236
1.66k
    llvm::Value *Arg = EmitAsmInput(Info, InputExpr, Constraints);
2237
1.66k
2238
    // If this input argument is tied to a larger output result, extend the
2239
    // input to be the same size as the output.  The LLVM backend wants to see
2240
    // the input and output of a matching constraint be the same size.  Note
2241
    // that GCC does not define what the top bits are here.  We use zext because
2242
    // that is usually cheaper, but LLVM IR should really get an anyext someday.
2243
1.66k
    if (Info.hasTiedOperand()) {
2244
41
      unsigned Output = Info.getTiedOperand();
2245
41
      QualType OutputType = S.getOutputExpr(Output)->getType();
2246
41
      QualType InputTy = InputExpr->getType();
2247
41
2248
41
      if (getContext().getTypeSize(OutputType) >
2249
7
          getContext().getTypeSize(InputTy)) {
2250
        // Use ptrtoint as appropriate so that we can do our extension.
2251
7
        if (isa<llvm::PointerType>(Arg->getType()))
2252
0
          Arg = Builder.CreatePtrToInt(Arg, IntPtrTy);
2253
7
        llvm::Type *OutputTy = ConvertType(OutputType);
2254
7
        if (isa<llvm::IntegerType>(OutputTy))
2255
3
          Arg = Builder.CreateZExt(Arg, OutputTy);
2256
4
        else if (isa<llvm::PointerType>(OutputTy))
2257
1
          Arg = Builder.CreateZExt(Arg, IntPtrTy);
2258
3
        else {
2259
3
          assert(OutputTy->isFloatingPointTy() && "Unexpected output type");
2260
3
          Arg = Builder.CreateFPExt(Arg, OutputTy);
2261
3
        }
2262
7
      }
2263
      // Deal with the tied operands' constraint code in adjustInlineAsmType.
2264
41
      ReplaceConstraint = OutputConstraints[Output];
2265
41
    }
2266
1.66k
    if (llvm::Type* AdjTy =
2267
1.66k
          getTargetHooks().adjustInlineAsmType(*this, ReplaceConstraint,
2268
1.66k
                                                   Arg->getType()))
2269
1.66k
      Arg = Builder.CreateBitCast(Arg, AdjTy);
2270
0
    else
2271
0
      CGM.getDiags().Report(S.getAsmLoc(), diag::err_asm_invalid_type_in_input)
2272
0
          << InputExpr->getType() << InputConstraint;
2273
1.66k
2274
    // Update largest vector width for any vector types.
2275
1.66k
    if (auto *VT = dyn_cast<llvm::VectorType>(Arg->getType()))
2276
139
      LargestVectorWidth =
2277
139
          std::max((uint64_t)LargestVectorWidth,
2278
139
                   VT->getPrimitiveSizeInBits().getKnownMinSize());
2279
1.66k
2280
1.66k
    ArgTypes.push_back(Arg->getType());
2281
1.66k
    Args.push_back(Arg);
2282
1.66k
    Constraints += InputConstraint;
2283
1.66k
  }
2284
1.58k
2285
  // Labels
2286
1.58k
  SmallVector<llvm::BasicBlock *, 16> Transfer;
2287
1.58k
  llvm::BasicBlock *Fallthrough = nullptr;
2288
1.58k
  bool IsGCCAsmGoto = false;
2289
1.58k
  if (const auto *GS =  dyn_cast<GCCAsmStmt>(&S)) {
2290
1.43k
    IsGCCAsmGoto = GS->isAsmGoto();
2291
1.43k
    if (IsGCCAsmGoto) {
2292
45
      for (const auto *E : GS->labels()) {
2293
45
        JumpDest Dest = getJumpDestForLabel(E->getLabel());
2294
45
        Transfer.push_back(Dest.getBlock());
2295
45
        llvm::BlockAddress *BA =
2296
45
            llvm::BlockAddress::get(CurFn, Dest.getBlock());
2297
45
        Args.push_back(BA);
2298
45
        ArgTypes.push_back(BA->getType());
2299
45
        if (!Constraints.empty())
2300
40
          Constraints += ',';
2301
45
        Constraints += 'X';
2302
45
      }
2303
26
      Fallthrough = createBasicBlock("asm.fallthrough");
2304
26
    }
2305
1.43k
  }
2306
1.58k
2307
  // Append the "input" part of inout constraints last.
2308
1.72k
  for (unsigned i = 0, e = InOutArgs.size(); i != e; i++) {
2309
134
    ArgTypes.push_back(InOutArgTypes[i]);
2310
134
    Args.push_back(InOutArgs[i]);
2311
134
  }
2312
1.58k
  Constraints += InOutConstraints;
2313
1.58k
2314
  // Clobbers
2315
2.72k
  for (unsigned i = 0, e = S.getNumClobbers(); i != e; i++) {
2316
1.14k
    StringRef Clobber = S.getClobber(i);
2317
1.14k
2318
1.14k
    if (Clobber == "memory")
2319
136
      ReadOnly = ReadNone = false;
2320
1.00k
    else if (Clobber != "cc") {
2321
963
      Clobber = getTarget().getNormalizedGCCRegisterName(Clobber);
2322
963
      if (CGM.getCodeGenOpts().StackClashProtector &&
2323
3
          getTarget().isSPRegName(Clobber)) {
2324
3
        CGM.getDiags().Report(S.getAsmLoc(),
2325
3
                              diag::warn_stack_clash_protection_inline_asm);
2326
3
      }
2327
963
    }
2328
1.14k
2329
1.14k
    if (!Constraints.empty())
2330
933
      Constraints += ',';
2331
1.14k
2332
1.14k
    Constraints += "~{";
2333
1.14k
    Constraints += Clobber;
2334
1.14k
    Constraints += '}';
2335
1.14k
  }
2336
1.58k
2337
  // Add machine specific clobbers
2338
1.58k
  std::string MachineClobbers = getTarget().getClobbers();
2339
1.58k
  if (!MachineClobbers.empty()) {
2340
999
    if (!Constraints.empty())
2341
887
      Constraints += ',';
2342
999
    Constraints += MachineClobbers;
2343
999
  }
2344
1.58k
2345
1.58k
  llvm::Type *ResultType;
2346
1.58k
  if (ResultRegTypes.empty())
2347
729
    ResultType = VoidTy;
2348
859
  else if (ResultRegTypes.size() == 1)
2349
790
    ResultType = ResultRegTypes[0];
2350
69
  else
2351
69
    ResultType = llvm::StructType::get(getLLVMContext(), ResultRegTypes);
2352
1.58k
2353
1.58k
  llvm::FunctionType *FTy =
2354
1.58k
    llvm::FunctionType::get(ResultType, ArgTypes, false);
2355
1.58k
2356
1.58k
  bool HasSideEffect = S.isVolatile() || S.getNumOutputs() == 0;
2357
1.58k
  llvm::InlineAsm::AsmDialect AsmDialect = isa<MSAsmStmt>(&S) ?
2358
1.43k
    llvm::InlineAsm::AD_Intel : llvm::InlineAsm::AD_ATT;
2359
1.58k
  llvm::InlineAsm *IA =
2360
1.58k
    llvm::InlineAsm::get(FTy, AsmString, Constraints, HasSideEffect,
2361
1.58k
                         /* IsAlignStack */ false, AsmDialect);
2362
1.58k
  std::vector<llvm::Value*> RegResults;
2363
1.58k
  if (IsGCCAsmGoto) {
2364
26
    llvm::CallBrInst *Result =
2365
26
        Builder.CreateCallBr(IA, Fallthrough, Transfer, Args);
2366
26
    EmitBlock(Fallthrough);
2367
26
    UpdateAsmCallInst(cast<llvm::CallBase>(*Result), HasSideEffect, ReadOnly,
2368
26
                      ReadNone, InNoMergeAttributedStmt, S, ResultRegTypes,
2369
26
                      *this, RegResults);
2370
1.56k
  } else {
2371
1.56k
    llvm::CallInst *Result =
2372
1.56k
        Builder.CreateCall(IA, Args, getBundlesForFunclet(IA));
2373
1.56k
    UpdateAsmCallInst(cast<llvm::CallBase>(*Result), HasSideEffect, ReadOnly,
2374
1.56k
                      ReadNone, InNoMergeAttributedStmt, S, ResultRegTypes,
2375
1.56k
                      *this, RegResults);
2376
1.56k
  }
2377
1.58k
2378
1.58k
  assert(RegResults.size() == ResultRegTypes.size());
2379
1.58k
  assert(RegResults.size() == ResultTruncRegTypes.size());
2380
1.58k
  assert(RegResults.size() == ResultRegDests.size());
2381
  // ResultRegDests can also be populated by addReturnRegisterOutputs() above,
2382
  // in which case its size may grow.
2383
1.58k
  assert(ResultTypeRequiresCast.size() <= ResultRegDests.size());
2384
2.57k
  for (unsigned i = 0, e = RegResults.size(); i != e; ++i) {
2385
992
    llvm::Value *Tmp = RegResults[i];
2386
992
2387
    // If the result type of the LLVM IR asm doesn't match the result type of
2388
    // the expression, do the conversion.
2389
992
    if (ResultRegTypes[i] != ResultTruncRegTypes[i]) {
2390
34
      llvm::Type *TruncTy = ResultTruncRegTypes[i];
2391
34
2392
      // Truncate the integer result to the right size; note that TruncTy can be
2393
      // a pointer.
2394
34
      if (TruncTy->isFloatingPointTy())
2395
1
        Tmp = Builder.CreateFPTrunc(Tmp, TruncTy);
2396
33
      else if (TruncTy->isPointerTy() && Tmp->getType()->isIntegerTy()) {
2397
0
        uint64_t ResSize = CGM.getDataLayout().getTypeSizeInBits(TruncTy);
2398
0
        Tmp = Builder.CreateTrunc(Tmp,
2399
0
                   llvm::IntegerType::get(getLLVMContext(), (unsigned)ResSize));
2400
0
        Tmp = Builder.CreateIntToPtr(Tmp, TruncTy);
2401
33
      } else if (Tmp->getType()->isPointerTy() && TruncTy->isIntegerTy()) {
2402
0
        uint64_t TmpSize =CGM.getDataLayout().getTypeSizeInBits(Tmp->getType());
2403
0
        Tmp = Builder.CreatePtrToInt(Tmp,
2404
0
                   llvm::IntegerType::get(getLLVMContext(), (unsigned)TmpSize));
2405
0
        Tmp = Builder.CreateTrunc(Tmp, TruncTy);
2406
33
      } else if (TruncTy->isIntegerTy()) {
2407
7
        Tmp = Builder.CreateZExtOrTrunc(Tmp, TruncTy);
2408
26
      } else if (TruncTy->isVectorTy()) {
2409
10
        Tmp = Builder.CreateBitCast(Tmp, TruncTy);
2410
10
      }
2411
34
    }
2412
992
2413
992
    LValue Dest = ResultRegDests[i];
2414
    // ResultTypeRequiresCast elements correspond to the first
2415
    // ResultTypeRequiresCast.size() elements of RegResults.
2416
992
    if ((i < ResultTypeRequiresCast.size()) && ResultTypeRequiresCast[i]) {
2417
16
      unsigned Size = getContext().getTypeSize(ResultRegQualTys[i]);
2418
16
      Address A = Builder.CreateBitCast(Dest.getAddress(*this),
2419
16
                                        ResultRegTypes[i]->getPointerTo());
2420
16
      QualType Ty = getContext().getIntTypeForBitwidth(Size, /*Signed*/ false);
2421
16
      if (Ty.isNull()) {
2422
3
        const Expr *OutExpr = S.getOutputExpr(i);
2423
3
        CGM.Error(
2424
3
            OutExpr->getExprLoc(),
2425
3
            "impossible constraint in asm: can't store value into a register");
2426
3
        return;
2427
3
      }
2428
13
      Dest = MakeAddrLValue(A, Ty);
2429
13
    }
2430
989
    EmitStoreThroughLValue(RValue::get(Tmp), Dest);
2431
989
  }
2432
1.58k
}
2433
2434
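A sketch of the tied-operand widening described in the comments above (hypothetical types; Sema and the target constrain which mismatches are accepted):

    int out;
    long long in = 1;

    void tied(void) {
      asm("" : "=r"(out) : "0"(in));
      /* "0" ties the input to output 0; 'long long' is wider than 'int',  */
      /* so the asm's result register type is widened to i64 (a narrower   */
      /* input would instead be zero-extended to the output type); the i64 */
      /* result is then truncated back with CreateZExtOrTrunc before the   */
      /* store to 'out'                                                    */
    }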
734
LValue CodeGenFunction::InitCapturedStruct(const CapturedStmt &S) {
2435
734
  const RecordDecl *RD = S.getCapturedRecordDecl();
2436
734
  QualType RecordTy = getContext().getRecordType(RD);
2437
734
2438
  // Initialize the captured struct.
2439
734
  LValue SlotLV =
2440
734
    MakeAddrLValue(CreateMemTemp(RecordTy, "agg.captured"), RecordTy);
2441
734
2442
734
  RecordDecl::field_iterator CurField = RD->field_begin();
2443
734
  for (CapturedStmt::const_capture_init_iterator I = S.capture_init_begin(),
2444
734
                                                 E = S.capture_init_end();
2445
1.81k
       I != E; 
++I, ++CurField1.08k
) {
2446
1.08k
    LValue LV = EmitLValueForFieldInitialization(SlotLV, *CurField);
2447
1.08k
    if (CurField->hasCapturedVLAType()) {
2448
47
      EmitLambdaVLACapture(CurField->getCapturedVLAType(), LV);
2449
1.03k
    } else {
2450
1.03k
      EmitInitializerForField(*CurField, LV, *I);
2451
1.03k
    }
2452
1.08k
  }
2453
734
2454
734
  return SlotLV;
2455
734
}
2456
2457
/// Generate an outlined function for the body of a CapturedStmt, store any
2458
/// captured variables into the captured struct, and call the outlined function.
2459
llvm::Function *
2460
27
CodeGenFunction::EmitCapturedStmt(const CapturedStmt &S, CapturedRegionKind K) {
2461
27
  LValue CapStruct = InitCapturedStruct(S);
2462
27
2463
  // Emit the CapturedDecl
2464
27
  CodeGenFunction CGF(CGM, true);
2465
27
  CGCapturedStmtRAII CapInfoRAII(CGF, new CGCapturedStmtInfo(S, K));
2466
27
  llvm::Function *F = CGF.GenerateCapturedStmtFunction(S);
2467
27
  delete CGF.CapturedStmtInfo;
2468
27
2469
  // Emit call to the helper function.
2470
27
  EmitCallOrInvoke(F, CapStruct.getPointer(*this));
2471
27
2472
27
  return F;
2473
27
}
2474
2475
707
Address CodeGenFunction::GenerateCapturedStmtArgument(const CapturedStmt &S) {
2476
707
  LValue CapStruct = InitCapturedStruct(S);
2477
707
  return CapStruct.getAddress(*this);
2478
707
}
2479
2480
/// Creates the outlined function for a CapturedStmt.
2481
llvm::Function *
2482
734
CodeGenFunction::GenerateCapturedStmtFunction(const CapturedStmt &S) {
2483
734
  assert(CapturedStmtInfo &&
2484
734
    "CapturedStmtInfo should be set when generating the captured function");
2485
734
  const CapturedDecl *CD = S.getCapturedDecl();
2486
734
  const RecordDecl *RD = S.getCapturedRecordDecl();
2487
734
  SourceLocation Loc = S.getBeginLoc();
2488
734
  assert(CD->hasBody() && "missing CapturedDecl body");
2489
734
2490
  // Build the argument list.
2491
734
  ASTContext &Ctx = CGM.getContext();
2492
734
  FunctionArgList Args;
2493
734
  Args.append(CD->param_begin(), CD->param_end());
2494
734
2495
  // Create the function declaration.
2496
734
  const CGFunctionInfo &FuncInfo =
2497
734
    CGM.getTypes().arrangeBuiltinFunctionDeclaration(Ctx.VoidTy, Args);
2498
734
  llvm::FunctionType *FuncLLVMTy = CGM.getTypes().GetFunctionType(FuncInfo);
2499
734
2500
734
  llvm::Function *F =
2501
734
    llvm::Function::Create(FuncLLVMTy, llvm::GlobalValue::InternalLinkage,
2502
734
                           CapturedStmtInfo->getHelperName(), &CGM.getModule());
2503
734
  CGM.SetInternalFunctionAttributes(CD, F, FuncInfo);
2504
734
  if (CD->isNothrow())
2505
555
    F->addFnAttr(llvm::Attribute::NoUnwind);
2506
734
2507
  // Generate the function.
2508
734
  StartFunction(CD, Ctx.VoidTy, F, FuncInfo, Args, CD->getLocation(),
2509
734
                CD->getBody()->getBeginLoc());
2510
  // Set the context parameter in CapturedStmtInfo.
2511
734
  Address DeclPtr = GetAddrOfLocalVar(CD->getContextParam());
2512
734
  CapturedStmtInfo->setContextValue(Builder.CreateLoad(DeclPtr));
2513
734
2514
  // Initialize variable-length arrays.
2515
734
  LValue Base = MakeNaturalAlignAddrLValue(CapturedStmtInfo->getContextValue(),
2516
734
                                           Ctx.getTagDeclType(RD));
2517
1.08k
  for (auto *FD : RD->fields()) {
2518
1.08k
    if (FD->hasCapturedVLAType()) {
2519
47
      auto *ExprArg =
2520
47
          EmitLoadOfLValue(EmitLValueForField(Base, FD), S.getBeginLoc())
2521
47
              .getScalarVal();
2522
47
      auto VAT = FD->getCapturedVLAType();
2523
47
      VLASizeMap[VAT->getSizeExpr()] = ExprArg;
2524
47
    }
2525
1.08k
  }
2526
734
2527
  // If 'this' is captured, load it into CXXThisValue.
2528
734
  if (CapturedStmtInfo->isCXXThisExprCaptured()) {
2529
24
    FieldDecl *FD = CapturedStmtInfo->getThisFieldDecl();
2530
24
    LValue ThisLValue = EmitLValueForField(Base, FD);
2531
24
    CXXThisValue = EmitLoadOfLValue(ThisLValue, Loc).getScalarVal();
2532
24
  }
2533
734
2534
734
  PGO.assignRegionCounters(GlobalDecl(CD), F);
2535
734
  CapturedStmtInfo->EmitBody(*this, CD->getBody());
2536
734
  FinishFunction(CD->getBodyRBrace());
2537
734
2538
734
  return F;
2539
734
}
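A sketch of what this outlining produces (IR names are illustrative, not exact):

    /* for a captured statement over a local 'int x':                      */
    /*   %struct.anon = type { i32* }                     ; agg.captured   */
    /*   define internal void @__captured_stmt(%struct.anon* %ctx) { ... } */
    /* InitCapturedStruct fills agg.captured with the captures, and        */
    /* EmitCapturedStmt then emits a call to @__captured_stmt passing the  */
    /* address of agg.captured                                             */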