Coverage Report

Created: 2017-10-03 07:32

/Users/buildslave/jenkins/sharedspace/clang-stage2-coverage-R@2/llvm/lib/Target/ARM/ARMCallLowering.cpp
Line
Count
Source (jump to first uncovered line)
1
//===- llvm/lib/Target/ARM/ARMCallLowering.cpp - Call lowering ------------===//
2
//
3
//                     The LLVM Compiler Infrastructure
4
//
5
// This file is distributed under the University of Illinois Open Source
6
// License. See LICENSE.TXT for details.
7
//
8
//===----------------------------------------------------------------------===//
9
//
10
/// \file
11
/// This file implements the lowering of LLVM calls to machine code calls for
12
/// GlobalISel.
13
//
14
//===----------------------------------------------------------------------===//
15
16
#include "ARMCallLowering.h"
17
#include "ARMBaseInstrInfo.h"
18
#include "ARMISelLowering.h"
19
#include "ARMSubtarget.h"
20
#include "Utils/ARMBaseInfo.h"
21
#include "llvm/ADT/SmallVector.h"
22
#include "llvm/CodeGen/Analysis.h"
23
#include "llvm/CodeGen/CallingConvLower.h"
24
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
25
#include "llvm/CodeGen/GlobalISel/Utils.h"
26
#include "llvm/CodeGen/LowLevelType.h"
27
#include "llvm/CodeGen/MachineBasicBlock.h"
28
#include "llvm/CodeGen/MachineFrameInfo.h"
29
#include "llvm/CodeGen/MachineFunction.h"
30
#include "llvm/CodeGen/MachineInstrBuilder.h"
31
#include "llvm/CodeGen/MachineMemOperand.h"
32
#include "llvm/CodeGen/MachineOperand.h"
33
#include "llvm/CodeGen/MachineRegisterInfo.h"
34
#include "llvm/CodeGen/MachineValueType.h"
35
#include "llvm/CodeGen/ValueTypes.h"
36
#include "llvm/IR/Attributes.h"
37
#include "llvm/IR/DataLayout.h"
38
#include "llvm/IR/DerivedTypes.h"
39
#include "llvm/IR/Function.h"
40
#include "llvm/IR/Type.h"
41
#include "llvm/IR/Value.h"
42
#include "llvm/Support/Casting.h"
43
#include "llvm/Support/LowLevelTypeImpl.h"
44
#include "llvm/Target/TargetRegisterInfo.h"
45
#include "llvm/Target/TargetSubtargetInfo.h"
46
#include <algorithm>
47
#include <cassert>
48
#include <cstdint>
49
#include <utility>
50
51
using namespace llvm;
52
53
// Construct the ARM call lowering, handing the target lowering object to the
// generic CallLowering base class.
ARMCallLowering::ARMCallLowering(const ARMTargetLowering &TLI)
    : CallLowering(&TLI) {}
55
56
static bool isSupportedType(const DataLayout &DL, const ARMTargetLowering &TLI,
57
1.37k
                            Type *T) {
58
1.37k
  if (T->isArrayTy())
59
57
    return true;
60
1.32k
61
1.32k
  
if (1.32k
T->isStructTy()1.32k
) {
62
30
    // For now we only allow homogeneous structs that we can manipulate with
63
30
    // G_MERGE_VALUES and G_UNMERGE_VALUES
64
30
    auto StructT = cast<StructType>(T);
65
70
    for (unsigned i = 1, e = StructT->getNumElements(); 
i != e70
;
++i40
)
66
45
      
if (45
StructT->getElementType(i) != StructT->getElementType(0)45
)
67
5
        return false;
68
25
    return true;
69
1.29k
  }
70
1.29k
71
1.29k
  EVT VT = TLI.getValueType(DL, T, true);
72
1.29k
  if (
!VT.isSimple() || 1.29k
VT.isVector()1.28k
||
73
1.27k
      
!(VT.isInteger() || 1.27k
VT.isFloatingPoint()486
))
74
15
    return false;
75
1.27k
76
1.27k
  unsigned VTSize = VT.getSimpleVT().getSizeInBits();
77
1.27k
78
1.27k
  if (VTSize == 64)
79
1.27k
    // FIXME: Support i64 too
80
236
    return VT.isFloatingPoint();
81
1.03k
82
1.03k
  
return VTSize == 1 || 1.03k
VTSize == 81.01k
||
VTSize == 16888
||
VTSize == 32748
;
83
1.37k
}
84
85
namespace {
86
87
/// Helper class for values going out through an ABI boundary (used for handling
88
/// function return values and call parameters).
89
struct OutgoingValueHandler : public CallLowering::ValueHandler {
90
  OutgoingValueHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI,
91
                       MachineInstrBuilder &MIB, CCAssignFn *AssignFn)
92
439
      : ValueHandler(MIRBuilder, MRI, AssignFn), MIB(MIB) {}
93
94
  unsigned getStackAddress(uint64_t Size, int64_t Offset,
95
100
                           MachinePointerInfo &MPO) override {
96
100
    assert((Size == 1 || Size == 2 || Size == 4 || Size == 8) &&
97
100
           "Unsupported size");
98
100
99
100
    LLT p0 = LLT::pointer(0, 32);
100
100
    LLT s32 = LLT::scalar(32);
101
100
    unsigned SPReg = MRI.createGenericVirtualRegister(p0);
102
100
    MIRBuilder.buildCopy(SPReg, ARM::SP);
103
100
104
100
    unsigned OffsetReg = MRI.createGenericVirtualRegister(s32);
105
100
    MIRBuilder.buildConstant(OffsetReg, Offset);
106
100
107
100
    unsigned AddrReg = MRI.createGenericVirtualRegister(p0);
108
100
    MIRBuilder.buildGEP(AddrReg, SPReg, OffsetReg);
109
100
110
100
    MPO = MachinePointerInfo::getStack(MIRBuilder.getMF(), Offset);
111
100
    return AddrReg;
112
100
  }
113
114
  void assignValueToReg(unsigned ValVReg, unsigned PhysReg,
115
753
                        CCValAssign &VA) override {
116
753
    assert(VA.isRegLoc() && "Value shouldn't be assigned to reg");
117
753
    assert(VA.getLocReg() == PhysReg && "Assigning to the wrong reg?");
118
753
119
753
    assert(VA.getValVT().getSizeInBits() <= 64 && "Unsupported value size");
120
753
    assert(VA.getLocVT().getSizeInBits() <= 64 && "Unsupported location size");
121
753
122
753
    unsigned ExtReg = extendRegister(ValVReg, VA);
123
753
    MIRBuilder.buildCopy(PhysReg, ExtReg);
124
753
    MIB.addUse(PhysReg, RegState::Implicit);
125
753
  }
126
127
  void assignValueToAddress(unsigned ValVReg, unsigned Addr, uint64_t Size,
128
100
                            MachinePointerInfo &MPO, CCValAssign &VA) override {
129
100
    assert((Size == 1 || Size == 2 || Size == 4 || Size == 8) &&
130
100
           "Unsupported size");
131
100
132
100
    unsigned ExtReg = extendRegister(ValVReg, VA);
133
100
    auto MMO = MIRBuilder.getMF().getMachineMemOperand(
134
100
        MPO, MachineMemOperand::MOStore, VA.getLocVT().getStoreSize(),
135
100
        /* Alignment */ 0);
136
100
    MIRBuilder.buildStore(ExtReg, Addr, *MMO);
137
100
  }
138
139
  unsigned assignCustomValue(const CallLowering::ArgInfo &Arg,
140
112
                             ArrayRef<CCValAssign> VAs) override {
141
112
    CCValAssign VA = VAs[0];
142
112
    assert(VA.needsCustom() && "Value doesn't need custom handling");
143
112
    assert(VA.getValVT() == MVT::f64 && "Unsupported type");
144
112
145
112
    CCValAssign NextVA = VAs[1];
146
112
    assert(NextVA.needsCustom() && "Value doesn't need custom handling");
147
112
    assert(NextVA.getValVT() == MVT::f64 && "Unsupported type");
148
112
149
112
    assert(VA.getValNo() == NextVA.getValNo() &&
150
112
           "Values belong to different arguments");
151
112
152
112
    assert(VA.isRegLoc() && "Value should be in reg");
153
112
    assert(NextVA.isRegLoc() && "Value should be in reg");
154
112
155
112
    unsigned NewRegs[] = {MRI.createGenericVirtualRegister(LLT::scalar(32)),
156
112
                          MRI.createGenericVirtualRegister(LLT::scalar(32))};
157
112
    MIRBuilder.buildUnmerge(NewRegs, Arg.Reg);
158
112
159
112
    bool IsLittle = MIRBuilder.getMF().getSubtarget<ARMSubtarget>().isLittle();
160
112
    if (!IsLittle)
161
7
      std::swap(NewRegs[0], NewRegs[1]);
162
112
163
112
    assignValueToReg(NewRegs[0], VA.getLocReg(), VA);
164
112
    assignValueToReg(NewRegs[1], NextVA.getLocReg(), NextVA);
165
112
166
112
    return 1;
167
112
  }
168
169
  bool assignArg(unsigned ValNo, MVT ValVT, MVT LocVT,
170
                 CCValAssign::LocInfo LocInfo,
171
741
                 const CallLowering::ArgInfo &Info, CCState &State) override {
172
741
    if (AssignFn(ValNo, ValVT, LocVT, LocInfo, Info.Flags, State))
173
0
      return true;
174
741
175
741
    StackSize =
176
741
        std::max(StackSize, static_cast<uint64_t>(State.getNextStackOffset()));
177
741
    return false;
178
741
  }
179
180
  MachineInstrBuilder &MIB;
181
  uint64_t StackSize = 0;
182
};
183
184
} // end anonymous namespace
185
186
void ARMCallLowering::splitToValueTypes(
187
    const ArgInfo &OrigArg, SmallVectorImpl<ArgInfo> &SplitArgs,
188
1.34k
    MachineFunction &MF, const SplitArgTy &PerformArgSplit) const {
189
1.34k
  const ARMTargetLowering &TLI = *getTLI<ARMTargetLowering>();
190
1.34k
  LLVMContext &Ctx = OrigArg.Ty->getContext();
191
1.34k
  const DataLayout &DL = MF.getDataLayout();
192
1.34k
  MachineRegisterInfo &MRI = MF.getRegInfo();
193
1.34k
  const Function *F = MF.getFunction();
194
1.34k
195
1.34k
  SmallVector<EVT, 4> SplitVTs;
196
1.34k
  SmallVector<uint64_t, 4> Offsets;
197
1.34k
  ComputeValueVTs(TLI, DL, OrigArg.Ty, SplitVTs, &Offsets, 0);
198
1.34k
199
1.34k
  if (
SplitVTs.size() == 11.34k
) {
200
1.26k
    // Even if there is no splitting to do, we still want to replace the
201
1.26k
    // original type (e.g. pointer type -> integer).
202
1.26k
    auto Flags = OrigArg.Flags;
203
1.26k
    unsigned OriginalAlignment = DL.getABITypeAlignment(OrigArg.Ty);
204
1.26k
    Flags.setOrigAlign(OriginalAlignment);
205
1.26k
    SplitArgs.emplace_back(OrigArg.Reg, SplitVTs[0].getTypeForEVT(Ctx), Flags,
206
1.26k
                           OrigArg.IsFixed);
207
1.26k
    return;
208
1.26k
  }
209
82
210
82
  unsigned FirstRegIdx = SplitArgs.size();
211
523
  for (unsigned i = 0, e = SplitVTs.size(); 
i != e523
;
++i441
) {
212
441
    EVT SplitVT = SplitVTs[i];
213
441
    Type *SplitTy = SplitVT.getTypeForEVT(Ctx);
214
441
    auto Flags = OrigArg.Flags;
215
441
216
441
    unsigned OriginalAlignment = DL.getABITypeAlignment(SplitTy);
217
441
    Flags.setOrigAlign(OriginalAlignment);
218
441
219
441
    bool NeedsConsecutiveRegisters =
220
441
        TLI.functionArgumentNeedsConsecutiveRegisters(
221
441
            SplitTy, F->getCallingConv(), F->isVarArg());
222
441
    if (
NeedsConsecutiveRegisters441
) {
223
56
      Flags.setInConsecutiveRegs();
224
56
      if (i == e - 1)
225
16
        Flags.setInConsecutiveRegsLast();
226
56
    }
227
441
228
441
    SplitArgs.push_back(
229
441
        ArgInfo{MRI.createGenericVirtualRegister(getLLTForType(*SplitTy, DL)),
230
441
                SplitTy, Flags, OrigArg.IsFixed});
231
441
  }
232
82
233
523
  for (unsigned i = 0; 
i < Offsets.size()523
;
++i441
)
234
441
    PerformArgSplit(SplitArgs[FirstRegIdx + i].Reg, Offsets[i] * 8);
235
1.34k
}
236
237
/// Lower the return value for the already existing \p Ret. This assumes that
238
/// \p MIRBuilder's insertion point is correct.
239
bool ARMCallLowering::lowerReturnVal(MachineIRBuilder &MIRBuilder,
240
                                     const Value *Val, unsigned VReg,
241
261
                                     MachineInstrBuilder &Ret) const {
242
261
  if (!Val)
243
261
    // Nothing to do here.
244
14
    return true;
245
247
246
247
  auto &MF = MIRBuilder.getMF();
247
247
  const auto &F = *MF.getFunction();
248
247
249
247
  auto DL = MF.getDataLayout();
250
247
  auto &TLI = *getTLI<ARMTargetLowering>();
251
247
  if (!isSupportedType(DL, TLI, Val->getType()))
252
0
    return false;
253
247
254
247
  SmallVector<ArgInfo, 4> SplitVTs;
255
247
  SmallVector<unsigned, 4> Regs;
256
247
  ArgInfo RetInfo(VReg, Val->getType());
257
247
  setArgFlags(RetInfo, AttributeList::ReturnIndex, DL, F);
258
26
  splitToValueTypes(RetInfo, SplitVTs, MF, [&](unsigned Reg, uint64_t Offset) {
259
26
    Regs.push_back(Reg);
260
26
  });
261
247
262
247
  if (Regs.size() > 1)
263
10
    MIRBuilder.buildUnmerge(Regs, VReg);
264
261
265
261
  CCAssignFn *AssignFn =
266
261
      TLI.CCAssignFnForReturn(F.getCallingConv(), F.isVarArg());
267
261
268
261
  OutgoingValueHandler RetHandler(MIRBuilder, MF.getRegInfo(), Ret, AssignFn);
269
261
  return handleAssignments(MIRBuilder, SplitVTs, RetHandler);
270
261
}
271
272
bool ARMCallLowering::lowerReturn(MachineIRBuilder &MIRBuilder,
273
261
                                  const Value *Val, unsigned VReg) const {
274
261
  assert(!Val == !VReg && "Return value without a vreg");
275
261
276
261
  auto const &ST = MIRBuilder.getMF().getSubtarget<ARMSubtarget>();
277
261
  unsigned Opcode = ST.getReturnOpcode();
278
261
  auto Ret = MIRBuilder.buildInstrNoInsert(Opcode).add(predOps(ARMCC::AL));
279
261
280
261
  if (!lowerReturnVal(MIRBuilder, Val, VReg, Ret))
281
0
    return false;
282
261
283
261
  MIRBuilder.insertInstr(Ret);
284
261
  return true;
285
261
}
286
287
namespace {
288
289
/// Helper class for values coming in through an ABI boundary (used for handling
290
/// formal arguments and call return values).
291
struct IncomingValueHandler : public CallLowering::ValueHandler {
292
  IncomingValueHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI,
293
                       CCAssignFn AssignFn)
294
390
      : ValueHandler(MIRBuilder, MRI, AssignFn) {}
295
296
  unsigned getStackAddress(uint64_t Size, int64_t Offset,
297
140
                           MachinePointerInfo &MPO) override {
298
140
    assert((Size == 1 || Size == 2 || Size == 4 || Size == 8) &&
299
140
           "Unsupported size");
300
140
301
140
    auto &MFI = MIRBuilder.getMF().getFrameInfo();
302
140
303
140
    int FI = MFI.CreateFixedObject(Size, Offset, true);
304
140
    MPO = MachinePointerInfo::getFixedStack(MIRBuilder.getMF(), FI);
305
140
306
140
    unsigned AddrReg =
307
140
        MRI.createGenericVirtualRegister(LLT::pointer(MPO.getAddrSpace(), 32));
308
140
    MIRBuilder.buildFrameIndex(AddrReg, FI);
309
140
310
140
    return AddrReg;
311
140
  }
312
313
  void assignValueToAddress(unsigned ValVReg, unsigned Addr, uint64_t Size,
314
140
                            MachinePointerInfo &MPO, CCValAssign &VA) override {
315
140
    assert((Size == 1 || Size == 2 || Size == 4 || Size == 8) &&
316
140
           "Unsupported size");
317
140
318
140
    if (VA.getLocInfo() == CCValAssign::SExt ||
319
140
        
VA.getLocInfo() == CCValAssign::ZExt131
) {
320
14
      // If the value is zero- or sign-extended, its size becomes 4 bytes, so
321
14
      // that's what we should load.
322
14
      Size = 4;
323
14
      assert(MRI.getType(ValVReg).isScalar() && "Only scalars supported atm");
324
14
325
14
      auto LoadVReg = MRI.createGenericVirtualRegister(LLT::scalar(32));
326
14
      buildLoad(LoadVReg, Addr, Size, /* Alignment */ 0, MPO);
327
14
      MIRBuilder.buildTrunc(ValVReg, LoadVReg);
328
140
    } else {
329
126
      // If the value is not extended, a simple load will suffice.
330
126
      buildLoad(ValVReg, Addr, Size, /* Alignment */ 0, MPO);
331
126
    }
332
140
  }
333
334
  void buildLoad(unsigned Val, unsigned Addr, uint64_t Size, unsigned Alignment,
335
140
                 MachinePointerInfo &MPO) {
336
140
    auto MMO = MIRBuilder.getMF().getMachineMemOperand(
337
140
        MPO, MachineMemOperand::MOLoad, Size, Alignment);
338
140
    MIRBuilder.buildLoad(Val, Addr, *MMO);
339
140
  }
340
341
  void assignValueToReg(unsigned ValVReg, unsigned PhysReg,
342
748
                        CCValAssign &VA) override {
343
748
    assert(VA.isRegLoc() && "Value shouldn't be assigned to reg");
344
748
    assert(VA.getLocReg() == PhysReg && "Assigning to the wrong reg?");
345
748
346
748
    assert(VA.getValVT().getSizeInBits() <= 64 && "Unsupported value size");
347
748
    assert(VA.getLocVT().getSizeInBits() <= 64 && "Unsupported location size");
348
748
349
748
    // The necessary extensions are handled on the other side of the ABI
350
748
    // boundary.
351
748
    markPhysRegUsed(PhysReg);
352
748
    MIRBuilder.buildCopy(ValVReg, PhysReg);
353
748
  }
354
355
  unsigned assignCustomValue(const ARMCallLowering::ArgInfo &Arg,
356
48
                             ArrayRef<CCValAssign> VAs) override {
357
48
    CCValAssign VA = VAs[0];
358
48
    assert(VA.needsCustom() && "Value doesn't need custom handling");
359
48
    assert(VA.getValVT() == MVT::f64 && "Unsupported type");
360
48
361
48
    CCValAssign NextVA = VAs[1];
362
48
    assert(NextVA.needsCustom() && "Value doesn't need custom handling");
363
48
    assert(NextVA.getValVT() == MVT::f64 && "Unsupported type");
364
48
365
48
    assert(VA.getValNo() == NextVA.getValNo() &&
366
48
           "Values belong to different arguments");
367
48
368
48
    assert(VA.isRegLoc() && "Value should be in reg");
369
48
    assert(NextVA.isRegLoc() && "Value should be in reg");
370
48
371
48
    unsigned NewRegs[] = {MRI.createGenericVirtualRegister(LLT::scalar(32)),
372
48
                          MRI.createGenericVirtualRegister(LLT::scalar(32))};
373
48
374
48
    assignValueToReg(NewRegs[0], VA.getLocReg(), VA);
375
48
    assignValueToReg(NewRegs[1], NextVA.getLocReg(), NextVA);
376
48
377
48
    bool IsLittle = MIRBuilder.getMF().getSubtarget<ARMSubtarget>().isLittle();
378
48
    if (!IsLittle)
379
8
      std::swap(NewRegs[0], NewRegs[1]);
380
48
381
48
    MIRBuilder.buildMerge(Arg.Reg, NewRegs);
382
48
383
48
    return 1;
384
48
  }
385
386
  /// Marking a physical register as used is different between formal
387
  /// parameters, where it's a basic block live-in, and call returns, where it's
388
  /// an implicit-def of the call instruction.
389
  virtual void markPhysRegUsed(unsigned PhysReg) = 0;
390
};
391
392
struct FormalArgHandler : public IncomingValueHandler {
393
  FormalArgHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI,
394
                   CCAssignFn AssignFn)
395
208
      : IncomingValueHandler(MIRBuilder, MRI, AssignFn) {}
396
397
534
  void markPhysRegUsed(unsigned PhysReg) override {
398
534
    MIRBuilder.getMBB().addLiveIn(PhysReg);
399
534
  }
400
};
401
402
} // end anonymous namespace
403
404
bool ARMCallLowering::lowerFormalArguments(MachineIRBuilder &MIRBuilder,
405
                                           const Function &F,
406
322
                                           ArrayRef<unsigned> VRegs) const {
407
322
  // Quick exit if there aren't any args
408
322
  if (F.arg_empty())
409
74
    return true;
410
248
411
248
  
if (248
F.isVarArg()248
)
412
5
    return false;
413
243
414
243
  auto &MF = MIRBuilder.getMF();
415
243
  auto &MBB = MIRBuilder.getMBB();
416
243
  auto DL = MF.getDataLayout();
417
243
  auto &TLI = *getTLI<ARMTargetLowering>();
418
243
419
243
  auto Subtarget = TLI.getSubtarget();
420
243
421
243
  if (Subtarget->isThumb())
422
5
    return false;
423
238
424
238
  for (auto &Arg : F.args())
425
570
    
if (570
!isSupportedType(DL, TLI, Arg.getType())570
)
426
30
      return false;
427
208
428
208
  CCAssignFn *AssignFn =
429
208
      TLI.CCAssignFnForCall(F.getCallingConv(), F.isVarArg());
430
208
431
208
  FormalArgHandler ArgHandler(MIRBuilder, MIRBuilder.getMF().getRegInfo(),
432
208
                              AssignFn);
433
208
434
208
  SmallVector<ArgInfo, 8> ArgInfos;
435
208
  SmallVector<unsigned, 4> SplitRegs;
436
208
  unsigned Idx = 0;
437
540
  for (auto &Arg : F.args()) {
438
540
    ArgInfo AInfo(VRegs[Idx], Arg.getType());
439
540
    setArgFlags(AInfo, Idx + AttributeList::FirstArgIndex, DL, F);
440
540
441
540
    SplitRegs.clear();
442
540
443
130
    splitToValueTypes(AInfo, ArgInfos, MF, [&](unsigned Reg, uint64_t Offset) {
444
130
      SplitRegs.push_back(Reg);
445
130
    });
446
540
447
540
    if (!SplitRegs.empty())
448
20
      MIRBuilder.buildMerge(VRegs[Idx], SplitRegs);
449
540
450
540
    Idx++;
451
540
  }
452
208
453
208
  if (!MBB.empty())
454
14
    MIRBuilder.setInstr(*MBB.begin());
455
322
456
322
  return handleAssignments(MIRBuilder, ArgInfos, ArgHandler);
457
322
}
458
459
namespace {
460
461
struct CallReturnHandler : public IncomingValueHandler {
462
  CallReturnHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI,
463
                    MachineInstrBuilder MIB, CCAssignFn *AssignFn)
464
182
      : IncomingValueHandler(MIRBuilder, MRI, AssignFn), MIB(MIB) {}
465
466
214
  void markPhysRegUsed(unsigned PhysReg) override {
467
214
    MIB.addDef(PhysReg, RegState::Implicit);
468
214
  }
469
470
  MachineInstrBuilder MIB;
471
};
472
473
} // end anonymous namespace
474
475
bool ARMCallLowering::lowerCall(MachineIRBuilder &MIRBuilder,
476
                                CallingConv::ID CallConv,
477
                                const MachineOperand &Callee,
478
                                const ArgInfo &OrigRet,
479
197
                                ArrayRef<ArgInfo> OrigArgs) const {
480
197
  MachineFunction &MF = MIRBuilder.getMF();
481
197
  const auto &TLI = *getTLI<ARMTargetLowering>();
482
197
  const auto &DL = MF.getDataLayout();
483
197
  const auto &STI = MF.getSubtarget();
484
197
  const TargetRegisterInfo *TRI = STI.getRegisterInfo();
485
197
  MachineRegisterInfo &MRI = MF.getRegInfo();
486
197
487
197
  if (MF.getSubtarget<ARMSubtarget>().genLongCalls())
488
0
    return false;
489
197
490
197
  auto CallSeqStart = MIRBuilder.buildInstr(ARM::ADJCALLSTACKDOWN);
491
197
492
197
  // Create the call instruction so we can add the implicit uses of arg
493
197
  // registers, but don't insert it yet.
494
197
  auto MIB = MIRBuilder.buildInstrNoInsert(ARM::BLX).add(Callee).addRegMask(
495
197
      TRI->getCallPreservedMask(MF, CallConv));
496
197
  if (
Callee.isReg()197
) {
497
2
    auto CalleeReg = Callee.getReg();
498
2
    if (
CalleeReg && 2
!TRI->isPhysicalRegister(CalleeReg)2
)
499
2
      MIB->getOperand(0).setReg(constrainOperandRegClass(
500
2
          MF, *TRI, MRI, *STI.getInstrInfo(), *STI.getRegBankInfo(),
501
2
          *MIB.getInstr(), MIB->getDesc(), CalleeReg, 0));
502
2
  }
503
197
504
197
  SmallVector<ArgInfo, 8> ArgInfos;
505
378
  for (auto Arg : OrigArgs) {
506
378
    if (!isSupportedType(DL, TLI, Arg.Ty))
507
0
      return false;
508
378
509
378
    
if (378
!Arg.IsFixed378
)
510
5
      return false;
511
373
512
373
    SmallVector<unsigned, 8> Regs;
513
130
    splitToValueTypes(Arg, ArgInfos, MF, [&](unsigned Reg, uint64_t Offset) {
514
130
      Regs.push_back(Reg);
515
130
    });
516
373
517
373
    if (Regs.size() > 1)
518
20
      MIRBuilder.buildUnmerge(Regs, Arg.Reg);
519
378
  }
520
197
521
192
  auto ArgAssignFn = TLI.CCAssignFnForCall(CallConv, /*IsVarArg=*/false);
522
192
  OutgoingValueHandler ArgHandler(MIRBuilder, MRI, MIB, ArgAssignFn);
523
192
  if (!handleAssignments(MIRBuilder, ArgInfos, ArgHandler))
524
0
    return false;
525
192
526
192
  // Now we can add the actual call instruction to the correct basic block.
527
192
  MIRBuilder.insertInstr(MIB);
528
192
529
192
  if (
!OrigRet.Ty->isVoidTy()192
) {
530
182
    if (!isSupportedType(DL, TLI, OrigRet.Ty))
531
0
      return false;
532
182
533
182
    ArgInfos.clear();
534
182
    SmallVector<unsigned, 8> SplitRegs;
535
182
    splitToValueTypes(OrigRet, ArgInfos, MF,
536
155
                      [&](unsigned Reg, uint64_t Offset) {
537
155
                        SplitRegs.push_back(Reg);
538
155
                      });
539
182
540
182
    auto RetAssignFn = TLI.CCAssignFnForReturn(CallConv, /*IsVarArg=*/false);
541
182
    CallReturnHandler RetHandler(MIRBuilder, MRI, MIB, RetAssignFn);
542
182
    if (!handleAssignments(MIRBuilder, ArgInfos, RetHandler))
543
10
      return false;
544
172
545
172
    
if (172
!SplitRegs.empty()172
) {
546
22
      // We have split the value and allocated each individual piece, now build
547
22
      // it up again.
548
22
      MIRBuilder.buildMerge(OrigRet.Reg, SplitRegs);
549
22
    }
550
182
  }
551
192
552
192
  // We now know the size of the stack - update the ADJCALLSTACKDOWN
553
192
  // accordingly.
554
182
  CallSeqStart.addImm(ArgHandler.StackSize).addImm(0).add(predOps(ARMCC::AL));
555
182
556
182
  MIRBuilder.buildInstr(ARM::ADJCALLSTACKUP)
557
182
      .addImm(ArgHandler.StackSize)
558
182
      .addImm(0)
559
182
      .add(predOps(ARMCC::AL));
560
182
561
182
  return true;
562
197
}