Coverage Report

Created: 2019-07-24 05:18

/Users/buildslave/jenkins/workspace/clang-stage2-coverage-R/llvm/lib/Target/AArch64/AArch64RegisterInfo.cpp
Line
Count
Source (jump to first uncovered line)
1
//===- AArch64RegisterInfo.cpp - AArch64 Register Information -------------===//
2
//
3
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4
// See https://llvm.org/LICENSE.txt for license information.
5
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6
//
7
//===----------------------------------------------------------------------===//
8
//
9
// This file contains the AArch64 implementation of the TargetRegisterInfo
10
// class.
11
//
12
//===----------------------------------------------------------------------===//
13
14
#include "AArch64RegisterInfo.h"
15
#include "AArch64FrameLowering.h"
16
#include "AArch64InstrInfo.h"
17
#include "AArch64MachineFunctionInfo.h"
18
#include "AArch64Subtarget.h"
19
#include "MCTargetDesc/AArch64AddressingModes.h"
20
#include "llvm/ADT/BitVector.h"
21
#include "llvm/ADT/Triple.h"
22
#include "llvm/CodeGen/MachineFrameInfo.h"
23
#include "llvm/CodeGen/MachineInstrBuilder.h"
24
#include "llvm/CodeGen/MachineRegisterInfo.h"
25
#include "llvm/CodeGen/RegisterScavenging.h"
26
#include "llvm/IR/Function.h"
27
#include "llvm/IR/DiagnosticInfo.h"
28
#include "llvm/Support/raw_ostream.h"
29
#include "llvm/CodeGen/TargetFrameLowering.h"
30
#include "llvm/Target/TargetOptions.h"
31
32
using namespace llvm;
33
34
#define GET_REGINFO_TARGET_DESC
35
#include "AArch64GenRegisterInfo.inc"
36
37
AArch64RegisterInfo::AArch64RegisterInfo(const Triple &TT)
    : AArch64GenRegisterInfo(AArch64::LR), TT(TT) {
  // LR is passed to the TableGen-generated base as the return-address
  // register; also install the LLVM-to-CodeView register number mapping
  // used when emitting CodeView debug info.
  AArch64_MC::initLLVMToCVRegMapping(this);
}
41
42
// Return the callee-saved register list for this function, selected by
// target OS and calling convention. The checks are ordered: Windows first,
// then the special conventions, then the SwiftError carve-out, and finally
// the default AAPCS list.
const MCPhysReg *
AArch64RegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
  assert(MF && "Invalid MachineFunction pointer.");
  if (MF->getSubtarget<AArch64Subtarget>().isTargetWindows())
    return CSR_Win_AArch64_AAPCS_SaveList;
  if (MF->getFunction().getCallingConv() == CallingConv::GHC)
    // GHC set of callee saved regs is empty as all those regs are
    // used for passing STG regs around
    return CSR_AArch64_NoRegs_SaveList;
  if (MF->getFunction().getCallingConv() == CallingConv::AnyReg)
    return CSR_AArch64_AllRegs_SaveList;
  if (MF->getFunction().getCallingConv() == CallingConv::AArch64_VectorCall)
    return CSR_AArch64_AAVPCS_SaveList;
  if (MF->getFunction().getCallingConv() == CallingConv::CXX_FAST_TLS)
    // Split-CSR functions save part of the CSRs in the prologue/epilogue and
    // the rest via copies (see getCalleeSavedRegsViaCopy).
    return MF->getInfo<AArch64FunctionInfo>()->isSplitCSR() ?
           CSR_AArch64_CXX_TLS_Darwin_PE_SaveList :
           CSR_AArch64_CXX_TLS_Darwin_SaveList;
  // Functions that use swifterror need a list that additionally treats the
  // error register as callee-saved.
  if (MF->getSubtarget<AArch64Subtarget>().getTargetLowering()
          ->supportSwiftError() &&
      MF->getFunction().getAttributes().hasAttrSomewhere(
          Attribute::SwiftError))
    return CSR_AArch64_AAPCS_SwiftError_SaveList;
  if (MF->getFunction().getCallingConv() == CallingConv::PreserveMost)
    return CSR_AArch64_RT_MostRegs_SaveList;
  else
    return CSR_AArch64_AAPCS_SaveList;
}
69
70
// Registers that a split-CSR CXX_FAST_TLS function saves by copying into
// virtual registers instead of spilling in the prologue. All other
// functions save nothing this way.
const MCPhysReg *AArch64RegisterInfo::getCalleeSavedRegsViaCopy(
    const MachineFunction *MF) const {
  assert(MF && "Invalid MachineFunction pointer.");
  if (MF->getFunction().getCallingConv() != CallingConv::CXX_FAST_TLS)
    return nullptr;
  if (!MF->getInfo<AArch64FunctionInfo>()->isSplitCSR())
    return nullptr;
  return CSR_AArch64_CXX_TLS_Darwin_ViaCopy_SaveList;
}
78
79
void AArch64RegisterInfo::UpdateCustomCalleeSavedRegs(
80
46
    MachineFunction &MF) const {
81
46
  const MCPhysReg *CSRs = getCalleeSavedRegs(&MF);
82
46
  SmallVector<MCPhysReg, 32> UpdatedCSRs;
83
966
  for (const MCPhysReg *I = CSRs; *I; 
++I920
)
84
920
    UpdatedCSRs.push_back(*I);
85
46
86
1.47k
  for (size_t i = 0; i < AArch64::GPR64commonRegClass.getNumRegs(); 
++i1.42k
) {
87
1.42k
    if (MF.getSubtarget<AArch64Subtarget>().isXRegCustomCalleeSaved(i)) {
88
78
      UpdatedCSRs.push_back(AArch64::GPR64commonRegClass.getRegister(i));
89
78
    }
90
1.42k
  }
91
46
  // Register lists are zero-terminated.
92
46
  UpdatedCSRs.push_back(0);
93
46
  MF.getRegInfo().setCalleeSavedRegs(UpdatedCSRs);
94
46
}
95
96
const TargetRegisterClass *
AArch64RegisterInfo::getSubClassWithSubReg(const TargetRegisterClass *RC,
                                       unsigned Idx) const {
  // Edge case for the GPR "all" classes: their hsub sub-register only
  // exists on the FPR side, so map them to the matching FPR class.
  if (Idx == AArch64::hsub) {
    if (RC == &AArch64::GPR32allRegClass)
      return &AArch64::FPR32RegClass;
    if (RC == &AArch64::GPR64allRegClass)
      return &AArch64::FPR64RegClass;
  }

  // Forward to TableGen's default version.
  return AArch64GenRegisterInfo::getSubClassWithSubReg(RC, Idx);
}
108
109
// Return the register mask describing what a call with calling convention
// CC preserves. Every branch additionally has a ShadowCallStack (SCS)
// variant, selected when the caller carries the ShadowCallStack attribute.
const uint32_t *
AArch64RegisterInfo::getCallPreservedMask(const MachineFunction &MF,
                                          CallingConv::ID CC) const {
  bool SCS = MF.getFunction().hasFnAttribute(Attribute::ShadowCallStack);
  if (CC == CallingConv::GHC)
    // This is academic because all GHC calls are (supposed to be) tail calls
    return SCS ? CSR_AArch64_NoRegs_SCS_RegMask : CSR_AArch64_NoRegs_RegMask;
  if (CC == CallingConv::AnyReg)
    return SCS ? CSR_AArch64_AllRegs_SCS_RegMask : CSR_AArch64_AllRegs_RegMask;
  if (CC == CallingConv::CXX_FAST_TLS)
    return SCS ? CSR_AArch64_CXX_TLS_Darwin_SCS_RegMask
               : CSR_AArch64_CXX_TLS_Darwin_RegMask;
  if (CC == CallingConv::AArch64_VectorCall)
    return SCS ? CSR_AArch64_AAVPCS_SCS_RegMask : CSR_AArch64_AAVPCS_RegMask;
  // swifterror functions need a mask that keeps the error register live
  // across calls; checked before the generic default.
  if (MF.getSubtarget<AArch64Subtarget>().getTargetLowering()
          ->supportSwiftError() &&
      MF.getFunction().getAttributes().hasAttrSomewhere(Attribute::SwiftError))
    return SCS ? CSR_AArch64_AAPCS_SwiftError_SCS_RegMask
               : CSR_AArch64_AAPCS_SwiftError_RegMask;
  if (CC == CallingConv::PreserveMost)
    return SCS ? CSR_AArch64_RT_MostRegs_SCS_RegMask
               : CSR_AArch64_RT_MostRegs_RegMask;
  else
    return SCS ? CSR_AArch64_AAPCS_SCS_RegMask : CSR_AArch64_AAPCS_RegMask;
}
134
135
36
// Preservation mask for TLS-descriptor / TLV resolver calls. Darwin and
// ELF use different lowering sequences and therefore different masks; any
// other object format is unsupported here.
const uint32_t *AArch64RegisterInfo::getTLSCallPreservedMask() const {
  if (TT.isOSDarwin())
    return CSR_AArch64_TLS_Darwin_RegMask;

  assert(TT.isOSBinFormatELF() && "Invalid target");
  return CSR_AArch64_TLS_ELF_RegMask;
}
142
143
// Copy *Mask into function-owned storage and additionally mark every
// custom callee-saved X register (and all of its sub-registers) as
// preserved, then point *Mask at the new mask.
void AArch64RegisterInfo::UpdateCustomCallPreservedMask(MachineFunction &MF,
                                                 const uint32_t **Mask) const {
  uint32_t *UpdatedMask = MF.allocateRegMask();
  unsigned RegMaskSize = MachineOperand::getRegMaskSize(getNumRegs());
  memcpy(UpdatedMask, *Mask, sizeof(UpdatedMask[0]) * RegMaskSize);

  for (size_t i = 0; i < AArch64::GPR64commonRegClass.getNumRegs(); ++i) {
    if (MF.getSubtarget<AArch64Subtarget>().isXRegCustomCalleeSaved(i)) {
      // Iterate the register itself plus its sub-registers (IncludeSelf
      // is true) and set the corresponding bit for each.
      for (MCSubRegIterator SubReg(AArch64::GPR64commonRegClass.getRegister(i),
                                   this, true);
           SubReg.isValid(); ++SubReg) {
        // See TargetRegisterInfo::getCallPreservedMask for how to interpret the
        // register mask.
        UpdatedMask[*SubReg / 32] |= 1u << (*SubReg % 32);
      }
    }
  }
  *Mask = UpdatedMask;
}
162
163
20
// Mask stating that no registers are preserved across the call.
const uint32_t *AArch64RegisterInfo::getNoPreservedMask() const {
  return CSR_AArch64_NoRegs_RegMask;
}
166
167
const uint32_t *
AArch64RegisterInfo::getThisReturnPreservedMask(const MachineFunction &MF,
                                                CallingConv::ID CC) const {
  // This should return a register mask that is the same as that returned by
  // getCallPreservedMask but that additionally preserves the register used for
  // the first i64 argument (which must also be the register used to return a
  // single i64 return value)
  //
  // In case that the calling convention does not use the same register for
  // both, the function should return NULL (does not currently apply)
  assert(CC != CallingConv::GHC && "should not be GHC calling convention.");
  return CSR_AArch64_AAPCS_ThisReturn_RegMask;
}
180
181
4
// Mask used for calls to the Windows stack-probe helper (__chkstk).
const uint32_t *AArch64RegisterInfo::getWindowsStackProbePreservedMask() const {
  return CSR_AArch64_StackProbe_Windows_RegMask;
}
184
185
// Compute the set of registers the allocator must never use for this
// function. Reserving a W register via markSuperRegs also reserves its
// aliasing X register.
BitVector
AArch64RegisterInfo::getReservedRegs(const MachineFunction &MF) const {
  const AArch64FrameLowering *TFI = getFrameLowering(MF);

  // FIXME: avoid re-calculating this every time.
  BitVector Reserved(getNumRegs());
  // SP and the zero register are always reserved.
  markSuperRegs(Reserved, AArch64::WSP);
  markSuperRegs(Reserved, AArch64::WZR);

  // X29 is the frame pointer when one is needed, and always on Darwin.
  if (TFI->hasFP(MF) || TT.isOSDarwin())
    markSuperRegs(Reserved, AArch64::W29);

  // Subtarget-reserved X registers (e.g. via -ffixed-xN).
  for (size_t i = 0; i < AArch64::GPR32commonRegClass.getNumRegs(); ++i) {
    if (MF.getSubtarget<AArch64Subtarget>().isXRegisterReserved(i))
      markSuperRegs(Reserved, AArch64::GPR32commonRegClass.getRegister(i));
  }

  // X19 is the base pointer when the function needs one.
  if (hasBasePointer(MF))
    markSuperRegs(Reserved, AArch64::W19);

  // SLH uses register W16/X16 as the taint register.
  if (MF.getFunction().hasFnAttribute(Attribute::SpeculativeLoadHardening))
    markSuperRegs(Reserved, AArch64::W16);

  assert(checkAllSuperRegsMarked(Reserved));
  return Reserved;
}
212
213
bool AArch64RegisterInfo::isReservedReg(const MachineFunction &MF,
214
14.3M
                                      unsigned Reg) const {
215
14.3M
  return getReservedRegs(MF)[Reg];
216
14.3M
}
217
218
1.50M
bool AArch64RegisterInfo::isAnyArgRegReserved(const MachineFunction &MF) const {
219
1.50M
  return std::any_of(std::begin(*AArch64::GPR64argRegClass.MC),
220
1.50M
                     std::end(*AArch64::GPR64argRegClass.MC),
221
12.0M
                     [this, &MF](MCPhysReg r){return isReservedReg(MF, r);});
222
1.50M
}
223
224
// Emit an unsupported-feature diagnostic when a call is made while one of
// the argument registers is reserved.
void AArch64RegisterInfo::emitReservedArgRegCallError(
    const MachineFunction &MF) const {
  const Function &F = MF.getFunction();
  F.getContext().diagnose(DiagnosticInfoUnsupported{F, "AArch64 doesn't support"
    " function calls if any of the argument registers is reserved."});
}
230
231
bool AArch64RegisterInfo::isAsmClobberable(const MachineFunction &MF,
232
2.17k
                                          unsigned PhysReg) const {
233
2.17k
  return !isReservedReg(MF, PhysReg);
234
2.17k
}
235
236
41.0M
bool AArch64RegisterInfo::isConstantPhysReg(unsigned PhysReg) const {
237
41.0M
  return PhysReg == AArch64::WZR || 
PhysReg == AArch64::XZR38.6M
;
238
41.0M
}
239
240
// All AArch64 pointers are 64-bit GPRs; the class including SP is returned
// for every pointer kind.
const TargetRegisterClass *
AArch64RegisterInfo::getPointerRegClass(const MachineFunction &MF,
                                      unsigned Kind) const {
  return &AArch64::GPR64spRegClass;
}
245
246
// Class to copy through when a value must cross register classes. NZCV
// (the CCR class) can only be moved via a GPR using MRS/MSR.
const TargetRegisterClass *
AArch64RegisterInfo::getCrossCopyRegClass(const TargetRegisterClass *RC) const {
  if (RC != &AArch64::CCRRegClass)
    return RC;
  return &AArch64::GPR64RegClass; // Only MSR & MRS copy NZCV.
}
252
253
1.19k
// X19 serves as the base pointer when one is needed (see hasBasePointer).
unsigned AArch64RegisterInfo::getBaseRegister() const { return AArch64::X19; }
254
255
16.1M
// Decide whether this function needs a base pointer (X19) in addition to
// FP/SP for addressing locals.
bool AArch64RegisterInfo::hasBasePointer(const MachineFunction &MF) const {
  const MachineFrameInfo &MFI = MF.getFrameInfo();

  // In the presence of variable sized objects or funclets, if the fixed stack
  // size is large enough that referencing from the FP won't result in things
  // being in range relatively often, we can use a base pointer to allow access
  // from the other direction like the SP normally works.
  //
  // Furthermore, if both variable sized objects are present, and the
  // stack needs to be dynamically re-aligned, the base pointer is the only
  // reliable way to reference the locals.
  if (MFI.hasVarSizedObjects() || MF.hasEHFunclets()) {
    if (needsStackRealignment(MF))
      return true;
    // Conservatively estimate whether the negative offset from the frame
    // pointer will be sufficient to reach. If a function has a smallish
    // frame, it's less likely to have lots of spills and callee saved
    // space, so it's all more likely to be within range of the frame pointer.
    // If it's wrong, we'll materialize the constant and still get to the
    // object; it's just suboptimal. Negative offsets use the unscaled
    // load/store instructions, which have a 9-bit signed immediate.
    return MFI.getLocalFrameSize() >= 256;
  }

  return false;
}
281
282
// Register used as the base for frame-index references: FP when the
// function keeps a frame pointer, SP otherwise.
Register
AArch64RegisterInfo::getFrameRegister(const MachineFunction &MF) const {
  const AArch64FrameLowering *TFI = getFrameLowering(MF);
  if (TFI->hasFP(MF))
    return AArch64::FP;
  return AArch64::SP;
}
287
288
// AArch64 always enables the register scavenger for frame-index
// elimination (see eliminateFrameIndex).
bool AArch64RegisterInfo::requiresRegisterScavenging(
    const MachineFunction &MF) const {
  return true;
}
292
293
// Allow LocalStackSlotAllocation to create virtual base registers
// (see needsFrameBaseReg / materializeFrameBaseRegister).
bool AArch64RegisterInfo::requiresVirtualBaseRegisters(
    const MachineFunction &MF) const {
  return true;
}
297
298
bool
299
183k
AArch64RegisterInfo::useFPForScavengingIndex(const MachineFunction &MF) const {
300
183k
  // This function indicates whether the emergency spillslot should be placed
301
183k
  // close to the beginning of the stackframe (closer to FP) or the end
302
183k
  // (closer to SP).
303
183k
  //
304
183k
  // The beginning works most reliably if we have a frame pointer.
305
183k
  const AArch64FrameLowering &TFI = *getFrameLowering(MF);
306
183k
  return TFI.hasFP(MF);
307
183k
}
308
309
// Frame-index elimination may need a scratch register, so always request
// scavenging support for it.
bool AArch64RegisterInfo::requiresFrameIndexScavenging(
    const MachineFunction &MF) const {
  return true;
}
313
314
bool
315
81.4k
AArch64RegisterInfo::cannotEliminateFrame(const MachineFunction &MF) const {
316
81.4k
  const MachineFrameInfo &MFI = MF.getFrameInfo();
317
81.4k
  if (MF.getTarget().Options.DisableFramePointerElim(MF) && 
MFI.adjustsStack()46.7k
)
318
0
    return true;
319
81.4k
  return MFI.hasVarSizedObjects() || MFI.isFrameAddressTaken();
320
81.4k
}
321
322
/// needsFrameBaseReg - Returns true if the instruction's frame index
/// reference would be better served by a base register other than FP
/// or SP. Used by LocalStackFrameAllocation to determine which frame index
/// references it should create new base registers for.
bool AArch64RegisterInfo::needsFrameBaseReg(MachineInstr *MI,
                                            int64_t Offset) const {
  // Find the FrameIndex operand; every candidate instruction must have one.
  for (unsigned i = 0; !MI->getOperand(i).isFI(); ++i)
    assert(i < MI->getNumOperands() &&
           "Instr doesn't have FrameIndex operand!");

  // It's the load/store FI references that cause issues, as it can be difficult
  // to materialize the offset if it won't fit in the literal field. Estimate
  // based on the size of the local frame and some conservative assumptions
  // about the rest of the stack frame (note, this is pre-regalloc, so
  // we don't know everything for certain yet) whether this offset is likely
  // to be out of range of the immediate. Return true if so.

  // We only generate virtual base registers for loads and stores, so
  // return false for everything else.
  if (!MI->mayLoad() && !MI->mayStore())
    return false;

  // Without a virtual base register, if the function has variable sized
  // objects, all fixed-size local references will be via the frame pointer,
  // Approximate the offset and see if it's legal for the instruction.
  // Note that the incoming offset is based on the SP value at function entry,
  // so it'll be negative.
  MachineFunction &MF = *MI->getParent()->getParent();
  const AArch64FrameLowering *TFI = getFrameLowering(MF);
  MachineFrameInfo &MFI = MF.getFrameInfo();

  // Estimate an offset from the frame pointer.
  // Conservatively assume all GPR callee-saved registers get pushed.
  // FP, LR, X19-X28, D8-D15. 64-bits each.
  int64_t FPOffset = Offset - 16 * 20;
  // Estimate an offset from the stack pointer.
  // The incoming offset is relating to the SP at the start of the function,
  // but when we access the local it'll be relative to the SP after local
  // allocation, so adjust our SP-relative offset by that allocation size.
  Offset += MFI.getLocalFrameSize();
  // Assume that we'll have at least some spill slots allocated.
  // FIXME: This is a total SWAG number. We should run some statistics
  //        and pick a real one.
  Offset += 128; // 128 bytes of spill slots

  // If there is a frame pointer, try using it.
  // The FP is only available if there is no dynamic realignment. We
  // don't know for sure yet whether we'll need that, so we guess based
  // on whether there are any local variables that would trigger it.
  if (TFI->hasFP(MF) && isFrameOffsetLegal(MI, AArch64::FP, FPOffset))
    return false;

  // If we can reference via the stack pointer or base pointer, try that.
  // FIXME: This (and the code that resolves the references) can be improved
  //        to only disallow SP relative references in the live range of
  //        the VLA(s). In practice, it's unclear how much difference that
  //        would make, but it may be worth doing.
  if (isFrameOffsetLegal(MI, AArch64::SP, Offset))
    return false;

  // The offset likely isn't legal; we want to allocate a virtual base register.
  return true;
}
385
386
// Check whether MI can directly encode Offset relative to BaseReg by
// querying the target's frame-offset legality helper.
bool AArch64RegisterInfo::isFrameOffsetLegal(const MachineInstr *MI,
                                             unsigned BaseReg,
                                             int64_t Offset) const {
  assert(Offset <= INT_MAX && "Offset too big to fit in int.");
  assert(MI && "Unable to get the legal offset for nil instruction.");
  // isAArch64FrameOffsetLegal takes the offset by reference as an int; the
  // updated value is discarded here, we only care about the legality bit.
  int SaveOffset = Offset;
  return isAArch64FrameOffsetLegal(*MI, SaveOffset) & AArch64FrameOffsetIsLegal;
}
394
395
/// Insert defining instruction(s) for BaseReg to be a pointer to FrameIdx
/// at the beginning of the basic block.
void AArch64RegisterInfo::materializeFrameBaseRegister(MachineBasicBlock *MBB,
                                                       unsigned BaseReg,
                                                       int FrameIdx,
                                                       int64_t Offset) const {
  MachineBasicBlock::iterator Ins = MBB->begin();
  DebugLoc DL; // Defaults to "unknown"
  if (Ins != MBB->end())
    DL = Ins->getDebugLoc();
  const MachineFunction &MF = *MBB->getParent();
  const AArch64InstrInfo *TII =
      MF.getSubtarget<AArch64Subtarget>().getInstrInfo();
  // The base register is defined by an ADDXri of the frame index, so make
  // sure BaseReg's class is compatible with that instruction's def operand.
  const MCInstrDesc &MCID = TII->get(AArch64::ADDXri);
  MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
  MRI.constrainRegClass(BaseReg, TII->getRegClass(MCID, 0, this, MF));
  unsigned Shifter = AArch64_AM::getShifterImm(AArch64_AM::LSL, 0);

  // BaseReg = ADDXri <FrameIdx>, Offset, LSL #0
  BuildMI(*MBB, Ins, DL, MCID, BaseReg)
      .addFrameIndex(FrameIdx)
      .addImm(Offset)
      .addImm(Shifter);
}
418
419
void AArch64RegisterInfo::resolveFrameIndex(MachineInstr &MI, unsigned BaseReg,
420
1.78k
                                            int64_t Offset) const {
421
1.78k
  int Off = Offset; // ARM doesn't need the general 64-bit offsets
422
1.78k
  unsigned i = 0;
423
1.78k
424
3.56k
  while (!MI.getOperand(i).isFI()) {
425
1.78k
    ++i;
426
1.78k
    assert(i < MI.getNumOperands() && "Instr doesn't have FrameIndex operand!");
427
1.78k
  }
428
1.78k
  const MachineFunction *MF = MI.getParent()->getParent();
429
1.78k
  const AArch64InstrInfo *TII =
430
1.78k
      MF->getSubtarget<AArch64Subtarget>().getInstrInfo();
431
1.78k
  bool Done = rewriteAArch64FrameIndex(MI, i, BaseReg, Off, TII);
432
1.78k
  assert(Done && "Unable to resolve frame index!");
433
1.78k
  (void)Done;
434
1.78k
}
435
436
// Replace the abstract frame-index operand of *II with a concrete base
// register + offset. Special instructions (dbg_value, stackmap, patchpoint,
// LOCAL_ESCAPE, TAGPstack) are handled first; everything else goes through
// rewriteAArch64FrameIndex, falling back to materializing the address in a
// scratch register when the offset does not fit the instruction.
void AArch64RegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
                                              int SPAdj, unsigned FIOperandNum,
                                              RegScavenger *RS) const {
  assert(SPAdj == 0 && "Unexpected");

  MachineInstr &MI = *II;
  MachineBasicBlock &MBB = *MI.getParent();
  MachineFunction &MF = *MBB.getParent();
  const AArch64InstrInfo *TII =
      MF.getSubtarget<AArch64Subtarget>().getInstrInfo();
  const AArch64FrameLowering *TFI = getFrameLowering(MF);

  int FrameIndex = MI.getOperand(FIOperandNum).getIndex();
  unsigned FrameReg;
  int Offset;

  // Special handling of dbg_value, stackmap and patchpoint instructions.
  if (MI.isDebugValue() || MI.getOpcode() == TargetOpcode::STACKMAP ||
      MI.getOpcode() == TargetOpcode::PATCHPOINT) {
    Offset = TFI->resolveFrameIndexReference(MF, FrameIndex, FrameReg,
                                             /*PreferFP=*/true,
                                             /*ForSimm=*/false);
    Offset += MI.getOperand(FIOperandNum + 1).getImm();
    MI.getOperand(FIOperandNum).ChangeToRegister(FrameReg, false /*isDef*/);
    MI.getOperand(FIOperandNum + 1).ChangeToImmediate(Offset);
    return;
  }

  // LOCAL_ESCAPE records the offset itself, not a register+offset pair.
  if (MI.getOpcode() == TargetOpcode::LOCAL_ESCAPE) {
    MachineOperand &FI = MI.getOperand(FIOperandNum);
    Offset = TFI->getNonLocalFrameIndexReference(MF, FrameIndex);
    FI.ChangeToImmediate(Offset);
    return;
  }

  if (MI.getOpcode() == AArch64::TAGPstack) {
    // TAGPstack must use the virtual frame register in its 3rd operand.
    const MachineFrameInfo &MFI = MF.getFrameInfo();
    const AArch64FunctionInfo *AFI = MF.getInfo<AArch64FunctionInfo>();
    FrameReg = MI.getOperand(3).getReg();
    Offset =
        MFI.getObjectOffset(FrameIndex) + AFI->getTaggedBasePointerOffset();
  } else {
    Offset = TFI->resolveFrameIndexReference(
        MF, FrameIndex, FrameReg, /*PreferFP=*/false, /*ForSimm=*/true);
  }

  // Modify MI as necessary to handle as much of 'Offset' as possible
  if (rewriteAArch64FrameIndex(MI, FIOperandNum, FrameReg, Offset, TII))
    return;

  assert((!RS || !RS->isScavengingFrameIndex(FrameIndex)) &&
         "Emergency spill slot is out of reach");

  // If we get here, the immediate doesn't fit into the instruction.  We folded
  // as much as possible above.  Handle the rest, providing a register that is
  // SP+LargeImm.
  unsigned ScratchReg =
      MF.getRegInfo().createVirtualRegister(&AArch64::GPR64RegClass);
  emitFrameOffset(MBB, II, MI.getDebugLoc(), ScratchReg, FrameReg, Offset, TII);
  MI.getOperand(FIOperandNum).ChangeToRegister(ScratchReg, false, false, true);
}
498
499
// Upper bound on how many registers of class RC the scheduler may assume
// are available in MF; 0 means "no limit tracked for this class".
unsigned AArch64RegisterInfo::getRegPressureLimit(const TargetRegisterClass *RC,
                                                  MachineFunction &MF) const {
  const AArch64FrameLowering *TFI = getFrameLowering(MF);

  switch (RC->getID()) {
  default:
    return 0;
  case AArch64::GPR32RegClassID:
  case AArch64::GPR32spRegClassID:
  case AArch64::GPR32allRegClassID:
  case AArch64::GPR64spRegClassID:
  case AArch64::GPR64allRegClassID:
  case AArch64::GPR64RegClassID:
  case AArch64::GPR32commonRegClassID:
  case AArch64::GPR64commonRegClassID:
    // 32 GPRs minus the registers this function cannot allocate.
    return 32 - 1                                   // XZR/SP
              - (TFI->hasFP(MF) || TT.isOSDarwin()) // FP
              - MF.getSubtarget<AArch64Subtarget>().getNumXRegisterReserved()
              - hasBasePointer(MF);  // X19
  case AArch64::FPR8RegClassID:
  case AArch64::FPR16RegClassID:
  case AArch64::FPR32RegClassID:
  case AArch64::FPR64RegClassID:
  case AArch64::FPR128RegClassID:
    return 32;

  case AArch64::DDRegClassID:
  case AArch64::DDDRegClassID:
  case AArch64::DDDDRegClassID:
  case AArch64::QQRegClassID:
  case AArch64::QQQRegClassID:
  case AArch64::QQQQRegClassID:
    return 32;

  case AArch64::FPR128_loRegClassID:
    // Only the lower 16 Q registers (v0-v15) belong to this class.
    return 16;
  }
}
537
538
unsigned AArch64RegisterInfo::getLocalAddressRegister(
539
15
  const MachineFunction &MF) const {
540
15
  const auto &MFI = MF.getFrameInfo();
541
15
  if (!MF.hasEHFunclets() && 
!MFI.hasVarSizedObjects()0
)
542
0
    return AArch64::SP;
543
15
  else if (needsStackRealignment(MF))
544
6
    return getBaseRegister();
545
9
  return getFrameRegister(MF);
546
9
}