Coverage Report

Created: 2017-10-03 07:32

/Users/buildslave/jenkins/sharedspace/clang-stage2-coverage-R@2/llvm/lib/Target/Mips/MipsFastISel.cpp
Line
Count
Source
1
//===- MipsFastISel.cpp - Mips FastISel implementation --------------------===//
2
//
3
// The LLVM Compiler Infrastructure
4
//
5
// This file is distributed under the University of Illinois Open Source
6
// License. See LICENSE.TXT for details.
7
//
8
//===----------------------------------------------------------------------===//
9
///
10
/// \file
11
/// \brief This file defines the MIPS-specific support for the FastISel class.
12
/// Some of the target-specific code is generated by tablegen in the file
13
/// MipsGenFastISel.inc, which is #included here.
14
///
15
//===----------------------------------------------------------------------===//
16
17
#include "MCTargetDesc/MipsABIInfo.h"
18
#include "MCTargetDesc/MipsBaseInfo.h"
19
#include "MipsCCState.h"
20
#include "MipsISelLowering.h"
21
#include "MipsInstrInfo.h"
22
#include "MipsMachineFunction.h"
23
#include "MipsSubtarget.h"
24
#include "MipsTargetMachine.h"
25
#include "llvm/ADT/APInt.h"
26
#include "llvm/ADT/ArrayRef.h"
27
#include "llvm/ADT/DenseMap.h"
28
#include "llvm/ADT/SmallVector.h"
29
#include "llvm/Analysis/TargetLibraryInfo.h"
30
#include "llvm/CodeGen/CallingConvLower.h"
31
#include "llvm/CodeGen/FastISel.h"
32
#include "llvm/CodeGen/FunctionLoweringInfo.h"
33
#include "llvm/CodeGen/ISDOpcodes.h"
34
#include "llvm/CodeGen/MachineBasicBlock.h"
35
#include "llvm/CodeGen/MachineFrameInfo.h"
36
#include "llvm/CodeGen/MachineInstrBuilder.h"
37
#include "llvm/CodeGen/MachineMemOperand.h"
38
#include "llvm/CodeGen/MachineRegisterInfo.h"
39
#include "llvm/CodeGen/MachineValueType.h"
40
#include "llvm/CodeGen/ValueTypes.h"
41
#include "llvm/IR/Attributes.h"
42
#include "llvm/IR/CallingConv.h"
43
#include "llvm/IR/Constant.h"
44
#include "llvm/IR/Constants.h"
45
#include "llvm/IR/DataLayout.h"
46
#include "llvm/IR/Function.h"
47
#include "llvm/IR/GetElementPtrTypeIterator.h"
48
#include "llvm/IR/GlobalValue.h"
49
#include "llvm/IR/GlobalVariable.h"
50
#include "llvm/IR/InstrTypes.h"
51
#include "llvm/IR/Instruction.h"
52
#include "llvm/IR/Instructions.h"
53
#include "llvm/IR/IntrinsicInst.h"
54
#include "llvm/IR/Operator.h"
55
#include "llvm/IR/Type.h"
56
#include "llvm/IR/User.h"
57
#include "llvm/IR/Value.h"
58
#include "llvm/MC/MCInstrDesc.h"
59
#include "llvm/MC/MCRegisterInfo.h"
60
#include "llvm/MC/MCSymbol.h"
61
#include "llvm/Support/Casting.h"
62
#include "llvm/Support/Compiler.h"
63
#include "llvm/Support/Debug.h"
64
#include "llvm/Support/ErrorHandling.h"
65
#include "llvm/Support/MathExtras.h"
66
#include "llvm/Support/raw_ostream.h"
67
#include "llvm/Target/TargetInstrInfo.h"
68
#include "llvm/Target/TargetLowering.h"
69
#include <algorithm>
70
#include <cassert>
71
#include <cstdint>
72
73
#define DEBUG_TYPE "mips-fastisel"
74
75
using namespace llvm;
76
77
namespace {
78
79
class MipsFastISel final : public FastISel {
80
81
  // All possible address modes.
82
  class Address {
83
  public:
84
    using BaseKind = enum { RegBase, FrameIndexBase };
85
86
  private:
87
    BaseKind Kind = RegBase;
88
    union {
89
      unsigned Reg;
90
      int FI;
91
    } Base;
92
93
    int64_t Offset = 0;
94
95
    const GlobalValue *GV = nullptr;
96
97
  public:
98
    // Innocuous defaults for our address.
99
640
    Address() { Base.Reg = 0; }
100
101
15
    void setKind(BaseKind K) { Kind = K; }
102
0
    BaseKind getKind() const { return Kind; }
103
581
    bool isRegBase() const { return Kind == RegBase; }
104
15
    bool isFIBase() const { return Kind == FrameIndexBase; }
105
106
570
    void setReg(unsigned Reg) {
107
570
      assert(isRegBase() && "Invalid base register access!");
108
570
      Base.Reg = Reg;
109
570
    }
110
111
1.13k
    unsigned getReg() const {
112
1.13k
      assert(isRegBase() && "Invalid base register access!");
113
1.13k
      return Base.Reg;
114
1.13k
    }
115
116
15
    void setFI(unsigned FI) {
117
15
      assert(isFIBase() && "Invalid base frame index access!");
118
15
      Base.FI = FI;
119
15
    }
120
121
15
    unsigned getFI() const {
122
15
      assert(isFIBase() && "Invalid base frame index access!");
123
15
      return Base.FI;
124
15
    }
125
126
14
    void setOffset(int64_t Offset_) { Offset = Offset_; }
127
1.16k
    int64_t getOffset() const { return Offset; }
128
59
    void setGlobalValue(const GlobalValue *G) { GV = G; }
129
112
    const GlobalValue *getGlobalValue() { return GV; }
130
  };
131
132
  /// Subtarget - Keep a pointer to the MipsSubtarget around so that we can
133
  /// make the right decision when generating code for different targets.
134
  const TargetMachine &TM;
135
  const MipsSubtarget *Subtarget;
136
  const TargetInstrInfo &TII;
137
  const TargetLowering &TLI;
138
  MipsFunctionInfo *MFI;
139
140
  // Convenience variables to avoid some queries.
141
  LLVMContext *Context;
142
143
  bool fastLowerArguments() override;
144
  bool fastLowerCall(CallLoweringInfo &CLI) override;
145
  bool fastLowerIntrinsicCall(const IntrinsicInst *II) override;
146
147
  bool UnsupportedFPMode; // To allow fast-isel to proceed and just not handle
148
  // floating point but not reject doing fast-isel in other
149
  // situations
150
151
private:
152
  // Selection routines.
153
  bool selectLogicalOp(const Instruction *I);
154
  bool selectLoad(const Instruction *I);
155
  bool selectStore(const Instruction *I);
156
  bool selectBranch(const Instruction *I);
157
  bool selectSelect(const Instruction *I);
158
  bool selectCmp(const Instruction *I);
159
  bool selectFPExt(const Instruction *I);
160
  bool selectFPTrunc(const Instruction *I);
161
  bool selectFPToInt(const Instruction *I, bool IsSigned);
162
  bool selectRet(const Instruction *I);
163
  bool selectTrunc(const Instruction *I);
164
  bool selectIntExt(const Instruction *I);
165
  bool selectShift(const Instruction *I);
166
  bool selectDivRem(const Instruction *I, unsigned ISDOpcode);
167
168
  // Utility helper routines.
169
  bool isTypeLegal(Type *Ty, MVT &VT);
170
  bool isTypeSupported(Type *Ty, MVT &VT);
171
  bool isLoadTypeLegal(Type *Ty, MVT &VT);
172
  bool computeAddress(const Value *Obj, Address &Addr);
173
  bool computeCallAddress(const Value *V, Address &Addr);
174
  void simplifyAddress(Address &Addr);
175
176
  // Emit helper routines.
177
  bool emitCmp(unsigned DestReg, const CmpInst *CI);
178
  bool emitLoad(MVT VT, unsigned &ResultReg, Address &Addr,
179
                unsigned Alignment = 0);
180
  bool emitStore(MVT VT, unsigned SrcReg, Address Addr,
181
                 MachineMemOperand *MMO = nullptr);
182
  bool emitStore(MVT VT, unsigned SrcReg, Address &Addr,
183
                 unsigned Alignment = 0);
184
  unsigned emitIntExt(MVT SrcVT, unsigned SrcReg, MVT DestVT, bool isZExt);
185
  bool emitIntExt(MVT SrcVT, unsigned SrcReg, MVT DestVT, unsigned DestReg,
186
187
                  bool IsZExt);
188
  bool emitIntZExt(MVT SrcVT, unsigned SrcReg, MVT DestVT, unsigned DestReg);
189
190
  bool emitIntSExt(MVT SrcVT, unsigned SrcReg, MVT DestVT, unsigned DestReg);
191
  bool emitIntSExt32r1(MVT SrcVT, unsigned SrcReg, MVT DestVT,
192
                       unsigned DestReg);
193
  bool emitIntSExt32r2(MVT SrcVT, unsigned SrcReg, MVT DestVT,
194
                       unsigned DestReg);
195
196
  unsigned getRegEnsuringSimpleIntegerWidening(const Value *, bool IsUnsigned);
197
198
  unsigned emitLogicalOp(unsigned ISDOpc, MVT RetVT, const Value *LHS,
199
                         const Value *RHS);
200
201
  unsigned materializeFP(const ConstantFP *CFP, MVT VT);
202
  unsigned materializeGV(const GlobalValue *GV, MVT VT);
203
  unsigned materializeInt(const Constant *C, MVT VT);
204
  unsigned materialize32BitInt(int64_t Imm, const TargetRegisterClass *RC);
205
  unsigned materializeExternalCallSym(MCSymbol *Syn);
206
207
702
  MachineInstrBuilder emitInst(unsigned Opc) {
208
702
    return BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc));
209
702
  }
210
211
1.77k
  MachineInstrBuilder emitInst(unsigned Opc, unsigned DstReg) {
212
1.77k
    return BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc),
213
1.77k
                   DstReg);
214
1.77k
  }
215
216
  MachineInstrBuilder emitInstStore(unsigned Opc, unsigned SrcReg,
217
249
                                    unsigned MemReg, int64_t MemOffset) {
218
249
    return emitInst(Opc).addReg(SrcReg).addReg(MemReg).addImm(MemOffset);
219
249
  }
220
221
  MachineInstrBuilder emitInstLoad(unsigned Opc, unsigned DstReg,
222
317
                                   unsigned MemReg, int64_t MemOffset) {
223
317
    return emitInst(Opc, DstReg).addReg(MemReg).addImm(MemOffset);
224
317
  }
225
226
  unsigned fastEmitInst_rr(unsigned MachineInstOpcode,
227
                           const TargetRegisterClass *RC,
228
                           unsigned Op0, bool Op0IsKill,
229
                           unsigned Op1, bool Op1IsKill);
230
231
  // for some reason, this default is not generated by tablegen
232
  // so we explicitly generate it here.
233
  unsigned fastEmitInst_riir(uint64_t inst, const TargetRegisterClass *RC,
234
                             unsigned Op0, bool Op0IsKill, uint64_t imm1,
235
0
                             uint64_t imm2, unsigned Op3, bool Op3IsKill) {
236
0
    return 0;
237
0
  }
238
239
  // Call handling routines.
240
private:
241
  CCAssignFn *CCAssignFnForCall(CallingConv::ID CC) const;
242
  bool processCallArgs(CallLoweringInfo &CLI, SmallVectorImpl<MVT> &ArgVTs,
243
                       unsigned &NumBytes);
244
  bool finishCall(CallLoweringInfo &CLI, MVT RetVT, unsigned NumBytes);
245
246
289
  const MipsABIInfo &getABI() const {
247
289
    return static_cast<const MipsTargetMachine &>(TM).getABI();
248
289
  }
249
250
public:
251
  // Backend specific FastISel code.
252
  explicit MipsFastISel(FunctionLoweringInfo &funcInfo,
253
                        const TargetLibraryInfo *libInfo)
254
      : FastISel(funcInfo, libInfo), TM(funcInfo.MF->getTarget()),
255
        Subtarget(&funcInfo.MF->getSubtarget<MipsSubtarget>()),
256
301
        TII(*Subtarget->getInstrInfo()), TLI(*Subtarget->getTargetLowering()) {
257
301
    MFI = funcInfo.MF->getInfo<MipsFunctionInfo>();
258
301
    Context = &funcInfo.Fn->getContext();
259
300
    UnsupportedFPMode = Subtarget->isFP64bit() || Subtarget->useSoftFloat();
260
301
  }
261
262
  unsigned fastMaterializeAlloca(const AllocaInst *AI) override;
263
  unsigned fastMaterializeConstant(const Constant *C) override;
264
  bool fastSelectInstruction(const Instruction *I) override;
265
266
#include "MipsGenFastISel.inc"
267
};
268
269
} // end anonymous namespace
270
271
static bool CC_Mips(unsigned ValNo, MVT ValVT, MVT LocVT,
272
                    CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags,
273
                    CCState &State) LLVM_ATTRIBUTE_UNUSED;
274
275
static bool CC_MipsO32_FP32(unsigned ValNo, MVT ValVT, MVT LocVT,
276
                            CCValAssign::LocInfo LocInfo,
277
0
                            ISD::ArgFlagsTy ArgFlags, CCState &State) {
278
0
  llvm_unreachable("should not be called");
279
0
}
280
281
static bool CC_MipsO32_FP64(unsigned ValNo, MVT ValVT, MVT LocVT,
282
                            CCValAssign::LocInfo LocInfo,
283
0
                            ISD::ArgFlagsTy ArgFlags, CCState &State) {
284
0
  llvm_unreachable("should not be called");
285
0
}
286
287
#include "MipsGenCallingConv.inc"
288
289
59
CCAssignFn *MipsFastISel::CCAssignFnForCall(CallingConv::ID CC) const {
290
59
  return CC_MipsO32;
291
59
}
292
293
unsigned MipsFastISel::emitLogicalOp(unsigned ISDOpc, MVT RetVT,
294
42
                                     const Value *LHS, const Value *RHS) {
295
42
  // Canonicalize immediates to the RHS first.
296
42
  if (isa<ConstantInt>(LHS) && !isa<ConstantInt>(RHS))
297
2
    std::swap(LHS, RHS);
298
42
299
42
  unsigned Opc;
300
42
  switch (ISDOpc) {
301
14
  case ISD::AND:
302
14
    Opc = Mips::AND;
303
14
    break;
304
14
  case ISD::OR:
305
14
    Opc = Mips::OR;
306
14
    break;
307
14
  case ISD::XOR:
308
14
    Opc = Mips::XOR;
309
14
    break;
310
0
  default:
311
0
    llvm_unreachable("unexpected opcode");
312
42
  }
313
42
314
42
  unsigned LHSReg = getRegForValue(LHS);
315
42
  if (!LHSReg)
316
0
    return 0;
317
42
318
42
  unsigned RHSReg;
319
42
  if (const auto *C = dyn_cast<ConstantInt>(RHS))
320
30
    RHSReg = materializeInt(C, MVT::i32);
321
42
  else
322
12
    RHSReg = getRegForValue(RHS);
323
42
  if (!RHSReg)
324
0
    return 0;
325
42
326
42
  unsigned ResultReg = createResultReg(&Mips::GPR32RegClass);
327
42
  if (!ResultReg)
328
0
    return 0;
329
42
330
42
  emitInst(Opc, ResultReg).addReg(LHSReg).addReg(RHSReg);
331
42
  return ResultReg;
332
42
}
333
334
2
unsigned MipsFastISel::fastMaterializeAlloca(const AllocaInst *AI) {
335
2
  assert(TLI.getValueType(DL, AI->getType(), true) == MVT::i32 &&
336
2
         "Alloca should always return a pointer.");
337
2
338
2
  DenseMap<const AllocaInst *, int>::iterator SI =
339
2
      FuncInfo.StaticAllocaMap.find(AI);
340
2
341
2
  if (SI != FuncInfo.StaticAllocaMap.end()) {
342
2
    unsigned ResultReg = createResultReg(&Mips::GPR32RegClass);
343
2
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Mips::LEA_ADDiu),
344
2
            ResultReg)
345
2
        .addFrameIndex(SI->second)
346
2
        .addImm(0);
347
2
    return ResultReg;
348
2
  }
349
0
350
0
  return 0;
351
0
}
352
353
165
unsigned MipsFastISel::materializeInt(const Constant *C, MVT VT) {
354
165
  if (VT != MVT::i32 && VT != MVT::i16 && VT != MVT::i8 && VT != MVT::i1)
355
0
    return 0;
356
165
  const TargetRegisterClass *RC = &Mips::GPR32RegClass;
357
165
  const ConstantInt *CI = cast<ConstantInt>(C);
358
165
  return materialize32BitInt(CI->getZExtValue(), RC);
359
165
}
360
361
unsigned MipsFastISel::materialize32BitInt(int64_t Imm,
362
225
                                           const TargetRegisterClass *RC) {
363
225
  unsigned ResultReg = createResultReg(RC);
364
225
365
225
  if (isInt<16>(Imm)) {
366
143
    unsigned Opc = Mips::ADDiu;
367
143
    emitInst(Opc, ResultReg).addReg(Mips::ZERO).addImm(Imm);
368
143
    return ResultReg;
369
82
  } else if (isUInt<16>(Imm)) {
370
8
    emitInst(Mips::ORi, ResultReg).addReg(Mips::ZERO).addImm(Imm);
371
8
    return ResultReg;
372
8
  }
373
74
  unsigned Lo = Imm & 0xFFFF;
374
74
  unsigned Hi = (Imm >> 16) & 0xFFFF;
375
74
  if (Lo) {
376
70
    // Both Lo and Hi have nonzero bits.
377
70
    unsigned TmpReg = createResultReg(RC);
378
70
    emitInst(Mips::LUi, TmpReg).addImm(Hi);
379
70
    emitInst(Mips::ORi, ResultReg).addReg(TmpReg).addImm(Lo);
380
74
  } else {
381
4
    emitInst(Mips::LUi, ResultReg).addImm(Hi);
382
4
  }
383
225
  return ResultReg;
384
225
}
385
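Aside: the LUi/ORi pair emitted in materialize32BitInt above is the usual MIPS idiom for building a 32-bit immediate from two 16-bit halves. A minimal standalone C++ sketch of that decomposition (illustrative only, not part of MipsFastISel.cpp; it skips the isInt<16>/isUInt<16> short paths handled earlier in the function):

#include <cassert>
#include <cstdint>

// Model the effect of: LUi TmpReg, Hi ; ORi ResultReg, TmpReg, Lo
static uint32_t materializeViaLuiOri(uint32_t Imm) {
  uint32_t Lo = Imm & 0xFFFF;         // low halfword, merged in by ORi
  uint32_t Hi = (Imm >> 16) & 0xFFFF; // high halfword, loaded by LUi
  uint32_t Reg = Hi << 16;            // LUi places Hi in the upper bits
  return Reg | Lo;                    // ORi fills in the lower bits
}

int main() {
  assert(materializeViaLuiOri(0x12345678u) == 0x12345678u);
  return 0;
}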
386
46
unsigned MipsFastISel::materializeFP(const ConstantFP *CFP, MVT VT) {
387
46
  if (UnsupportedFPMode)
388
0
    return 0;
389
46
  int64_t Imm = CFP->getValueAPF().bitcastToAPInt().getZExtValue();
390
46
  if (VT == MVT::f32) {
391
36
    const TargetRegisterClass *RC = &Mips::FGR32RegClass;
392
36
    unsigned DestReg = createResultReg(RC);
393
36
    unsigned TempReg = materialize32BitInt(Imm, &Mips::GPR32RegClass);
394
36
    emitInst(Mips::MTC1, DestReg).addReg(TempReg);
395
36
    return DestReg;
396
10
  } else if (VT == MVT::f64) {
397
10
    const TargetRegisterClass *RC = &Mips::AFGR64RegClass;
398
10
    unsigned DestReg = createResultReg(RC);
399
10
    unsigned TempReg1 = materialize32BitInt(Imm >> 32, &Mips::GPR32RegClass);
400
10
    unsigned TempReg2 =
401
10
        materialize32BitInt(Imm & 0xFFFFFFFF, &Mips::GPR32RegClass);
402
10
    emitInst(Mips::BuildPairF64, DestReg).addReg(TempReg2).addReg(TempReg1);
403
10
    return DestReg;
404
10
  }
405
0
  return 0;
406
0
}
407
408
585
unsigned MipsFastISel::materializeGV(const GlobalValue *GV, MVT VT) {
409
585
  // For now 32-bit only.
410
585
  if (VT != MVT::i32)
411
0
    return 0;
412
585
  const TargetRegisterClass *RC = &Mips::GPR32RegClass;
413
585
  unsigned DestReg = createResultReg(RC);
414
585
  const GlobalVariable *GVar = dyn_cast<GlobalVariable>(GV);
415
532
  bool IsThreadLocal = GVar && GVar->isThreadLocal();
416
585
  // TLS not supported at this time.
417
585
  if (IsThreadLocal)
418
0
    return 0;
419
585
  emitInst(Mips::LW, DestReg)
420
585
      .addReg(MFI->getGlobalBaseReg())
421
585
      .addGlobalAddress(GV, 0, MipsII::MO_GOT);
422
585
  if ((GV->hasInternalLinkage() ||
423
585
       
       (GV->hasLocalLinkage() && !isa<Function>(GV)))) {
424
2
    unsigned TempReg = createResultReg(RC);
425
2
    emitInst(Mips::ADDiu, TempReg)
426
2
        .addReg(DestReg)
427
2
        .addGlobalAddress(GV, 0, MipsII::MO_ABS_LO);
428
2
    DestReg = TempReg;
429
2
  }
430
585
  return DestReg;
431
585
}
432
433
6
unsigned MipsFastISel::materializeExternalCallSym(MCSymbol *Sym) {
434
6
  const TargetRegisterClass *RC = &Mips::GPR32RegClass;
435
6
  unsigned DestReg = createResultReg(RC);
436
6
  emitInst(Mips::LW, DestReg)
437
6
      .addReg(MFI->getGlobalBaseReg())
438
6
      .addSym(Sym, MipsII::MO_GOT);
439
6
  return DestReg;
440
6
}
441
442
// Materialize a constant into a register, and return the register
443
// number (or zero if we failed to handle it).
444
721
unsigned MipsFastISel::fastMaterializeConstant(const Constant *C) {
445
721
  EVT CEVT = TLI.getValueType(DL, C->getType(), true);
446
721
447
721
  // Only handle simple types.
448
721
  if (!CEVT.isSimple())
449
0
    return 0;
450
721
  MVT VT = CEVT.getSimpleVT();
451
721
452
721
  if (const ConstantFP *CFP = dyn_cast<ConstantFP>(C))
453
46
    
    return (UnsupportedFPMode) ? 0 : materializeFP(CFP, VT);
454
675
  else if (const GlobalValue *GV = dyn_cast<GlobalValue>(C))
455
532
    return materializeGV(GV, VT);
456
143
  else if (isa<ConstantInt>(C))
457
135
    return materializeInt(C, VT);
458
8
459
8
  return 0;
460
8
}
461
462
591
bool MipsFastISel::computeAddress(const Value *Obj, Address &Addr) {
463
591
  const User *U = nullptr;
464
591
  unsigned Opcode = Instruction::UserOp1;
465
591
  if (const Instruction *I = dyn_cast<Instruction>(Obj)) {
466
28
    // Don't walk into other basic blocks unless the object is an alloca from
467
28
    // another block, otherwise it may not have a virtual register assigned.
468
28
    if (FuncInfo.StaticAllocaMap.count(static_cast<const AllocaInst *>(Obj)) ||
469
28
        
        FuncInfo.MBBMap[I->getParent()] == FuncInfo.MBB) {
470
28
      Opcode = I->getOpcode();
471
28
      U = I;
472
28
    }
473
591
  } else if (const ConstantExpr *C = dyn_cast<ConstantExpr>(Obj)) {
474
2
    Opcode = C->getOpcode();
475
2
    U = C;
476
2
  }
477
591
  switch (Opcode) {
478
566
  default:
479
566
    break;
480
0
  case Instruction::BitCast:
481
0
    // Look through bitcasts.
482
0
    return computeAddress(U->getOperand(0), Addr);
483
10
  case Instruction::GetElementPtr: {
484
10
    Address SavedAddr = Addr;
485
10
    int64_t TmpOffset = Addr.getOffset();
486
10
    // Iterate through the GEP folding the constants into offsets where
487
10
    // we can.
488
10
    gep_type_iterator GTI = gep_type_begin(U);
489
26
    for (User::const_op_iterator i = U->op_begin() + 1, e = U->op_end(); i != e;
490
16
         
         ++i, ++GTI) {
491
16
      const Value *Op = *i;
492
16
      if (StructType *STy = GTI.getStructTypeOrNull()) {
493
2
        const StructLayout *SL = DL.getStructLayout(STy);
494
2
        unsigned Idx = cast<ConstantInt>(Op)->getZExtValue();
495
2
        TmpOffset += SL->getElementOffset(Idx);
496
16
      } else {
497
14
        uint64_t S = DL.getTypeAllocSize(GTI.getIndexedType());
498
14
        while (true) {
499
14
          if (const ConstantInt *CI = dyn_cast<ConstantInt>(Op)) {
500
14
            // Constant-offset addressing.
501
14
            TmpOffset += CI->getSExtValue() * S;
502
14
            break;
503
14
          }
504
0
          
          if (canFoldAddIntoGEP(U, Op)) {
505
0
            // A compatible add with a constant operand. Fold the constant.
506
0
            ConstantInt *CI =
507
0
                cast<ConstantInt>(cast<AddOperator>(Op)->getOperand(1));
508
0
            TmpOffset += CI->getSExtValue() * S;
509
0
            // Iterate on the other operand.
510
0
            Op = cast<AddOperator>(Op)->getOperand(0);
511
0
            continue;
512
0
          }
513
0
          // Unsupported
514
0
          goto unsupported_gep;
515
0
        }
516
14
      }
517
16
    }
518
10
    // Try to grab the base operand now.
519
10
    Addr.setOffset(TmpOffset);
520
10
    if (computeAddress(U->getOperand(0), Addr))
521
10
      return true;
522
0
    // We failed, restore everything and try the other options.
523
0
    Addr = SavedAddr;
524
0
  unsupported_gep:
525
0
    break;
526
0
  }
527
15
  case Instruction::Alloca: {
528
15
    const AllocaInst *AI = cast<AllocaInst>(Obj);
529
15
    DenseMap<const AllocaInst *, int>::iterator SI =
530
15
        FuncInfo.StaticAllocaMap.find(AI);
531
15
    if (SI != FuncInfo.StaticAllocaMap.end()) {
532
15
      Addr.setKind(Address::FrameIndexBase);
533
15
      Addr.setFI(SI->second);
534
15
      return true;
535
15
    }
536
0
    break;
537
0
  }
538
566
  }
539
566
  Addr.setReg(getRegForValue(Obj));
540
566
  return Addr.getReg() != 0;
541
566
}
542
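Aside: the GetElementPtr case in computeAddress above folds every constant index into a single byte offset, using StructLayout for struct fields and the indexed type's allocation size for array indices. A minimal standalone C++ sketch of the array-index step (illustrative only, not part of this file; ElemSize stands in for DL.getTypeAllocSize(GTI.getIndexedType())):

#include <cassert>
#include <cstdint>

// Mirror of the "TmpOffset += CI->getSExtValue() * S" step for a constant index.
static int64_t foldConstIndex(int64_t TmpOffset, int64_t Index,
                              uint64_t ElemSize) {
  return TmpOffset + Index * static_cast<int64_t>(ElemSize);
}

int main() {
  // e.g. getelementptr i32, i32* %p, i32 3 contributes 3 * 4 = 12 bytes.
  assert(foldConstIndex(0, 3, 4) == 12);
  return 0;
}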
543
60
bool MipsFastISel::computeCallAddress(const Value *V, Address &Addr) {
544
60
  const User *U = nullptr;
545
60
  unsigned Opcode = Instruction::UserOp1;
546
60
547
60
  if (const auto *I = dyn_cast<Instruction>(V)) {
548
0
    // Check if the value is defined in the same basic block. This information
549
0
    // is crucial to know whether or not folding an operand is valid.
550
0
    if (I->getParent() == FuncInfo.MBB->getBasicBlock()) {
551
0
      Opcode = I->getOpcode();
552
0
      U = I;
553
0
    }
554
60
  } else if (const auto *C = dyn_cast<ConstantExpr>(V)) {
555
1
    Opcode = C->getOpcode();
556
1
    U = C;
557
1
  }
558
60
559
60
  switch (Opcode) {
560
59
  default:
561
59
    break;
562
1
  case Instruction::BitCast:
563
1
    // Look past bitcasts if its operand is in the same BB.
564
1
      return computeCallAddress(U->getOperand(0), Addr);
565
0
    break;
566
0
  case Instruction::IntToPtr:
567
0
    // Look past no-op inttoptrs if its operand is in the same BB.
568
0
    if (TLI.getValueType(DL, U->getOperand(0)->getType()) ==
569
0
        TLI.getPointerTy(DL))
570
0
      return computeCallAddress(U->getOperand(0), Addr);
571
0
    break;
572
0
  case Instruction::PtrToInt:
573
0
    // Look past no-op ptrtoints if its operand is in the same BB.
574
0
    if (TLI.getValueType(DL, U->getType()) == TLI.getPointerTy(DL))
575
0
      return computeCallAddress(U->getOperand(0), Addr);
576
0
    break;
577
59
  }
578
59
579
59
  
  if (const GlobalValue *GV = dyn_cast<GlobalValue>(V)) {
580
59
    Addr.setGlobalValue(GV);
581
59
    return true;
582
59
  }
583
0
584
0
  // If all else fails, try to materialize the value in a register.
585
0
  
  if (!Addr.getGlobalValue()) {
586
0
    Addr.setReg(getRegForValue(V));
587
0
    return Addr.getReg() != 0;
588
0
  }
589
0
590
0
  return false;
591
0
}
592
593
811
bool MipsFastISel::isTypeLegal(Type *Ty, MVT &VT) {
594
811
  EVT evt = TLI.getValueType(DL, Ty, true);
595
811
  // Only handle simple types.
596
811
  if (evt == MVT::Other || !evt.isSimple())
597
0
    return false;
598
811
  VT = evt.getSimpleVT();
599
811
600
811
  // Handle all legal types, i.e. a register that will directly hold this
601
811
  // value.
602
811
  return TLI.isTypeLegal(VT);
603
811
}
604
605
66
bool MipsFastISel::isTypeSupported(Type *Ty, MVT &VT) {
606
66
  if (Ty->isVectorTy())
607
0
    return false;
608
66
609
66
  
  if (isTypeLegal(Ty, VT))
610
7
    return true;
611
59
612
59
  // If this is a type that can be sign or zero-extended to a basic operation
613
59
  // go ahead and accept it now.
614
59
  
  if (VT == MVT::i1 || VT == MVT::i8 || VT == MVT::i16)
615
59
    return true;
616
0
617
0
  return false;
618
0
}
619
620
581
bool MipsFastISel::isLoadTypeLegal(Type *Ty, MVT &VT) {
621
581
  if (isTypeLegal(Ty, VT))
622
315
    return true;
623
266
  // We will extend this in a later patch:
624
266
  //   If this is a type that can be sign or zero-extended to a basic operation
625
266
  //   go ahead and accept it now.
626
266
  
  if (VT == MVT::i8 || VT == MVT::i16)
627
266
    return true;
628
0
  return false;
629
0
}
630
631
// Because of how EmitCmp is called with fast-isel, you can
632
// end up with redundant "andi" instructions after the sequences emitted below.
633
// We should try and solve this issue in the future.
634
//
635
56
bool MipsFastISel::emitCmp(unsigned ResultReg, const CmpInst *CI) {
636
56
  const Value *Left = CI->getOperand(0), *Right = CI->getOperand(1);
637
56
  bool IsUnsigned = CI->isUnsigned();
638
56
  unsigned LeftReg = getRegEnsuringSimpleIntegerWidening(Left, IsUnsigned);
639
56
  if (LeftReg == 0)
640
0
    return false;
641
56
  unsigned RightReg = getRegEnsuringSimpleIntegerWidening(Right, IsUnsigned);
642
56
  if (RightReg == 0)
643
0
    return false;
644
56
  CmpInst::Predicate P = CI->getPredicate();
645
56
646
56
  switch (P) {
647
0
  default:
648
0
    return false;
649
4
  case CmpInst::ICMP_EQ: {
650
4
    unsigned TempReg = createResultReg(&Mips::GPR32RegClass);
651
4
    emitInst(Mips::XOR, TempReg).addReg(LeftReg).addReg(RightReg);
652
4
    emitInst(Mips::SLTiu, ResultReg).addReg(TempReg).addImm(1);
653
4
    break;
654
56
  }
655
11
  case CmpInst::ICMP_NE: {
656
11
    unsigned TempReg = createResultReg(&Mips::GPR32RegClass);
657
11
    emitInst(Mips::XOR, TempReg).addReg(LeftReg).addReg(RightReg);
658
11
    emitInst(Mips::SLTu, ResultReg).addReg(Mips::ZERO).addReg(TempReg);
659
11
    break;
660
56
  }
661
2
  case CmpInst::ICMP_UGT:
662
2
    emitInst(Mips::SLTu, ResultReg).addReg(RightReg).addReg(LeftReg);
663
2
    break;
664
2
  case CmpInst::ICMP_ULT:
665
2
    emitInst(Mips::SLTu, ResultReg).addReg(LeftReg).addReg(RightReg);
666
2
    break;
667
2
  case CmpInst::ICMP_UGE: {
668
2
    unsigned TempReg = createResultReg(&Mips::GPR32RegClass);
669
2
    emitInst(Mips::SLTu, TempReg).addReg(LeftReg).addReg(RightReg);
670
2
    emitInst(Mips::XORi, ResultReg).addReg(TempReg).addImm(1);
671
2
    break;
672
56
  }
673
2
  case CmpInst::ICMP_ULE: {
674
2
    unsigned TempReg = createResultReg(&Mips::GPR32RegClass);
675
2
    emitInst(Mips::SLTu, TempReg).addReg(RightReg).addReg(LeftReg);
676
2
    emitInst(Mips::XORi, ResultReg).addReg(TempReg).addImm(1);
677
2
    break;
678
56
  }
679
2
  case CmpInst::ICMP_SGT:
680
2
    emitInst(Mips::SLT, ResultReg).addReg(RightReg).addReg(LeftReg);
681
2
    break;
682
3
  case CmpInst::ICMP_SLT:
683
3
    emitInst(Mips::SLT, ResultReg).addReg(LeftReg).addReg(RightReg);
684
3
    break;
685
2
  case CmpInst::ICMP_SGE: {
686
2
    unsigned TempReg = createResultReg(&Mips::GPR32RegClass);
687
2
    emitInst(Mips::SLT, TempReg).addReg(LeftReg).addReg(RightReg);
688
2
    emitInst(Mips::XORi, ResultReg).addReg(TempReg).addImm(1);
689
2
    break;
690
56
  }
691
2
  case CmpInst::ICMP_SLE: {
692
2
    unsigned TempReg = createResultReg(&Mips::GPR32RegClass);
693
2
    emitInst(Mips::SLT, TempReg).addReg(RightReg).addReg(LeftReg);
694
2
    emitInst(Mips::XORi, ResultReg).addReg(TempReg).addImm(1);
695
2
    break;
696
56
  }
697
24
  case CmpInst::FCMP_OEQ:
698
24
  case CmpInst::FCMP_UNE:
699
24
  case CmpInst::FCMP_OLT:
700
24
  case CmpInst::FCMP_OLE:
701
24
  case CmpInst::FCMP_OGT:
702
24
  case CmpInst::FCMP_OGE: {
703
24
    if (UnsupportedFPMode)
704
0
      return false;
705
24
    bool IsFloat = Left->getType()->isFloatTy();
706
24
    bool IsDouble = Left->getType()->isDoubleTy();
707
24
    if (
!IsFloat && 24
!IsDouble12
)
708
0
      return false;
709
24
    unsigned Opc, CondMovOpc;
710
24
    switch (P) {
711
4
    case CmpInst::FCMP_OEQ:
712
4
      Opc = IsFloat ? Mips::C_EQ_S : Mips::C_EQ_D32;
713
4
      CondMovOpc = Mips::MOVT_I;
714
4
      break;
715
4
    case CmpInst::FCMP_UNE:
716
4
      Opc = IsFloat ? Mips::C_EQ_S : Mips::C_EQ_D32;
717
4
      CondMovOpc = Mips::MOVF_I;
718
4
      break;
719
4
    case CmpInst::FCMP_OLT:
720
4
      Opc = IsFloat ? Mips::C_OLT_S : Mips::C_OLT_D32;
721
4
      CondMovOpc = Mips::MOVT_I;
722
4
      break;
723
4
    case CmpInst::FCMP_OLE:
724
4
      Opc = IsFloat ? Mips::C_OLE_S : Mips::C_OLE_D32;
725
4
      CondMovOpc = Mips::MOVT_I;
726
4
      break;
727
4
    case CmpInst::FCMP_OGT:
728
4
      Opc = IsFloat ? Mips::C_ULE_S : Mips::C_ULE_D32;
729
4
      CondMovOpc = Mips::MOVF_I;
730
4
      break;
731
4
    case CmpInst::FCMP_OGE:
732
4
      Opc = IsFloat ? Mips::C_ULT_S : Mips::C_ULT_D32;
733
4
      CondMovOpc = Mips::MOVF_I;
734
4
      break;
735
0
    default:
736
0
      llvm_unreachable("Only switching of a subset of CCs.");
737
24
    }
738
24
    unsigned RegWithZero = createResultReg(&Mips::GPR32RegClass);
739
24
    unsigned RegWithOne = createResultReg(&Mips::GPR32RegClass);
740
24
    emitInst(Mips::ADDiu, RegWithZero).addReg(Mips::ZERO).addImm(0);
741
24
    emitInst(Mips::ADDiu, RegWithOne).addReg(Mips::ZERO).addImm(1);
742
24
    emitInst(Opc).addReg(Mips::FCC0, RegState::Define).addReg(LeftReg)
743
24
                 .addReg(RightReg);
744
24
    emitInst(CondMovOpc, ResultReg)
745
24
        .addReg(RegWithOne)
746
24
        .addReg(Mips::FCC0)
747
24
        .addReg(RegWithZero);
748
24
    break;
749
24
  }
750
56
  }
751
56
  return true;
752
56
}
753
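Aside: for the >= and <= predicates, which have no single MIPS set instruction, emitCmp above emits SLTu/SLT followed by XORi with 1 to invert the result bit. A small standalone C++ sketch of that identity for the unsigned case (illustrative only, not part of this file):

#include <cassert>
#include <cstdint>

// ICMP_UGE is emitted as: SLTu Tmp, LHS, RHS ; XORi Result, Tmp, 1
static uint32_t icmpUge(uint32_t Lhs, uint32_t Rhs) {
  uint32_t Tmp = (Lhs < Rhs) ? 1u : 0u; // SLTu
  return Tmp ^ 1u;                      // XORi ..., 1 inverts the bit
}

int main() {
  assert(icmpUge(5, 3) == 1);
  assert(icmpUge(3, 5) == 0);
  assert(icmpUge(4, 4) == 1); // equality counts as "not less than"
  return 0;
}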
754
bool MipsFastISel::emitLoad(MVT VT, unsigned &ResultReg, Address &Addr,
755
324
                            unsigned Alignment) {
756
324
  //
757
324
  // more cases will be handled here in following patches.
758
324
  //
759
324
  unsigned Opc;
760
324
  switch (VT.SimpleTy) {
761
91
  case MVT::i32:
762
91
    ResultReg = createResultReg(&Mips::GPR32RegClass);
763
91
    Opc = Mips::LW;
764
91
    break;
765
69
  case MVT::i16:
766
69
    ResultReg = createResultReg(&Mips::GPR32RegClass);
767
69
    Opc = Mips::LHu;
768
69
    break;
769
100
  case MVT::i8:
770
100
    ResultReg = createResultReg(&Mips::GPR32RegClass);
771
100
    Opc = Mips::LBu;
772
100
    break;
773
33
  case MVT::f32:
774
33
    if (UnsupportedFPMode)
775
0
      return false;
776
33
    ResultReg = createResultReg(&Mips::FGR32RegClass);
777
33
    Opc = Mips::LWC1;
778
33
    break;
779
31
  case MVT::f64:
780
31
    if (UnsupportedFPMode)
781
0
      return false;
782
31
    ResultReg = createResultReg(&Mips::AFGR64RegClass);
783
31
    Opc = Mips::LDC1;
784
31
    break;
785
0
  default:
786
0
    return false;
787
324
  }
788
324
  
  if (Addr.isRegBase()) {
789
317
    simplifyAddress(Addr);
790
317
    emitInstLoad(Opc, ResultReg, Addr.getReg(), Addr.getOffset());
791
317
    return true;
792
317
  }
793
7
  
  if (Addr.isFIBase()) {
794
7
    unsigned FI = Addr.getFI();
795
7
    unsigned Align = 4;
796
7
    int64_t Offset = Addr.getOffset();
797
7
    MachineFrameInfo &MFI = MF->getFrameInfo();
798
7
    MachineMemOperand *MMO = MF->getMachineMemOperand(
799
7
        MachinePointerInfo::getFixedStack(*MF, FI), MachineMemOperand::MOLoad,
800
7
        MFI.getObjectSize(FI), Align);
801
7
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), ResultReg)
802
7
        .addFrameIndex(FI)
803
7
        .addImm(Offset)
804
7
        .addMemOperand(MMO);
805
7
    return true;
806
7
  }
807
0
  return false;
808
0
}
809
810
bool MipsFastISel::emitStore(MVT VT, unsigned SrcReg, Address &Addr,
811
257
                             unsigned Alignment) {
812
257
  //
813
257
  // more cases will be handled here in following patches.
814
257
  //
815
257
  unsigned Opc;
816
257
  switch (VT.SimpleTy) {
817
42
  case MVT::i8:
818
42
    Opc = Mips::SB;
819
42
    break;
820
55
  case MVT::i16:
821
55
    Opc = Mips::SH;
822
55
    break;
823
140
  case MVT::i32:
824
140
    Opc = Mips::SW;
825
140
    break;
826
12
  case MVT::f32:
827
12
    if (UnsupportedFPMode)
828
0
      return false;
829
12
    Opc = Mips::SWC1;
830
12
    break;
831
8
  case MVT::f64:
832
8
    if (UnsupportedFPMode)
833
0
      return false;
834
8
    Opc = Mips::SDC1;
835
8
    break;
836
0
  default:
837
0
    return false;
838
257
  }
839
257
  
  if (Addr.isRegBase()) {
840
249
    simplifyAddress(Addr);
841
249
    emitInstStore(Opc, SrcReg, Addr.getReg(), Addr.getOffset());
842
249
    return true;
843
249
  }
844
8
  
  if (Addr.isFIBase()) {
845
8
    unsigned FI = Addr.getFI();
846
8
    unsigned Align = 4;
847
8
    int64_t Offset = Addr.getOffset();
848
8
    MachineFrameInfo &MFI = MF->getFrameInfo();
849
8
    MachineMemOperand *MMO = MF->getMachineMemOperand(
850
8
        MachinePointerInfo::getFixedStack(*MF, FI), MachineMemOperand::MOStore,
851
8
        MFI.getObjectSize(FI), Align);
852
8
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc))
853
8
        .addReg(SrcReg)
854
8
        .addFrameIndex(FI)
855
8
        .addImm(Offset)
856
8
        .addMemOperand(MMO);
857
8
    return true;
858
8
  }
859
0
  return false;
860
0
}
861
862
42
bool MipsFastISel::selectLogicalOp(const Instruction *I) {
863
42
  MVT VT;
864
42
  if (!isTypeSupported(I->getType(), VT))
865
0
    return false;
866
42
867
42
  unsigned ResultReg;
868
42
  switch (I->getOpcode()) {
869
0
  default:
870
0
    llvm_unreachable("Unexpected instruction.");
871
14
  case Instruction::And:
872
14
    ResultReg = emitLogicalOp(ISD::AND, VT, I->getOperand(0), I->getOperand(1));
873
14
    break;
874
14
  case Instruction::Or:
875
14
    ResultReg = emitLogicalOp(ISD::OR, VT, I->getOperand(0), I->getOperand(1));
876
14
    break;
877
14
  case Instruction::Xor:
878
14
    ResultReg = emitLogicalOp(ISD::XOR, VT, I->getOperand(0), I->getOperand(1));
879
14
    break;
880
42
  }
881
42
882
42
  
  if (!ResultReg)
883
0
    return false;
884
42
885
42
  updateValueMap(I, ResultReg);
886
42
  return true;
887
42
}
888
889
324
bool MipsFastISel::selectLoad(const Instruction *I) {
890
324
  // Atomic loads need special handling.
891
324
  if (cast<LoadInst>(I)->isAtomic())
892
0
    return false;
893
324
894
324
  // Verify we have a legal type before going any further.
895
324
  MVT VT;
896
324
  if (!isLoadTypeLegal(I->getType(), VT))
897
0
    return false;
898
324
899
324
  // See if we can handle this address.
900
324
  Address Addr;
901
324
  if (!computeAddress(I->getOperand(0), Addr))
902
0
    return false;
903
324
904
324
  unsigned ResultReg;
905
324
  if (!emitLoad(VT, ResultReg, Addr, cast<LoadInst>(I)->getAlignment()))
906
0
    return false;
907
324
  updateValueMap(I, ResultReg);
908
324
  return true;
909
324
}
910
911
257
bool MipsFastISel::selectStore(const Instruction *I) {
912
257
  Value *Op0 = I->getOperand(0);
913
257
  unsigned SrcReg = 0;
914
257
915
257
  // Atomic stores need special handling.
916
257
  if (cast<StoreInst>(I)->isAtomic())
917
0
    return false;
918
257
919
257
  // Verify we have a legal type before going any further.
920
257
  MVT VT;
921
257
  if (!isLoadTypeLegal(I->getOperand(0)->getType(), VT))
922
0
    return false;
923
257
924
257
  // Get the value to be stored into a register.
925
257
  SrcReg = getRegForValue(Op0);
926
257
  if (SrcReg == 0)
927
0
    return false;
928
257
929
257
  // See if we can handle this address.
930
257
  Address Addr;
931
257
  if (!computeAddress(I->getOperand(1), Addr))
932
0
    return false;
933
257
934
257
  
if (257
!emitStore(VT, SrcReg, Addr, cast<StoreInst>(I)->getAlignment())257
)
935
0
    return false;
936
257
  return true;
937
257
}
938
939
// This can cause a redundant sltiu to be generated.
940
// FIXME: try and eliminate this in a future patch.
941
4
bool MipsFastISel::selectBranch(const Instruction *I) {
942
4
  const BranchInst *BI = cast<BranchInst>(I);
943
4
  MachineBasicBlock *BrBB = FuncInfo.MBB;
944
4
  //
945
4
  // TBB is the basic block for the case where the comparison is true.
946
4
  // FBB is the basic block for the case where the comparison is false.
947
4
  // if (cond) goto TBB
948
4
  // goto FBB
949
4
  // TBB:
950
4
  //
951
4
  MachineBasicBlock *TBB = FuncInfo.MBBMap[BI->getSuccessor(0)];
952
4
  MachineBasicBlock *FBB = FuncInfo.MBBMap[BI->getSuccessor(1)];
953
4
  BI->getCondition();
954
4
  // For now, just try the simplest case where it's fed by a compare.
955
4
  if (const CmpInst *CI = dyn_cast<CmpInst>(BI->getCondition())) {
956
4
    unsigned CondReg = createResultReg(&Mips::GPR32RegClass);
957
4
    if (!emitCmp(CondReg, CI))
958
0
      return false;
959
4
    BuildMI(*BrBB, FuncInfo.InsertPt, DbgLoc, TII.get(Mips::BGTZ))
960
4
        .addReg(CondReg)
961
4
        .addMBB(TBB);
962
4
    finishCondBranch(BI->getParent(), TBB, FBB);
963
4
    return true;
964
4
  }
965
0
  return false;
966
0
}
967
968
52
bool MipsFastISel::selectCmp(const Instruction *I) {
969
52
  const CmpInst *CI = cast<CmpInst>(I);
970
52
  unsigned ResultReg = createResultReg(&Mips::GPR32RegClass);
971
52
  if (!emitCmp(ResultReg, CI))
972
0
    return false;
973
52
  updateValueMap(I, ResultReg);
974
52
  return true;
975
52
}
976
977
// Attempt to fast-select a floating-point extend instruction.
978
2
bool MipsFastISel::selectFPExt(const Instruction *I) {
979
2
  if (UnsupportedFPMode)
980
0
    return false;
981
2
  Value *Src = I->getOperand(0);
982
2
  EVT SrcVT = TLI.getValueType(DL, Src->getType(), true);
983
2
  EVT DestVT = TLI.getValueType(DL, I->getType(), true);
984
2
985
2
  if (SrcVT != MVT::f32 || DestVT != MVT::f64)
986
0
    return false;
987
2
988
2
  unsigned SrcReg =
989
2
      getRegForValue(Src); // this must be a 32bit floating point register class
990
2
                           // maybe we should handle this differently
991
2
  if (!SrcReg)
992
0
    return false;
993
2
994
2
  unsigned DestReg = createResultReg(&Mips::AFGR64RegClass);
995
2
  emitInst(Mips::CVT_D32_S, DestReg).addReg(SrcReg);
996
2
  updateValueMap(I, DestReg);
997
2
  return true;
998
2
}
999
1000
8
bool MipsFastISel::selectSelect(const Instruction *I) {
1001
8
  assert(isa<SelectInst>(I) && "Expected a select instruction.");
1002
8
1003
8
  DEBUG(dbgs() << "selectSelect\n");
1004
8
1005
8
  MVT VT;
1006
8
  if (!isTypeSupported(I->getType(), VT) || UnsupportedFPMode) {
1007
0
    DEBUG(dbgs() << ".. .. gave up (!isTypeSupported || UnsupportedFPMode)\n");
1008
0
    return false;
1009
0
  }
1010
8
1011
8
  unsigned CondMovOpc;
1012
8
  const TargetRegisterClass *RC;
1013
8
1014
8
  if (VT.isInteger() && !VT.isVector() && VT.getSizeInBits() <= 32) {
1015
4
    CondMovOpc = Mips::MOVN_I_I;
1016
4
    RC = &Mips::GPR32RegClass;
1017
8
  } else if (VT == MVT::f32) {
1018
2
    CondMovOpc = Mips::MOVN_I_S;
1019
2
    RC = &Mips::FGR32RegClass;
1020
4
  } else if (VT == MVT::f64) {
1021
2
    CondMovOpc = Mips::MOVN_I_D32;
1022
2
    RC = &Mips::AFGR64RegClass;
1023
2
  } else
1024
0
    return false;
1025
8
1026
8
  const SelectInst *SI = cast<SelectInst>(I);
1027
8
  const Value *Cond = SI->getCondition();
1028
8
  unsigned Src1Reg = getRegForValue(SI->getTrueValue());
1029
8
  unsigned Src2Reg = getRegForValue(SI->getFalseValue());
1030
8
  unsigned CondReg = getRegForValue(Cond);
1031
8
1032
8
  if (!Src1Reg || !Src2Reg || !CondReg)
1033
0
    return false;
1034
8
1035
8
  unsigned ZExtCondReg = createResultReg(&Mips::GPR32RegClass);
1036
8
  if (!ZExtCondReg)
1037
0
    return false;
1038
8
1039
8
  
  if (!emitIntExt(MVT::i1, CondReg, MVT::i32, ZExtCondReg, true))
1040
0
    return false;
1041
8
1042
8
  unsigned ResultReg = createResultReg(RC);
1043
8
  unsigned TempReg = createResultReg(RC);
1044
8
1045
8
  if (!ResultReg || !TempReg)
1046
0
    return false;
1047
8
1048
8
  emitInst(TargetOpcode::COPY, TempReg).addReg(Src2Reg);
1049
8
  emitInst(CondMovOpc, ResultReg)
1050
8
    .addReg(Src1Reg).addReg(ZExtCondReg).addReg(TempReg);
1051
8
  updateValueMap(I, ResultReg);
1052
8
  return true;
1053
8
}
1054
1055
// Attempt to fast-select a floating-point truncate instruction.
1056
2
bool MipsFastISel::selectFPTrunc(const Instruction *I) {
1057
2
  if (UnsupportedFPMode)
1058
0
    return false;
1059
2
  Value *Src = I->getOperand(0);
1060
2
  EVT SrcVT = TLI.getValueType(DL, Src->getType(), true);
1061
2
  EVT DestVT = TLI.getValueType(DL, I->getType(), true);
1062
2
1063
2
  if (SrcVT != MVT::f64 || DestVT != MVT::f32)
1064
0
    return false;
1065
2
1066
2
  unsigned SrcReg = getRegForValue(Src);
1067
2
  if (!SrcReg)
1068
0
    return false;
1069
2
1070
2
  unsigned DestReg = createResultReg(&Mips::FGR32RegClass);
1071
2
  if (!DestReg)
1072
0
    return false;
1073
2
1074
2
  emitInst(Mips::CVT_S_D32, DestReg).addReg(SrcReg);
1075
2
  updateValueMap(I, DestReg);
1076
2
  return true;
1077
2
}
1078
1079
// Attempt to fast-select a floating-point-to-integer conversion.
1080
4
bool MipsFastISel::selectFPToInt(const Instruction *I, bool IsSigned) {
1081
4
  if (UnsupportedFPMode)
1082
0
    return false;
1083
4
  MVT DstVT, SrcVT;
1084
4
  if (!IsSigned)
1085
0
    return false; // We don't handle this case yet. There is no native
1086
4
                  // instruction for this but it can be synthesized.
1087
4
  Type *DstTy = I->getType();
1088
4
  if (!isTypeLegal(DstTy, DstVT))
1089
0
    return false;
1090
4
1091
4
  
  if (DstVT != MVT::i32)
1092
0
    return false;
1093
4
1094
4
  Value *Src = I->getOperand(0);
1095
4
  Type *SrcTy = Src->getType();
1096
4
  if (!isTypeLegal(SrcTy, SrcVT))
1097
0
    return false;
1098
4
1099
4
  
  if (SrcVT != MVT::f32 && SrcVT != MVT::f64)
1100
0
    return false;
1101
4
1102
4
  unsigned SrcReg = getRegForValue(Src);
1103
4
  if (SrcReg == 0)
1104
0
    return false;
1105
4
1106
4
  // Determine the opcode for the conversion, which takes place
1107
4
  // entirely within FPRs.
1108
4
  unsigned DestReg = createResultReg(&Mips::GPR32RegClass);
1109
4
  unsigned TempReg = createResultReg(&Mips::FGR32RegClass);
1110
4
  unsigned Opc = (SrcVT == MVT::f32) ? Mips::TRUNC_W_S : Mips::TRUNC_W_D32;
1111
4
1112
4
  // Generate the convert.
1113
4
  emitInst(Opc, TempReg).addReg(SrcReg);
1114
4
  emitInst(Mips::MFC1, DestReg).addReg(TempReg);
1115
4
1116
4
  updateValueMap(I, DestReg);
1117
4
  return true;
1118
4
}
1119
1120
bool MipsFastISel::processCallArgs(CallLoweringInfo &CLI,
1121
                                   SmallVectorImpl<MVT> &OutVTs,
1122
59
                                   unsigned &NumBytes) {
1123
59
  CallingConv::ID CC = CLI.CallConv;
1124
59
  SmallVector<CCValAssign, 16> ArgLocs;
1125
59
  CCState CCInfo(CC, false, *FuncInfo.MF, ArgLocs, *Context);
1126
59
  CCInfo.AnalyzeCallOperands(OutVTs, CLI.OutFlags, CCAssignFnForCall(CC));
1127
59
  // Get a count of how many bytes are to be pushed on the stack.
1128
59
  NumBytes = CCInfo.getNextStackOffset();
1129
59
  // This is the minimum argument area used for A0-A3.
1130
59
  if (NumBytes < 16)
1131
41
    NumBytes = 16;
1132
59
1133
59
  emitInst(Mips::ADJCALLSTACKDOWN).addImm(16).addImm(0);
1134
59
  // Process the args.
1135
59
  MVT firstMVT;
1136
215
  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
1137
156
    CCValAssign &VA = ArgLocs[i];
1138
156
    const Value *ArgVal = CLI.OutVals[VA.getValNo()];
1139
156
    MVT ArgVT = OutVTs[VA.getValNo()];
1140
156
1141
156
    if (i == 0) {
1142
58
      firstMVT = ArgVT;
1143
58
      if (ArgVT == MVT::f32) {
1144
10
        VA.convertToReg(Mips::F12);
1145
58
      } else if (ArgVT == MVT::f64) {
1146
4
        VA.convertToReg(Mips::D6);
1147
4
      }
1148
156
    } else if (i == 1) {
1149
48
      if ((firstMVT == MVT::f32) || (firstMVT == MVT::f64)) {
1150
10
        if (ArgVT == MVT::f32) {
1151
2
          VA.convertToReg(Mips::F14);
1152
10
        } else if (ArgVT == MVT::f64) {
1153
2
          VA.convertToReg(Mips::D7);
1154
2
        }
1155
10
      }
1156
98
    }
1157
156
    if (((ArgVT == MVT::i32) || (ArgVT == MVT::f32) || (ArgVT == MVT::i16) ||
1158
28
         (ArgVT == MVT::i8)) &&
1159
156
        VA.isMemLoc()) {
1160
138
      switch (VA.getLocMemOffset()) {
1161
44
      case 0:
1162
44
        VA.convertToReg(Mips::A0);
1163
44
        break;
1164
44
      case 4:
1165
44
        VA.convertToReg(Mips::A1);
1166
44
        break;
1167
34
      case 8:
1168
34
        VA.convertToReg(Mips::A2);
1169
34
        break;
1170
16
      case 12:
1171
16
        VA.convertToReg(Mips::A3);
1172
16
        break;
1173
0
      default:
1174
0
        break;
1175
156
      }
1176
156
    }
1177
156
    unsigned ArgReg = getRegForValue(ArgVal);
1178
156
    if (!ArgReg)
1179
0
      return false;
1180
156
1181
156
    // Handle arg promotion: SExt, ZExt, AExt.
1182
156
    switch (VA.getLocInfo()) {
1183
114
    case CCValAssign::Full:
1184
114
      break;
1185
42
    case CCValAssign::AExt:
1186
42
    case CCValAssign::SExt: {
1187
42
      MVT DestVT = VA.getLocVT();
1188
42
      MVT SrcVT = ArgVT;
1189
42
      ArgReg = emitIntExt(SrcVT, ArgReg, DestVT, /*isZExt=*/false);
1190
42
      if (!ArgReg)
1191
0
        return false;
1192
42
      break;
1193
42
    }
1194
0
    case CCValAssign::ZExt: {
1195
0
      MVT DestVT = VA.getLocVT();
1196
0
      MVT SrcVT = ArgVT;
1197
0
      ArgReg = emitIntExt(SrcVT, ArgReg, DestVT, /*isZExt=*/true);
1198
0
      if (!ArgReg)
1199
0
        return false;
1200
0
      break;
1201
0
    }
1202
0
    default:
1203
0
      llvm_unreachable("Unknown arg promotion!");
1204
156
    }
1205
156
1206
156
    // Now copy/store arg to correct locations.
1207
156
    
    if (VA.isRegLoc() && !VA.needsCustom()) {
1208
156
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
1209
156
              TII.get(TargetOpcode::COPY), VA.getLocReg()).addReg(ArgReg);
1210
156
      CLI.OutRegs.push_back(VA.getLocReg());
1211
156
    } else if (VA.needsCustom()) {
1212
0
      llvm_unreachable("Mips does not use custom args.");
1213
0
      return false;
1214
0
    } else {
1215
0
      //
1216
0
      // FIXME: This path will currently return false. It was copied
1217
0
      // from the AArch64 port and should be essentially fine for Mips too.
1218
0
      // The work to finish up this path will be done in a follow-on patch.
1219
0
      //
1220
0
      assert(VA.isMemLoc() && "Assuming store on stack.");
1221
0
      // Don't emit stores for undef values.
1222
0
      if (isa<UndefValue>(ArgVal))
1223
0
        continue;
1224
0
1225
0
      // Need to store on the stack.
1226
0
      // FIXME: This alignment is incorrect but this path is disabled
1227
0
      // for now (will return false). We need to determine the right alignment
1228
0
      // based on the normal alignment for the underlying machine type.
1229
0
      //
1230
0
      unsigned ArgSize = alignTo(ArgVT.getSizeInBits(), 4);
1231
0
1232
0
      unsigned BEAlign = 0;
1233
0
      if (ArgSize < 8 && !Subtarget->isLittle())
1234
0
        BEAlign = 8 - ArgSize;
1235
0
1236
0
      Address Addr;
1237
0
      Addr.setKind(Address::RegBase);
1238
0
      Addr.setReg(Mips::SP);
1239
0
      Addr.setOffset(VA.getLocMemOffset() + BEAlign);
1240
0
1241
0
      unsigned Alignment = DL.getABITypeAlignment(ArgVal->getType());
1242
0
      MachineMemOperand *MMO = FuncInfo.MF->getMachineMemOperand(
1243
0
          MachinePointerInfo::getStack(*FuncInfo.MF, Addr.getOffset()),
1244
0
          MachineMemOperand::MOStore, ArgVT.getStoreSize(), Alignment);
1245
0
      (void)(MMO);
1246
0
      // if (!emitStore(ArgVT, ArgReg, Addr, MMO))
1247
0
      return false; // can't store on the stack yet.
1248
0
    }
1249
156
  }
1250
59
1251
59
  return true;
1252
59
}
1253
1254
bool MipsFastISel::finishCall(CallLoweringInfo &CLI, MVT RetVT,
1255
59
                              unsigned NumBytes) {
1256
59
  CallingConv::ID CC = CLI.CallConv;
1257
59
  emitInst(Mips::ADJCALLSTACKUP).addImm(16).addImm(0);
1258
59
  if (RetVT != MVT::isVoid) {
1259
0
    SmallVector<CCValAssign, 16> RVLocs;
1260
0
    MipsCCState CCInfo(CC, false, *FuncInfo.MF, RVLocs, *Context);
1261
0
1262
0
    CCInfo.AnalyzeCallResult(CLI.Ins, RetCC_Mips, CLI.RetTy,
1263
0
                             CLI.Symbol ? CLI.Symbol->getName().data()
1264
0
                                        : nullptr);
1265
0
1266
0
    // Only handle a single return value.
1267
0
    if (RVLocs.size() != 1)
1268
0
      return false;
1269
0
    // Copy all of the result registers out of their specified physreg.
1270
0
    MVT CopyVT = RVLocs[0].getValVT();
1271
0
    // Special handling for extended integers.
1272
0
    if (RetVT == MVT::i1 || RetVT == MVT::i8 || RetVT == MVT::i16)
1273
0
      CopyVT = MVT::i32;
1274
0
1275
0
    unsigned ResultReg = createResultReg(TLI.getRegClassFor(CopyVT));
1276
0
    if (!ResultReg)
1277
0
      return false;
1278
0
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
1279
0
            TII.get(TargetOpcode::COPY),
1280
0
            ResultReg).addReg(RVLocs[0].getLocReg());
1281
0
    CLI.InRegs.push_back(RVLocs[0].getLocReg());
1282
0
1283
0
    CLI.ResultReg = ResultReg;
1284
0
    CLI.NumResultRegs = 1;
1285
0
  }
1286
59
  return true;
1287
59
}
1288
1289
301
bool MipsFastISel::fastLowerArguments() {
1290
301
  DEBUG(dbgs() << "fastLowerArguments\n");
1291
301
1292
301
  if (
!FuncInfo.CanLowerReturn301
) {
1293
0
    DEBUG(dbgs() << ".. gave up (!CanLowerReturn)\n");
1294
0
    return false;
1295
0
  }
1296
301
1297
301
  const Function *F = FuncInfo.Fn;
1298
301
  if (
F->isVarArg()301
) {
1299
0
    DEBUG(dbgs() << ".. gave up (varargs)\n");
1300
0
    return false;
1301
0
  }
1302
301
1303
301
  CallingConv::ID CC = F->getCallingConv();
1304
301
  if (
CC != CallingConv::C301
) {
1305
0
    DEBUG(dbgs() << ".. gave up (calling convention is not C)\n");
1306
0
    return false;
1307
0
  }
1308
301
1309
301
  const ArrayRef<MCPhysReg> GPR32ArgRegs = {Mips::A0, Mips::A1, Mips::A2,
1310
301
                                            Mips::A3};
1311
301
  const ArrayRef<MCPhysReg> FGR32ArgRegs = {Mips::F12, Mips::F14};
1312
301
  const ArrayRef<MCPhysReg> AFGR64ArgRegs = {Mips::D6, Mips::D7};
1313
301
  ArrayRef<MCPhysReg>::iterator NextGPR32 = GPR32ArgRegs.begin();
1314
301
  ArrayRef<MCPhysReg>::iterator NextFGR32 = FGR32ArgRegs.begin();
1315
301
  ArrayRef<MCPhysReg>::iterator NextAFGR64 = AFGR64ArgRegs.begin();
1316
301
1317
301
  struct AllocatedReg {
1318
301
    const TargetRegisterClass *RC;
1319
301
    unsigned Reg;
1320
301
    AllocatedReg(const TargetRegisterClass *RC, unsigned Reg)
1321
40
        : RC(RC), Reg(Reg) {}
1322
301
  };
1323
301
1324
301
  // Only handle simple cases. i.e. All arguments are directly mapped to
1325
301
  // registers of the appropriate type.
1326
301
  SmallVector<AllocatedReg, 4> Allocation;
1327
52
  for (const auto &FormalArg : F->args()) {
1328
52
    if (FormalArg.hasAttribute(Attribute::InReg) ||
1329
52
        FormalArg.hasAttribute(Attribute::StructRet) ||
1330
52
        
        FormalArg.hasAttribute(Attribute::ByVal)) {
1331
0
      DEBUG(dbgs() << ".. gave up (inreg, structret, byval)\n");
1332
0
      return false;
1333
0
    }
1334
52
1335
52
    Type *ArgTy = FormalArg.getType();
1336
52
    if (ArgTy->isStructTy() || ArgTy->isArrayTy() || ArgTy->isVectorTy()) {
1337
0
      DEBUG(dbgs() << ".. gave up (struct, array, or vector)\n");
1338
0
      return false;
1339
0
    }
1340
52
1341
52
    EVT ArgVT = TLI.getValueType(DL, ArgTy);
1342
52
    DEBUG(dbgs() << ".. " << FormalArg.getArgNo() << ": "
1343
52
                 << ArgVT.getEVTString() << "\n");
1344
52
    if (!ArgVT.isSimple()) {
1345
0
      DEBUG(dbgs() << ".. .. gave up (not a simple type)\n");
1346
0
      return false;
1347
0
    }
1348
52
1349
52
    switch (ArgVT.getSimpleVT().SimpleTy) {
1350
3
    case MVT::i1:
1351
3
    case MVT::i8:
1352
3
    case MVT::i16:
1353
3
      if (!FormalArg.hasAttribute(Attribute::SExt) &&
1354
3
          
          !FormalArg.hasAttribute(Attribute::ZExt)) {
1355
3
        // It must be any extend, this shouldn't happen for clang-generated IR
1356
3
        // so just fall back on SelectionDAG.
1357
3
        DEBUG(dbgs() << ".. .. gave up (i8/i16 arg is not extended)\n");
1358
3
        return false;
1359
3
      }
1360
0
1361
0
      
      if (NextGPR32 == GPR32ArgRegs.end()) {
1362
0
        DEBUG(dbgs() << ".. .. gave up (ran out of GPR32 arguments)\n");
1363
0
        return false;
1364
0
      }
1365
0
1366
0
      
      DEBUG(dbgs() << ".. .. GPR32(" << *NextGPR32 << ")\n");
1367
0
      Allocation.emplace_back(&Mips::GPR32RegClass, *NextGPR32++);
1368
0
1369
0
      // Allocating any GPR32 prohibits further use of floating point arguments.
1370
0
      NextFGR32 = FGR32ArgRegs.end();
1371
0
      NextAFGR64 = AFGR64ArgRegs.end();
1372
0
      break;
1373
0
1374
37
    case MVT::i32:
1375
37
      if (FormalArg.hasAttribute(Attribute::ZExt)) {
1376
0
        // The O32 ABI does not permit a zero-extended i32.
1377
0
        DEBUG(dbgs() << ".. .. gave up (i32 arg is zero extended)\n");
1378
0
        return false;
1379
0
      }
1380
37
1381
37
      
      if (NextGPR32 == GPR32ArgRegs.end()) {
1382
3
        DEBUG(dbgs() << ".. .. gave up (ran out of GPR32 arguments)\n");
1383
3
        return false;
1384
3
      }
1385
34
1386
34
      
      DEBUG(dbgs() << ".. .. GPR32(" << *NextGPR32 << ")\n");
1387
34
      Allocation.emplace_back(&Mips::GPR32RegClass, *NextGPR32++);
1388
34
1389
34
      // Allocating any GPR32 prohibits further use of floating point arguments.
1390
34
      NextFGR32 = FGR32ArgRegs.end();
1391
34
      NextAFGR64 = AFGR64ArgRegs.end();
1392
34
      break;
1393
34
1394
3
    case MVT::f32:
1395
3
      if (UnsupportedFPMode) {
1396
0
        DEBUG(dbgs() << ".. .. gave up (UnsupportedFPMode)\n");
1397
0
        return false;
1398
0
      }
1399
3
      
      if (NextFGR32 == FGR32ArgRegs.end()) {
1400
1
        DEBUG(dbgs() << ".. .. gave up (ran out of FGR32 arguments)\n");
1401
1
        return false;
1402
1
      }
1403
2
      
      DEBUG(dbgs() << ".. .. FGR32(" << *NextFGR32 << ")\n");
1404
2
      Allocation.emplace_back(&Mips::FGR32RegClass, *NextFGR32++);
1405
2
      // Allocating an FGR32 also allocates the super-register AFGR64, and
1406
2
      // ABI rules require us to skip the corresponding GPR32.
1407
2
      if (NextGPR32 != GPR32ArgRegs.end())
1408
2
        NextGPR32++;
1409
2
      if (NextAFGR64 != AFGR64ArgRegs.end())
1410
2
        NextAFGR64++;
1411
2
      break;
1412
2
1413
7
    case MVT::f64:
1414
7
      if (UnsupportedFPMode) {
1415
2
        DEBUG(dbgs() << ".. .. gave up (UnsupportedFPMode)\n");
1416
2
        return false;
1417
2
      }
1418
5
      
      if (NextAFGR64 == AFGR64ArgRegs.end()) {
1419
1
        DEBUG(dbgs() << ".. .. gave up (ran out of AFGR64 arguments)\n");
1420
1
        return false;
1421
1
      }
1422
4
      
      DEBUG(dbgs() << ".. .. AFGR64(" << *NextAFGR64 << ")\n");
1423
4
      Allocation.emplace_back(&Mips::AFGR64RegClass, *NextAFGR64++);
1424
4
      // Allocating an FGR32 also allocates the super-register AFGR64, and
1425
4
      // ABI rules require us to skip the corresponding GPR32 pair.
1426
4
      if (NextGPR32 != GPR32ArgRegs.end())
1427
4
        NextGPR32++;
1428
4
      if (NextGPR32 != GPR32ArgRegs.end())
1429
4
        NextGPR32++;
1430
4
      if (NextFGR32 != FGR32ArgRegs.end())
1431
4
        NextFGR32++;
1432
4
      break;
1433
4
1434
2
    default:
1435
2
      DEBUG(dbgs() << ".. .. gave up (unknown type)\n");
1436
3
      return false;
1437
289
    }
1438
289
  }
1439
289
1440
289
  
  for (const auto &FormalArg : F->args()) {
1441
28
    unsigned ArgNo = FormalArg.getArgNo();
1442
28
    unsigned SrcReg = Allocation[ArgNo].Reg;
1443
28
    unsigned DstReg = FuncInfo.MF->addLiveIn(SrcReg, Allocation[ArgNo].RC);
1444
28
    // FIXME: Unfortunately it's necessary to emit a copy from the livein copy.
1445
28
    // Without this, EmitLiveInCopies may eliminate the livein if its only
1446
28
    // use is a bitcast (which isn't turned into an instruction).
1447
28
    unsigned ResultReg = createResultReg(Allocation[ArgNo].RC);
1448
28
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
1449
28
            TII.get(TargetOpcode::COPY), ResultReg)
1450
28
        .addReg(DstReg, getKillRegState(true));
1451
28
    updateValueMap(&FormalArg, ResultReg);
1452
28
  }
1453
301
1454
301
  // Calculate the size of the incoming arguments area.
1455
301
  // We currently reject all the cases where this would be non-zero.
1456
301
  unsigned IncomingArgSizeInBytes = 0;
1457
301
1458
301
  // Account for the reserved argument area on ABI's that have one (O32).
1459
301
  // It seems strange to do this on the caller side but it's necessary in
1460
301
  // SelectionDAG's implementation.
1461
301
  IncomingArgSizeInBytes = std::min(getABI().GetCalleeAllocdArgSizeInBytes(CC),
1462
301
                                    IncomingArgSizeInBytes);
1463
301
1464
301
  MF->getInfo<MipsFunctionInfo>()->setFormalArgInfo(IncomingArgSizeInBytes,
1465
301
                                                    false);
1466
301
1467
301
  return true;
1468
301
}
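Editor's note on the argument allocation above: on O32, handing out a GPR32 ends floating-point argument passing, taking an FGR32 also consumes one GPR32 slot, and taking an AFGR64 consumes a GPR32 pair plus an FGR32. The following standalone sketch mirrors only that bookkeeping; the ArgSlots type and the slot counts (four integer slots, two FP registers of each width) are illustrative assumptions, not code from the listing.

    #include <cstddef>

    // Illustrative only: remaining O32 argument slots and the skipping rules
    // applied by fastLowerArguments when each kind of register is taken.
    struct ArgSlots {
      std::size_t GPR32 = 4, FGR32 = 2, AFGR64 = 2;

      bool takeGPR32() {   // an integer arg prohibits further FP arg registers
        if (!GPR32) return false;
        --GPR32; FGR32 = 0; AFGR64 = 0;
        return true;
      }
      bool takeFGR32() {   // an f32 arg also skips the corresponding GPR32
        if (!FGR32) return false;
        --FGR32; if (AFGR64) --AFGR64; if (GPR32) --GPR32;
        return true;
      }
      bool takeAFGR64() {  // an f64 arg skips a GPR32 pair and an FGR32
        if (!AFGR64) return false;
        --AFGR64; if (GPR32) --GPR32; if (GPR32) --GPR32; if (FGR32) --FGR32;
        return true;
      }
    };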
1469
1470
65
bool MipsFastISel::fastLowerCall(CallLoweringInfo &CLI) {
1471
65
  CallingConv::ID CC = CLI.CallConv;
1472
65
  bool IsTailCall = CLI.IsTailCall;
1473
65
  bool IsVarArg = CLI.IsVarArg;
1474
65
  const Value *Callee = CLI.Callee;
1475
65
  MCSymbol *Symbol = CLI.Symbol;
1476
65
1477
65
  // Do not handle FastCC.
1478
65
  if (CC == CallingConv::Fast)
1479
1
    return false;
1480
64
1481
64
  // Allow SelectionDAG isel to handle tail calls.
1482
64
  if (IsTailCall)
1483
1
    return false;
1484
63
1485
63
  // Let SDISel handle vararg functions.
1486
63
  if (IsVarArg)
1487
4
    return false;
1488
59
1489
59
  // FIXME: Only handle *simple* calls for now.
1490
59
  MVT RetVT;
1491
59
  if (CLI.RetTy->isVoidTy())
1492
59
    RetVT = MVT::isVoid;
1493
0
  else if (!isTypeSupported(CLI.RetTy, RetVT))
1494
0
    return false;
1495
59
1496
59
  for (auto Flag : CLI.OutFlags)
1497
156
    if (Flag.isInReg() || Flag.isSRet() || Flag.isNest() || Flag.isByVal())
1498
0
      return false;
1499
59
1500
59
  // Set up the argument vectors.
1501
59
  SmallVector<MVT, 16> OutVTs;
1502
59
  OutVTs.reserve(CLI.OutVals.size());
1503
59
1504
156
  for (auto *Val : CLI.OutVals) {
1505
156
    MVT VT;
1506
156
    if (!isTypeLegal(Val->getType(), VT) &&
1507
42
        !(VT == MVT::i1 || VT == MVT::i8 || VT == MVT::i16))
1508
0
      return false;
1509
156
1510
156
    // We don't handle vector parameters yet.
1511
156
    if (VT.isVector() || VT.getSizeInBits() > 64)
1512
0
      return false;
1513
156
1514
156
    OutVTs.push_back(VT);
1515
156
  }
1516
59
1517
59
  Address Addr;
1518
59
  if (!computeCallAddress(Callee, Addr))
1519
0
    return false;
1520
59
1521
59
  // Handle the arguments now that we've gotten them.
1522
59
  unsigned NumBytes;
1523
59
  if (!processCallArgs(CLI, OutVTs, NumBytes))
1524
0
    return false;
1525
59
1526
59
  if (!Addr.getGlobalValue())
1527
0
    return false;
1528
59
1529
59
  // Issue the call.
1530
59
  unsigned DestAddress;
1531
59
  if (Symbol)
1532
6
    DestAddress = materializeExternalCallSym(Symbol);
1533
59
  else
1534
53
    DestAddress = materializeGV(Addr.getGlobalValue(), MVT::i32);
1535
59
  emitInst(TargetOpcode::COPY, Mips::T9).addReg(DestAddress);
1536
59
  MachineInstrBuilder MIB =
1537
59
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Mips::JALR),
1538
59
              Mips::RA).addReg(Mips::T9);
1539
59
1540
59
  // Add implicit physical register uses to the call.
1541
59
  for (auto Reg : CLI.OutRegs)
1542
156
    MIB.addReg(Reg, RegState::Implicit);
1543
65
1544
65
  // Add a register mask with the call-preserved registers.
1545
65
  // Proper defs for return values will be added by setPhysRegsDeadExcept().
1546
65
  MIB.addRegMask(TRI.getCallPreservedMask(*FuncInfo.MF, CC));
1547
65
1548
65
  CLI.Call = MIB;
1549
65
1550
65
  // Finish off the call including any return values.
1551
65
  return finishCall(CLI, RetVT, NumBytes);
1552
65
}
1553
1554
12
bool MipsFastISel::fastLowerIntrinsicCall(const IntrinsicInst *II) {
1555
12
  switch (II->getIntrinsicID()) {
1556
2
  default:
1557
2
    return false;
1558
4
  case Intrinsic::bswap: {
1559
4
    Type *RetTy = II->getCalledFunction()->getReturnType();
1560
4
1561
4
    MVT VT;
1562
4
    if (!isTypeSupported(RetTy, VT))
1563
0
      return false;
1564
4
1565
4
    unsigned SrcReg = getRegForValue(II->getOperand(0));
1566
4
    if (SrcReg == 0)
1567
0
      return false;
1568
4
    unsigned DestReg = createResultReg(&Mips::GPR32RegClass);
1569
4
    if (DestReg == 0)
1570
0
      return false;
1571
4
    if (VT == MVT::i16) {
1572
2
      if (Subtarget->hasMips32r2()) {
1573
1
        emitInst(Mips::WSBH, DestReg).addReg(SrcReg);
1574
1
        updateValueMap(II, DestReg);
1575
1
        return true;
1576
0
      } else {
1577
1
        unsigned TempReg[3];
1578
4
        for (int i = 0; i < 3; i++) {
1579
3
          TempReg[i] = createResultReg(&Mips::GPR32RegClass);
1580
3
          if (TempReg[i] == 0)
1581
0
            return false;
1582
3
        }
1583
1
        emitInst(Mips::SLL, TempReg[0]).addReg(SrcReg).addImm(8);
1584
1
        emitInst(Mips::SRL, TempReg[1]).addReg(SrcReg).addImm(8);
1585
1
        emitInst(Mips::OR, TempReg[2]).addReg(TempReg[0]).addReg(TempReg[1]);
1586
1
        emitInst(Mips::ANDi, DestReg).addReg(TempReg[2]).addImm(0xFFFF);
1587
1
        updateValueMap(II, DestReg);
1588
1
        return true;
1589
4
      }
1590
2
    } else if (VT == MVT::i32) {
1591
2
      if (Subtarget->hasMips32r2()) {
1592
1
        unsigned TempReg = createResultReg(&Mips::GPR32RegClass);
1593
1
        emitInst(Mips::WSBH, TempReg).addReg(SrcReg);
1594
1
        emitInst(Mips::ROTR, DestReg).addReg(TempReg).addImm(16);
1595
1
        updateValueMap(II, DestReg);
1596
1
        return true;
1597
0
      } else {
1598
1
        unsigned TempReg[8];
1599
9
        for (int i = 0; i < 8; i++) {
1600
8
          TempReg[i] = createResultReg(&Mips::GPR32RegClass);
1601
8
          if (TempReg[i] == 0)
1602
0
            return false;
1603
8
        }
1604
1
1605
1
        emitInst(Mips::SRL, TempReg[0]).addReg(SrcReg).addImm(8);
1606
1
        emitInst(Mips::SRL, TempReg[1]).addReg(SrcReg).addImm(24);
1607
1
        emitInst(Mips::ANDi, TempReg[2]).addReg(TempReg[0]).addImm(0xFF00);
1608
1
        emitInst(Mips::OR, TempReg[3]).addReg(TempReg[1]).addReg(TempReg[2]);
1609
1
1610
1
        emitInst(Mips::ANDi, TempReg[4]).addReg(SrcReg).addImm(0xFF00);
1611
1
        emitInst(Mips::SLL, TempReg[5]).addReg(TempReg[4]).addImm(8);
1612
1
1613
1
        emitInst(Mips::SLL, TempReg[6]).addReg(SrcReg).addImm(24);
1614
1
        emitInst(Mips::OR, TempReg[7]).addReg(TempReg[3]).addReg(TempReg[5]);
1615
1
        emitInst(Mips::OR, DestReg).addReg(TempReg[6]).addReg(TempReg[7]);
1616
1
        updateValueMap(II, DestReg);
1617
1
        return true;
1618
0
      }
1619
2
    }
1620
0
    return false;
1621
0
  }
1622
4
  case Intrinsic::memcpy:
1623
4
  case Intrinsic::memmove: {
1624
4
    const auto *MTI = cast<MemTransferInst>(II);
1625
4
    // Don't handle volatile.
1626
4
    if (MTI->isVolatile())
1627
0
      return false;
1628
4
    if (!MTI->getLength()->getType()->isIntegerTy(32))
1629
0
      return false;
1630
4
    const char *IntrMemName = isa<MemCpyInst>(II) ? "memcpy" : "memmove";
1631
4
    return lowerCallTo(II, IntrMemName, II->getNumArgOperands() - 2);
1632
4
  }
1633
2
  case Intrinsic::memset: {
1634
2
    const MemSetInst *MSI = cast<MemSetInst>(II);
1635
2
    // Don't handle volatile.
1636
2
    if (MSI->isVolatile())
1637
0
      return false;
1638
2
    if (!MSI->getLength()->getType()->isIntegerTy(32))
1639
0
      return false;
1640
2
    return lowerCallTo(II, "memset", II->getNumArgOperands() - 2);
1641
2
  }
1642
0
  }
1643
0
  return false;
1644
0
}
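Editor's note on the bswap fallback paths above (pre-MIPS32r2, where WSBH/ROTR are unavailable): the shift-and-mask sequences compute an ordinary byte swap of the low 16 or 32 bits. A small sketch of the same operation in portable C++, for comparison only; the function names bswap16/bswap32 are illustrative and not part of the listing.

    #include <cstdint>

    // 16-bit byte swap: swap the two low bytes; result fits in the low 16 bits.
    uint32_t bswap16(uint32_t X) {
      return ((X << 8) | (X >> 8)) & 0xFFFF;
    }

    // 32-bit byte swap: reverse all four bytes, matching the SRL/SLL/ANDi/OR
    // sequence emitted for Intrinsic::bswap on i32.
    uint32_t bswap32(uint32_t X) {
      return ((X >> 24) & 0x000000FF) |
             ((X >> 8)  & 0x0000FF00) |
             ((X << 8)  & 0x00FF0000) |
             ((X << 24) & 0xFF000000);
    }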
1645
1646
299
bool MipsFastISel::selectRet(const Instruction *I) {
1647
299
  const Function &F = *I->getParent()->getParent();
1648
299
  const ReturnInst *Ret = cast<ReturnInst>(I);
1649
299
1650
299
  DEBUG(dbgs() << "selectRet\n");
1651
299
1652
299
  if (!FuncInfo.CanLowerReturn)
1653
0
    return false;
1654
299
1655
299
  // Build a list of return value registers.
1656
299
  SmallVector<unsigned, 4> RetRegs;
1657
299
1658
299
  if (Ret->getNumOperands() > 0) {
1659
31
    CallingConv::ID CC = F.getCallingConv();
1660
31
1661
31
    // Do not handle FastCC.
1662
31
    if (CC == CallingConv::Fast)
1663
0
      return false;
1664
31
1665
31
    SmallVector<ISD::OutputArg, 4> Outs;
1666
31
    GetReturnInfo(F.getReturnType(), F.getAttributes(), Outs, TLI, DL);
1667
31
1668
31
    // Analyze operands of the call, assigning locations to each operand.
1669
31
    SmallVector<CCValAssign, 16> ValLocs;
1670
31
    MipsCCState CCInfo(CC, F.isVarArg(), *FuncInfo.MF, ValLocs,
1671
31
                       I->getContext());
1672
31
    CCAssignFn *RetCC = RetCC_Mips;
1673
31
    CCInfo.AnalyzeReturn(Outs, RetCC);
1674
31
1675
31
    // Only handle a single return value for now.
1676
31
    if (ValLocs.size() != 1)
1677
4
      return false;
1678
27
1679
27
    CCValAssign &VA = ValLocs[0];
1680
27
    const Value *RV = Ret->getOperand(0);
1681
27
1682
27
    // Don't bother handling odd stuff for now.
1683
27
    if ((VA.getLocInfo() != CCValAssign::Full) &&
1684
0
        (VA.getLocInfo() != CCValAssign::BCvt))
1685
0
      return false;
1686
27
1687
27
    // Only handle register returns for now.
1688
27
    if (!VA.isRegLoc())
1689
0
      return false;
1690
27
1691
27
    unsigned Reg = getRegForValue(RV);
1692
27
    if (Reg == 0)
1693
0
      return false;
1694
27
1695
27
    unsigned SrcReg = Reg + VA.getValNo();
1696
27
    unsigned DestReg = VA.getLocReg();
1697
27
    // Avoid a cross-class copy. This is very unlikely.
1698
27
    if (!MRI.getRegClass(SrcReg)->contains(DestReg))
1699
0
      return false;
1700
27
1701
27
    EVT RVEVT = TLI.getValueType(DL, RV->getType());
1702
27
    if (!RVEVT.isSimple())
1703
0
      return false;
1704
27
1705
27
    if (RVEVT.isVector())
1706
0
      return false;
1707
27
1708
27
    MVT RVVT = RVEVT.getSimpleVT();
1709
27
    if (RVVT == MVT::f128)
1710
0
      return false;
1711
27
1712
27
    // Do not handle FGR64 returns for now.
1713
27
    if (RVVT == MVT::f64 && UnsupportedFPMode) {
1714
0
      DEBUG(dbgs() << ".. .. gave up (UnsupportedFPMode\n");
1715
0
      return false;
1716
0
    }
1717
27
1718
27
    MVT DestVT = VA.getValVT();
1719
27
    // Special handling for extended integers.
1720
27
    if (RVVT != DestVT) {
1721
8
      if (RVVT != MVT::i1 && RVVT != MVT::i8 && RVVT != MVT::i16)
1722
0
        return false;
1723
8
1724
8
      if (Outs[0].Flags.isZExt() || Outs[0].Flags.isSExt()) {
1725
2
        bool IsZExt = Outs[0].Flags.isZExt();
1726
2
        SrcReg = emitIntExt(RVVT, SrcReg, DestVT, IsZExt);
1727
2
        if (SrcReg == 0)
1728
0
          return false;
1729
27
      }
1730
8
    }
1731
27
1732
27
    // Make the copy.
1733
27
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
1734
27
            TII.get(TargetOpcode::COPY), DestReg).addReg(SrcReg);
1735
27
1736
27
    // Add register to return instruction.
1737
27
    RetRegs.push_back(VA.getLocReg());
1738
27
  }
1739
295
  MachineInstrBuilder MIB = emitInst(Mips::RetRA);
1740
322
  for (unsigned i = 0, e = RetRegs.size(); i != e; ++i)
1741
27
    MIB.addReg(RetRegs[i], RegState::Implicit);
1742
295
  return true;
1743
299
}
1744
1745
36
bool MipsFastISel::selectTrunc(const Instruction *I) {
1746
36
  // The high bits for a type smaller than the register size are assumed to be
1747
36
  // undefined.
1748
36
  Value *Op = I->getOperand(0);
1749
36
1750
36
  EVT SrcVT, DestVT;
1751
36
  SrcVT = TLI.getValueType(DL, Op->getType(), true);
1752
36
  DestVT = TLI.getValueType(DL, I->getType(), true);
1753
36
1754
36
  if (SrcVT != MVT::i32 && SrcVT != MVT::i16 && SrcVT != MVT::i8)
1755
0
    return false;
1756
36
  if (DestVT != MVT::i16 && DestVT != MVT::i8 && DestVT != MVT::i1)
1757
0
    return false;
1758
36
1759
36
  unsigned SrcReg = getRegForValue(Op);
1760
36
  if (!SrcReg)
1761
0
    return false;
1762
36
1763
36
  // Because the high bits are undefined, a truncate doesn't generate
1764
36
  // any code.
1765
36
  updateValueMap(I, SrcReg);
1766
36
  return true;
1767
36
}
1768
1769
130
bool MipsFastISel::selectIntExt(const Instruction *I) {
1770
130
  Type *DestTy = I->getType();
1771
130
  Value *Src = I->getOperand(0);
1772
130
  Type *SrcTy = Src->getType();
1773
130
1774
130
  bool isZExt = isa<ZExtInst>(I);
1775
130
  unsigned SrcReg = getRegForValue(Src);
1776
130
  if (!SrcReg)
1777
0
    return false;
1778
130
1779
130
  EVT SrcEVT, DestEVT;
1780
130
  SrcEVT = TLI.getValueType(DL, SrcTy, true);
1781
130
  DestEVT = TLI.getValueType(DL, DestTy, true);
1782
130
  if (!SrcEVT.isSimple())
1783
0
    return false;
1784
130
  if (!DestEVT.isSimple())
1785
0
    return false;
1786
130
1787
130
  MVT SrcVT = SrcEVT.getSimpleVT();
1788
130
  MVT DestVT = DestEVT.getSimpleVT();
1789
130
  unsigned ResultReg = createResultReg(&Mips::GPR32RegClass);
1790
130
1791
130
  if (!emitIntExt(SrcVT, SrcReg, DestVT, ResultReg, isZExt))
1792
0
    return false;
1793
130
  updateValueMap(I, ResultReg);
1794
130
  return true;
1795
130
}
1796
1797
bool MipsFastISel::emitIntSExt32r1(MVT SrcVT, unsigned SrcReg, MVT DestVT,
1798
37
                                   unsigned DestReg) {
1799
37
  unsigned ShiftAmt;
1800
37
  switch (SrcVT.SimpleTy) {
1801
0
  default:
1802
0
    return false;
1803
20
  case MVT::i8:
1804
20
    ShiftAmt = 24;
1805
20
    break;
1806
17
  case MVT::i16:
1807
17
    ShiftAmt = 16;
1808
17
    break;
1809
37
  }
1810
37
  unsigned TempReg = createResultReg(&Mips::GPR32RegClass);
1811
37
  emitInst(Mips::SLL, TempReg).addReg(SrcReg).addImm(ShiftAmt);
1812
37
  emitInst(Mips::SRA, DestReg).addReg(TempReg).addImm(ShiftAmt);
1813
37
  return true;
1814
37
}
1815
1816
bool MipsFastISel::emitIntSExt32r2(MVT SrcVT, unsigned SrcReg, MVT DestVT,
1817
43
                                   unsigned DestReg) {
1818
43
  switch (SrcVT.SimpleTy) {
1819
0
  default:
1820
0
    return false;
1821
23
  case MVT::i8:
1822
23
    emitInst(Mips::SEB, DestReg).addReg(SrcReg);
1823
23
    break;
1824
20
  case MVT::i16:
1825
20
    emitInst(Mips::SEH, DestReg).addReg(SrcReg);
1826
20
    break;
1827
43
  }
1828
43
  return true;
1829
43
}
1830
1831
bool MipsFastISel::emitIntSExt(MVT SrcVT, unsigned SrcReg, MVT DestVT,
1832
80
                               unsigned DestReg) {
1833
80
  if ((DestVT != MVT::i32) && (DestVT != MVT::i16))
1834
0
    return false;
1835
80
  if (Subtarget->hasMips32r2())
1836
43
    return emitIntSExt32r2(SrcVT, SrcReg, DestVT, DestReg);
1837
37
  return emitIntSExt32r1(SrcVT, SrcReg, DestVT, DestReg);
1838
37
}
1839
1840
bool MipsFastISel::emitIntZExt(MVT SrcVT, unsigned SrcReg, MVT DestVT,
1841
114
                               unsigned DestReg) {
1842
114
  int64_t Imm;
1843
114
1844
114
  switch (SrcVT.SimpleTy) {
1845
0
  default:
1846
0
    return false;
1847
82
  case MVT::i1:
1848
82
    Imm = 1;
1849
82
    break;
1850
18
  case MVT::i8:
1851
18
    Imm = 0xff;
1852
18
    break;
1853
14
  case MVT::i16:
1854
14
    Imm = 0xffff;
1855
14
    break;
1856
114
  }
1857
114
1858
114
  emitInst(Mips::ANDi, DestReg).addReg(SrcReg).addImm(Imm);
1859
114
  return true;
1860
114
}
1861
1862
bool MipsFastISel::emitIntExt(MVT SrcVT, unsigned SrcReg, MVT DestVT,
1863
194
                              unsigned DestReg, bool IsZExt) {
1864
194
  // FastISel does not have plumbing to deal with extensions where the SrcVT or
1865
194
  // DestVT are odd things, so test to make sure that they are both types we can
1866
194
  // handle (i1/i8/i16/i32 for SrcVT and i8/i16/i32/i64 for DestVT), otherwise
1867
194
  // bail out to SelectionDAG.
1868
194
  if (((DestVT != MVT::i8) && (DestVT != MVT::i16) && (DestVT != MVT::i32)) ||
1869
194
      ((SrcVT != MVT::i1) && (SrcVT != MVT::i8) && (SrcVT != MVT::i16)))
1870
0
    return false;
1871
194
  if (IsZExt)
1872
114
    return emitIntZExt(SrcVT, SrcReg, DestVT, DestReg);
1873
80
  return emitIntSExt(SrcVT, SrcReg, DestVT, DestReg);
1874
80
}
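Editor's note on the emitIntExt helpers above: zero extension is a single ANDi with the width mask, while sign extension uses SEB/SEH on MIPS32r2 or the classic shift-up/arithmetic-shift-down pair otherwise. A portable sketch of both idioms for the 8-bit case; the names zext8/sext8 are illustrative and assume two's-complement arithmetic, as the hardware provides.

    #include <cstdint>

    // Zero-extend the low 8 bits of In into a 32-bit value (mirrors ANDi 0xff).
    uint32_t zext8(uint32_t In) { return In & 0xFF; }

    // Sign-extend the low 8 bits of In (mirrors SLL by 24 followed by SRA by 24
    // when SEB is not available).
    int32_t sext8(uint32_t In) {
      return static_cast<int32_t>(In << 24) >> 24;
    }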
1875
1876
unsigned MipsFastISel::emitIntExt(MVT SrcVT, unsigned SrcReg, MVT DestVT,
1877
44
                                  bool isZExt) {
1878
44
  unsigned DestReg = createResultReg(&Mips::GPR32RegClass);
1879
44
  bool Success = emitIntExt(SrcVT, SrcReg, DestVT, DestReg, isZExt);
1880
44
  return Success ? DestReg : 0;
1881
44
}
1882
1883
8
bool MipsFastISel::selectDivRem(const Instruction *I, unsigned ISDOpcode) {
1884
8
  EVT DestEVT = TLI.getValueType(DL, I->getType(), true);
1885
8
  if (!DestEVT.isSimple())
1886
0
    return false;
1887
8
1888
8
  MVT DestVT = DestEVT.getSimpleVT();
1889
8
  if (DestVT != MVT::i32)
1890
0
    return false;
1891
8
1892
8
  unsigned DivOpc;
1893
8
  switch (ISDOpcode) {
1894
0
  default:
1895
0
    return false;
1896
4
  case ISD::SDIV:
1897
4
  case ISD::SREM:
1898
4
    DivOpc = Mips::SDIV;
1899
4
    break;
1900
4
  case ISD::UDIV:
1901
4
  case ISD::UREM:
1902
4
    DivOpc = Mips::UDIV;
1903
4
    break;
1904
8
  }
1905
8
1906
8
  unsigned Src0Reg = getRegForValue(I->getOperand(0));
1907
8
  unsigned Src1Reg = getRegForValue(I->getOperand(1));
1908
8
  if (!Src0Reg || !Src1Reg)
1909
0
    return false;
1910
8
1911
8
  emitInst(DivOpc).addReg(Src0Reg).addReg(Src1Reg);
1912
8
  emitInst(Mips::TEQ).addReg(Src1Reg).addReg(Mips::ZERO).addImm(7);
1913
8
1914
8
  unsigned ResultReg = createResultReg(&Mips::GPR32RegClass);
1915
8
  if (!ResultReg)
1916
0
    return false;
1917
8
1918
8
  unsigned MFOpc = (ISDOpcode == ISD::SREM || ISDOpcode == ISD::UREM)
1919
4
                       ? Mips::MFHI
1920
4
                       : Mips::MFLO;
1921
8
  emitInst(MFOpc, ResultReg);
1922
8
1923
8
  updateValueMap(I, ResultReg);
1924
8
  return true;
1925
8
}
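Editor's note on the div/rem lowering above: SDIV/UDIV leave the quotient in LO and the remainder in HI, TEQ against $zero traps on a zero divisor, and the result is read back with MFLO or MFHI depending on whether the original IR operation was a division or a remainder. A tiny self-contained sketch of that choice; DivRemKind and moveFromFor are illustrative names, not part of the listing.

    // Illustrative only: remainder lives in HI, quotient in LO, so the
    // move-from opcode follows directly from the kind of operation.
    enum class DivRemKind { SDiv, UDiv, SRem, URem };

    const char *moveFromFor(DivRemKind K) {
      return (K == DivRemKind::SRem || K == DivRemKind::URem) ? "MFHI" : "MFLO";
    }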
1926
1927
12
bool MipsFastISel::selectShift(const Instruction *I) {
1928
12
  MVT RetVT;
1929
12
1930
12
  if (!isTypeSupported(I->getType(), RetVT))
1931
0
    return false;
1932
12
1933
12
  unsigned ResultReg = createResultReg(&Mips::GPR32RegClass);
1934
12
  if (!ResultReg)
1935
0
    return false;
1936
12
1937
12
  unsigned Opcode = I->getOpcode();
1938
12
  const Value *Op0 = I->getOperand(0);
1939
12
  unsigned Op0Reg = getRegForValue(Op0);
1940
12
  if (!Op0Reg)
1941
0
    return false;
1942
12
1943
12
  // If AShr or LShr, then we need to make sure the operand0 is sign extended.
1944
12
  if (Opcode == Instruction::AShr || Opcode == Instruction::LShr) {
1945
8
    unsigned TempReg = createResultReg(&Mips::GPR32RegClass);
1946
8
    if (!TempReg)
1947
0
      return false;
1948
8
1949
8
    MVT Op0MVT = TLI.getValueType(DL, Op0->getType(), true).getSimpleVT();
1950
8
    bool IsZExt = Opcode == Instruction::LShr;
1951
8
    if (!emitIntExt(Op0MVT, Op0Reg, MVT::i32, TempReg, IsZExt))
1952
0
      return false;
1953
8
1954
8
    Op0Reg = TempReg;
1955
8
  }
1956
12
1957
12
  if (const auto *C = dyn_cast<ConstantInt>(I->getOperand(1))) {
1958
6
    uint64_t ShiftVal = C->getZExtValue();
1959
6
1960
6
    switch (Opcode) {
1961
0
    default:
1962
0
      llvm_unreachable("Unexpected instruction.");
1963
2
    case Instruction::Shl:
1964
2
      Opcode = Mips::SLL;
1965
2
      break;
1966
2
    case Instruction::AShr:
1967
2
      Opcode = Mips::SRA;
1968
2
      break;
1969
2
    case Instruction::LShr:
1970
2
      Opcode = Mips::SRL;
1971
2
      break;
1972
6
    }
1973
6
1974
6
    emitInst(Opcode, ResultReg).addReg(Op0Reg).addImm(ShiftVal);
1975
6
    updateValueMap(I, ResultReg);
1976
6
    return true;
1977
6
  }
1978
6
1979
6
  unsigned Op1Reg = getRegForValue(I->getOperand(1));
1980
6
  if (!Op1Reg)
1981
0
    return false;
1982
6
1983
6
  switch (Opcode) {
1984
0
  default:
1985
0
    llvm_unreachable("Unexpected instruction.");
1986
2
  case Instruction::Shl:
1987
2
    Opcode = Mips::SLLV;
1988
2
    break;
1989
2
  case Instruction::AShr:
1990
2
    Opcode = Mips::SRAV;
1991
2
    break;
1992
2
  case Instruction::LShr:
1993
2
    Opcode = Mips::SRLV;
1994
2
    break;
1995
6
  }
1996
6
1997
6
  emitInst(Opcode, ResultReg).addReg(Op0Reg).addReg(Op1Reg);
1998
6
  updateValueMap(I, ResultReg);
1999
6
  return true;
2000
6
}
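Editor's note on the shift lowering above: a constant shift amount maps to the immediate forms (SLL/SRA/SRL) and a variable amount to the register forms (SLLV/SRAV/SRLV); for right shifts of sub-32-bit values the operand is first widened so that undefined high bits cannot shift down into the result. A small sketch of why that widening matters for a logical right shift of an i8; lshr8 is an illustrative name.

    #include <cstdint>

    // A logical right shift of an i8 held in a 32-bit register is only correct
    // if the upper 24 bits are cleared first; otherwise stale high bits leak in.
    uint32_t lshr8(uint32_t RegWithGarbage, unsigned Amt) {
      uint32_t Widened = RegWithGarbage & 0xFF;  // zero-extend, as emitIntZExt does
      return Widened >> Amt;
    }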
2001
2002
1.18k
bool MipsFastISel::fastSelectInstruction(const Instruction *I) {
2003
1.18k
  switch (I->getOpcode()) {
2004
8
  default:
2005
8
    break;
2006
324
  case Instruction::Load:
2007
324
    return selectLoad(I);
2008
257
  case Instruction::Store:
2009
257
    return selectStore(I);
2010
2
  case Instruction::SDiv:
2011
2
    if (!selectBinaryOp(I, ISD::SDIV))
2012
2
      return selectDivRem(I, ISD::SDIV);
2013
0
    return true;
2014
2
  case Instruction::UDiv:
2015
2
    if (!selectBinaryOp(I, ISD::UDIV))
2016
2
      return selectDivRem(I, ISD::UDIV);
2017
0
    return true;
2018
2
  case Instruction::SRem:
2019
2
    if (!selectBinaryOp(I, ISD::SREM))
2020
2
      return selectDivRem(I, ISD::SREM);
2021
0
    return true;
2022
2
  case Instruction::URem:
2023
2
    if (!selectBinaryOp(I, ISD::UREM))
2024
2
      return selectDivRem(I, ISD::UREM);
2025
0
    return true;
2026
12
  case Instruction::Shl:
2027
12
  case Instruction::LShr:
2028
12
  case Instruction::AShr:
2029
12
    return selectShift(I);
2030
42
  case Instruction::And:
2031
42
  case Instruction::Or:
2032
42
  case Instruction::Xor:
2033
42
    return selectLogicalOp(I);
2034
4
  case Instruction::Br:
2035
4
    return selectBranch(I);
2036
299
  case Instruction::Ret:
2037
299
    return selectRet(I);
2038
36
  case Instruction::Trunc:
2039
36
    return selectTrunc(I);
2040
130
  case Instruction::ZExt:
2041
130
  case Instruction::SExt:
2042
130
    return selectIntExt(I);
2043
2
  case Instruction::FPTrunc:
2044
2
    return selectFPTrunc(I);
2045
2
  case Instruction::FPExt:
2046
2
    return selectFPExt(I);
2047
4
  case Instruction::FPToSI:
2048
4
    return selectFPToInt(I, /*isSigned*/ true);
2049
0
  case Instruction::FPToUI:
2050
0
    return selectFPToInt(I, /*isSigned*/ false);
2051
52
  case Instruction::ICmp:
2052
52
  case Instruction::FCmp:
2053
52
    return selectCmp(I);
2054
8
  case Instruction::Select:
2055
8
    return selectSelect(I);
2056
8
  }
2057
8
  return false;
2058
8
}
2059
2060
unsigned MipsFastISel::getRegEnsuringSimpleIntegerWidening(const Value *V,
2061
112
                                                           bool IsUnsigned) {
2062
112
  unsigned VReg = getRegForValue(V);
2063
112
  if (VReg == 0)
2064
0
    return 0;
2065
112
  MVT VMVT = TLI.getValueType(DL, V->getType(), true).getSimpleVT();
2066
112
  if ((VMVT == MVT::i8) || (VMVT == MVT::i16)) {
2067
4
    unsigned TempReg = createResultReg(&Mips::GPR32RegClass);
2068
4
    if (!emitIntExt(VMVT, VReg, MVT::i32, TempReg, IsUnsigned))
2069
0
      return 0;
2070
4
    VReg = TempReg;
2071
4
  }
2072
112
  return VReg;
2073
112
}
2074
2075
566
void MipsFastISel::simplifyAddress(Address &Addr) {
2076
566
  if (!isInt<16>(Addr.getOffset())) {
2077
4
    unsigned TempReg =
2078
4
        materialize32BitInt(Addr.getOffset(), &Mips::GPR32RegClass);
2079
4
    unsigned DestReg = createResultReg(&Mips::GPR32RegClass);
2080
4
    emitInst(Mips::ADDu, DestReg).addReg(TempReg).addReg(Addr.getReg());
2081
4
    Addr.setReg(DestReg);
2082
4
    Addr.setOffset(0);
2083
4
  }
2084
566
}
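Editor's note on simplifyAddress above: MIPS load/store offsets are signed 16-bit immediates, so an out-of-range offset is folded into the base register (the constant is materialized, ADDu'd onto the base, and the offset reset to zero). A minimal sketch of the same range check in plain C++; fitsSimm16 is an illustrative name.

    #include <cstdint>

    // Returns true when Offset fits the signed 16-bit immediate field of a
    // MIPS load/store; otherwise the caller must add it into the base register.
    bool fitsSimm16(int64_t Offset) {
      return Offset >= -32768 && Offset <= 32767;
    }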
2085
2086
unsigned MipsFastISel::fastEmitInst_rr(unsigned MachineInstOpcode,
2087
                                       const TargetRegisterClass *RC,
2088
                                       unsigned Op0, bool Op0IsKill,
2089
21
                                       unsigned Op1, bool Op1IsKill) {
2090
21
  // We treat the MUL instruction in a special way because it clobbers
2091
21
  // the HI0 & LO0 registers. The TableGen definition of this instruction can
2092
21
  // mark these registers only as implicitly defined. As a result, the
2093
21
  // register allocator runs out of registers when this instruction is
2094
21
  // followed by another instruction that defines the same registers too.
2095
21
  // We can fix this by explicitly marking those registers as dead.
2096
21
  if (MachineInstOpcode == Mips::MUL) {
2097
2
    unsigned ResultReg = createResultReg(RC);
2098
2
    const MCInstrDesc &II = TII.get(MachineInstOpcode);
2099
2
    Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());
2100
2
    Op1 = constrainOperandRegClass(II, Op1, II.getNumDefs() + 1);
2101
2
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
2102
2
      .addReg(Op0, getKillRegState(Op0IsKill))
2103
2
      .addReg(Op1, getKillRegState(Op1IsKill))
2104
2
      .addReg(Mips::HI0, RegState::ImplicitDefine | RegState::Dead)
2105
2
      .addReg(Mips::LO0, RegState::ImplicitDefine | RegState::Dead);
2106
2
    return ResultReg;
2107
2
  }
2108
19
2109
19
  return FastISel::fastEmitInst_rr(MachineInstOpcode, RC, Op0, Op0IsKill, Op1,
2110
19
                                   Op1IsKill);
2111
19
}
2112
2113
namespace llvm {
2114
2115
FastISel *Mips::createFastISel(FunctionLoweringInfo &funcInfo,
2116
301
                               const TargetLibraryInfo *libInfo) {
2117
301
  return new MipsFastISel(funcInfo, libInfo);
2118
301
}
2119
2120
} // end namespace llvm