Coverage Report

Created: 2019-07-24 05:18

/Users/buildslave/jenkins/workspace/clang-stage2-coverage-R/llvm/lib/Target/X86/X86CallingConv.cpp
Line
Count
Source (jump to first uncovered line)
1
//=== X86CallingConv.cpp - X86 Custom Calling Convention Impl   -*- C++ -*-===//
2
//
3
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4
// See https://llvm.org/LICENSE.txt for license information.
5
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6
//
7
//===----------------------------------------------------------------------===//
8
//
9
// This file contains the implementation of custom routines for the X86
10
// Calling Convention that aren't done by tablegen.
11
//
12
//===----------------------------------------------------------------------===//
13
14
#include "X86CallingConv.h"
15
#include "X86Subtarget.h"
16
#include "llvm/ADT/SmallVector.h"
17
#include "llvm/CodeGen/CallingConvLower.h"
18
#include "llvm/IR/CallingConv.h"
19
20
using namespace llvm;
21
22
/// When regcall calling convention compiled to 32 bit arch, special treatment
23
/// is required for 64 bit masks.
24
/// The value should be assigned to two GPRs.
25
/// \return true if registers were allocated and false otherwise.
26
static bool CC_X86_32_RegCall_Assign2Regs(unsigned &ValNo, MVT &ValVT,
27
                                          MVT &LocVT,
28
                                          CCValAssign::LocInfo &LocInfo,
29
                                          ISD::ArgFlagsTy &ArgFlags,
30
30
                                          CCState &State) {
31
30
  // List of GPR registers that are available to store values in regcall
32
30
  // calling convention.
33
30
  static const MCPhysReg RegList[] = {X86::EAX, X86::ECX, X86::EDX, X86::EDI,
34
30
                                      X86::ESI};
35
30
36
30
  // The vector will save all the available registers for allocation.
37
30
  SmallVector<unsigned, 5> AvailableRegs;
38
30
39
30
  // searching for the available registers.
40
150
  for (auto Reg : RegList) {
41
150
    if (!State.isAllocated(Reg))
42
58
      AvailableRegs.push_back(Reg);
43
150
  }
44
30
45
30
  const size_t RequiredGprsUponSplit = 2;
46
30
  if (AvailableRegs.size() < RequiredGprsUponSplit)
47
22
    return false; // Not enough free registers - continue the search.
48
8
49
8
  // Allocating the available registers.
50
24
  
for (unsigned I = 0; 8
I < RequiredGprsUponSplit;
I++16
) {
51
16
52
16
    // Marking the register as located.
53
16
    unsigned Reg = State.AllocateReg(AvailableRegs[I]);
54
16
55
16
    // Since we previously made sure that 2 registers are available
56
16
    // we expect that a real register number will be returned.
57
16
    assert(Reg && "Expecting a register will be available");
58
16
59
16
    // Assign the value to the allocated register
60
16
    State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
61
16
  }
62
8
63
8
  // Successful in allocating regsiters - stop scanning next rules.
64
8
  return true;
65
8
}
66
67
167
/// Pick the SSE/AVX register file that vectorcall uses for a value of the
/// given type: ZMMs for 512-bit vectors, YMMs for 256-bit vectors, and XMMs
/// for everything else. Six registers (index 0-5) are available in each case.
static ArrayRef<MCPhysReg> CC_X86_VectorCallGetSSEs(const MVT &ValVT) {
  static const MCPhysReg RegListZMM[] = {X86::ZMM0, X86::ZMM1, X86::ZMM2,
                                         X86::ZMM3, X86::ZMM4, X86::ZMM5};
  static const MCPhysReg RegListYMM[] = {X86::YMM0, X86::YMM1, X86::YMM2,
                                         X86::YMM3, X86::YMM4, X86::YMM5};
  static const MCPhysReg RegListXMM[] = {X86::XMM0, X86::XMM1, X86::XMM2,
                                         X86::XMM3, X86::XMM4, X86::XMM5};

  if (ValVT.is512BitVector())
    return makeArrayRef(std::begin(RegListZMM), std::end(RegListZMM));

  if (ValVT.is256BitVector())
    return makeArrayRef(std::begin(RegListYMM), std::end(RegListYMM));

  return makeArrayRef(std::begin(RegListXMM), std::end(RegListXMM));
}
84
85
54
/// The four integer registers that 64-bit vectorcall uses for argument
/// passing, in parameter order.
static ArrayRef<MCPhysReg> CC_X86_64_VectorCallGetGPRs() {
  static const MCPhysReg RegListGPR[] = {X86::RCX, X86::RDX, X86::R8, X86::R9};
  return makeArrayRef(RegListGPR);
}
89
90
static bool CC_X86_VectorCallAssignRegister(unsigned &ValNo, MVT &ValVT,
91
                                            MVT &LocVT,
92
                                            CCValAssign::LocInfo &LocInfo,
93
                                            ISD::ArgFlagsTy &ArgFlags,
94
58
                                            CCState &State) {
95
58
96
58
  ArrayRef<MCPhysReg> RegList = CC_X86_VectorCallGetSSEs(ValVT);
97
58
  bool Is64bit = static_cast<const X86Subtarget &>(
98
58
                     State.getMachineFunction().getSubtarget())
99
58
                     .is64Bit();
100
58
101
177
  for (auto Reg : RegList) {
102
177
    // If the register is not marked as allocated - assign to it.
103
177
    if (!State.isAllocated(Reg)) {
104
44
      unsigned AssigedReg = State.AllocateReg(Reg);
105
44
      assert(AssigedReg == Reg && "Expecting a valid register allocation");
106
44
      State.addLoc(
107
44
          CCValAssign::getReg(ValNo, ValVT, AssigedReg, LocVT, LocInfo));
108
44
      return true;
109
44
    }
110
133
    // If the register is marked as shadow allocated - assign to it.
111
133
    if (Is64bit && 
State.IsShadowAllocatedReg(Reg)70
) {
112
14
      State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
113
14
      return true;
114
14
    }
115
133
  }
116
58
117
58
  
llvm_unreachable0
("Clang should ensure that hva marked vectors will have "
118
58
                   "an available register.");
119
58
  
return false0
;
120
58
}
121
122
/// Vectorcall calling convention has special handling for vector types or
/// HVA for 64 bit arch.
/// For HVAs shadow registers might be allocated on the first pass
/// and actual XMM registers are allocated on the second pass.
/// For vector types, actual XMM registers are allocated on the first pass.
/// \param ValNo  index of the value being assigned.
/// \param ValVT  type of the value being assigned.
/// \param State  calling-convention state; register/stack allocations and
///               location assignments are recorded here.
/// \return true if registers were allocated and false otherwise.
static bool CC_X86_64_VectorCall(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
                                 CCValAssign::LocInfo &LocInfo,
                                 ISD::ArgFlagsTy &ArgFlags, CCState &State) {
  // On the second pass, go through the HVAs only.
  if (ArgFlags.isSecArgPass()) {
    if (ArgFlags.isHva())
      return CC_X86_VectorCallAssignRegister(ValNo, ValVT, LocVT, LocInfo,
                                             ArgFlags, State);
    // Non-HVA values were already fully assigned on the first pass; report
    // success so the search stops.
    return true;
  }

  // Process only vector types as defined by vectorcall spec:
  // "A vector type is either a floating-point type, for example,
  //  a float or double, or an SIMD vector type, for example, __m128 or __m256".
  if (!(ValVT.isFloatingPoint() ||
        (ValVT.isVector() && ValVT.getSizeInBits() >= 128))) {
    // If R9 was already assigned it means that we are after the fourth element
    // and because this is not an HVA / Vector type, we need to allocate
    // shadow XMM register.
    if (State.isAllocated(X86::R9)) {
      // Assign shadow XMM register.
      (void)State.AllocateReg(CC_X86_VectorCallGetSSEs(ValVT));
    }

    // Not a vector type - let the generic rules handle the value.
    return false;
  }

  // First pass: only a non-HVA value, or the first element of an HVA,
  // consumes a shadow GPR slot here.
  if (!ArgFlags.isHva() || ArgFlags.isHvaStart()) {
    // Assign shadow GPR register.
    (void)State.AllocateReg(CC_X86_64_VectorCallGetGPRs());

    // Assign XMM register - (shadow for HVA and non-shadow for non HVA).
    if (unsigned Reg = State.AllocateReg(CC_X86_VectorCallGetSSEs(ValVT))) {
      // In Vectorcall Calling convention, additional shadow stack can be
      // created on top of the basic 32 bytes of win64.
      // It can happen if the fifth or sixth argument is vector type or HVA.
      // At that case for each argument a shadow stack of 8 bytes is allocated.
      const TargetRegisterInfo *TRI =
          State.getMachineFunction().getSubtarget().getRegisterInfo();
      // regsOverlap (rather than equality) is used because Reg may be an
      // XMM, YMM or ZMM register aliasing the same slot.
      if (TRI->regsOverlap(Reg, X86::XMM4) ||
          TRI->regsOverlap(Reg, X86::XMM5))
        State.AllocateStack(8, 8);

      if (!ArgFlags.isHva()) {
        State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
        return true; // Allocated a register - Stop the search.
      }
    }
  }

  // If this is an HVA - Stop the search,
  // otherwise continue the search.
  return ArgFlags.isHva();
}
182
183
/// Vectorcall calling convention has special handling for vector types or
184
/// HVA for 32 bit arch.
185
/// For HVAs actual XMM registers are allocated on the second pass.
186
/// For vector types, actual XMM registers are allocated on the first pass.
187
/// \return true if registers were allocated and false otherwise.
188
static bool CC_X86_32_VectorCall(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
189
                                 CCValAssign::LocInfo &LocInfo,
190
244
                                 ISD::ArgFlagsTy &ArgFlags, CCState &State) {
191
244
  // On the second pass, go through the HVAs only.
192
244
  if (ArgFlags.isSecArgPass()) {
193
107
    if (ArgFlags.isHva())
194
29
      return CC_X86_VectorCallAssignRegister(ValNo, ValVT, LocVT, LocInfo,
195
29
                                             ArgFlags, State);
196
78
    return true;
197
78
  }
198
137
199
137
  // Process only vector types as defined by vectorcall spec:
200
137
  // "A vector type is either a floating point type, for example,
201
137
  //  a float or double, or an SIMD vector type, for example, __m128 or __m256".
202
137
  if (!(ValVT.isFloatingPoint() ||
203
137
        
(63
ValVT.isVector()63
&&
ValVT.getSizeInBits() >= 1288
))) {
204
55
    return false;
205
55
  }
206
82
207
82
  if (ArgFlags.isHva())
208
29
    return true; // If this is an HVA - Stop the search.
209
53
210
53
  // Assign XMM register.
211
53
  if (unsigned Reg = State.AllocateReg(CC_X86_VectorCallGetSSEs(ValVT))) {
212
48
    State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
213
48
    return true;
214
48
  }
215
5
216
5
  // In case we did not find an available XMM register for a vector -
217
5
  // pass it indirectly.
218
5
  // It is similar to CCPassIndirect, with the addition of inreg.
219
5
  if (!ValVT.isFloatingPoint()) {
220
1
    LocVT = MVT::i32;
221
1
    LocInfo = CCValAssign::Indirect;
222
1
    ArgFlags.setInReg();
223
1
  }
224
5
225
5
  return false; // No register was assigned - Continue the search.
226
5
}
227
228
/// Custom-assignment stub for the AnyReg calling convention. AnyReg is only
/// valid for the stackmap/patchpoint intrinsics, which never go through
/// regular argument assignment, so reaching this function is a bug; it traps
/// in assert builds and falls through in release builds.
static bool CC_X86_AnyReg_Error(unsigned &, MVT &, MVT &,
                                CCValAssign::LocInfo &, ISD::ArgFlagsTy &,
                                CCState &) {
  llvm_unreachable("The AnyReg calling convention is only supported by the "
                   "stackmap and patchpoint intrinsics.");
  // gracefully fallback to X86 C calling convention on Release builds.
  return false;
}
236
237
static bool CC_X86_32_MCUInReg(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
238
                               CCValAssign::LocInfo &LocInfo,
239
123
                               ISD::ArgFlagsTy &ArgFlags, CCState &State) {
240
123
  // This is similar to CCAssignToReg<[EAX, EDX, ECX]>, but makes sure
241
123
  // not to split i64 and double between a register and stack
242
123
  static const MCPhysReg RegList[] = {X86::EAX, X86::EDX, X86::ECX};
243
123
  static const unsigned NumRegs = sizeof(RegList) / sizeof(RegList[0]);
244
123
245
123
  SmallVectorImpl<CCValAssign> &PendingMembers = State.getPendingLocs();
246
123
247
123
  // If this is the first part of an double/i64/i128, or if we're already
248
123
  // in the middle of a split, add to the pending list. If this is not
249
123
  // the end of the split, return, otherwise go on to process the pending
250
123
  // list
251
123
  if (ArgFlags.isSplit() || 
!PendingMembers.empty()102
) {
252
52
    PendingMembers.push_back(
253
52
        CCValAssign::getPending(ValNo, ValVT, LocVT, LocInfo));
254
52
    if (!ArgFlags.isSplitEnd())
255
31
      return true;
256
92
  }
257
92
258
92
  // If there are no pending members, we are not in the middle of a split,
259
92
  // so do the usual inreg stuff.
260
92
  if (PendingMembers.empty()) {
261
71
    if (unsigned Reg = State.AllocateReg(RegList)) {
262
68
      State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
263
68
      return true;
264
68
    }
265
3
    return false;
266
3
  }
267
21
268
21
  assert(ArgFlags.isSplitEnd());
269
21
270
21
  // We now have the entire original argument in PendingMembers, so decide
271
21
  // whether to use registers or the stack.
272
21
  // Per the MCU ABI:
273
21
  // a) To use registers, we need to have enough of them free to contain
274
21
  // the entire argument.
275
21
  // b) We never want to use more than 2 registers for a single argument.
276
21
277
21
  unsigned FirstFree = State.getFirstUnallocated(RegList);
278
21
  bool UseRegs = PendingMembers.size() <= std::min(2U, NumRegs - FirstFree);
279
21
280
52
  for (auto &It : PendingMembers) {
281
52
    if (UseRegs)
282
20
      It.convertToReg(State.AllocateReg(RegList[FirstFree++]));
283
32
    else
284
32
      It.convertToMem(State.AllocateStack(4, 4));
285
52
    State.addLoc(It);
286
52
  }
287
21
288
21
  PendingMembers.clear();
289
21
290
21
  return true;
291
21
}
292
293
/// X86 interrupt handlers can only take one or two stack arguments, but if
/// there are two arguments, they are in the opposite order from the standard
/// convention. Therefore, we have to look at the argument count up front before
/// allocating stack for each argument.
/// \param ValNo  index of the argument being assigned (0 or 1).
/// \param State  calling-convention state; stack allocations and location
///               assignments are recorded here.
/// \return always true - every supported prototype is assigned here, and an
///         unsupported prototype is a fatal error.
static bool CC_X86_Intr(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
                        CCValAssign::LocInfo &LocInfo,
                        ISD::ArgFlagsTy &ArgFlags, CCState &State) {
  const MachineFunction &MF = State.getMachineFunction();
  size_t ArgCount = State.getMachineFunction().getFunction().arg_size();
  bool Is64Bit = static_cast<const X86Subtarget &>(MF.getSubtarget()).is64Bit();
  // Pointer-sized stack slot: 8 bytes on 64-bit targets, 4 on 32-bit.
  unsigned SlotSize = Is64Bit ? 8 : 4;
  unsigned Offset;
  if (ArgCount == 1 && ValNo == 0) {
    // If we have one argument, the argument is five stack slots big, at fixed
    // offset zero. (Presumably the hardware-pushed interrupt frame - confirm
    // against the interrupt-attribute documentation.)
    Offset = State.AllocateStack(5 * SlotSize, 4);
  } else if (ArgCount == 2 && ValNo == 0) {
    // If we have two arguments, the stack slot is *after* the error code
    // argument. Pretend it doesn't consume stack space, and account for it when
    // we assign the second argument.
    Offset = SlotSize;
  } else if (ArgCount == 2 && ValNo == 1) {
    // If this is the second of two arguments, it must be the error code. It
    // appears first on the stack, and is then followed by the five slot
    // interrupt struct.
    Offset = 0;
    (void)State.AllocateStack(6 * SlotSize, 4);
  } else {
    report_fatal_error("unsupported x86 interrupt prototype");
  }

  // FIXME: This should be accounted for in
  // X86FrameLowering::getFrameIndexReference, not here.
  if (Is64Bit && ArgCount == 2)
    Offset += SlotSize;

  State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
  return true;
}
332
333
// Provides entry points of CC_X86 and RetCC_X86.
334
#include "X86GenCallingConv.inc"