Coverage Report

Created: 2019-07-24 05:18

/Users/buildslave/jenkins/workspace/clang-stage2-coverage-R/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUAsmBackend.cpp
//===-- AMDGPUAsmBackend.cpp - AMDGPU Assembler Backend -------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
/// \file
//===----------------------------------------------------------------------===//

#include "MCTargetDesc/AMDGPUFixupKinds.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/BinaryFormat/ELF.h"
#include "llvm/MC/MCAsmBackend.h"
#include "llvm/MC/MCAssembler.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCFixupKindInfo.h"
#include "llvm/MC/MCObjectWriter.h"
#include "llvm/MC/MCValue.h"
#include "llvm/Support/TargetRegistry.h"
#include "Utils/AMDGPUBaseInfo.h"

using namespace llvm;
using namespace llvm::AMDGPU;

namespace {

class AMDGPUAsmBackend : public MCAsmBackend {
public:
  AMDGPUAsmBackend(const Target &T) : MCAsmBackend(support::little) {}

  unsigned getNumFixupKinds() const override {
    return AMDGPU::NumTargetFixupKinds;
  }

  void applyFixup(const MCAssembler &Asm, const MCFixup &Fixup,
                  const MCValue &Target, MutableArrayRef<char> Data,
                  uint64_t Value, bool IsResolved,
                  const MCSubtargetInfo *STI) const override;
  bool fixupNeedsRelaxation(const MCFixup &Fixup, uint64_t Value,
                            const MCRelaxableFragment *DF,
                            const MCAsmLayout &Layout) const override;

  void relaxInstruction(const MCInst &Inst, const MCSubtargetInfo &STI,
                        MCInst &Res) const override;

  bool mayNeedRelaxation(const MCInst &Inst,
                         const MCSubtargetInfo &STI) const override;

  unsigned getMinimumNopSize() const override;
  bool writeNopData(raw_ostream &OS, uint64_t Count) const override;

  const MCFixupKindInfo &getFixupKindInfo(MCFixupKind Kind) const override;
};

} // end anonymous namespace

void AMDGPUAsmBackend::relaxInstruction(const MCInst &Inst,
                                        const MCSubtargetInfo &STI,
                                        MCInst &Res) const {
  unsigned RelaxedOpcode = AMDGPU::getSOPPWithRelaxation(Inst.getOpcode());
  Res.setOpcode(RelaxedOpcode);
  Res.addOperand(Inst.getOperand(0));
}

bool AMDGPUAsmBackend::fixupNeedsRelaxation(const MCFixup &Fixup,
                                            uint64_t Value,
                                            const MCRelaxableFragment *DF,
                                            const MCAsmLayout &Layout) const {
  // If the branch target has an offset of 0x3f, the branch must be relaxed by
  // adding an s_nop 0 immediately after it, effectively incrementing the
  // offset, as a hardware workaround on gfx1010.
  return (((int64_t(Value) / 4) - 1) == 0x3f);
}
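
The predicate above reads: the branch distance, converted to 4-byte words and adjusted for the distance being measured from the instruction after the branch, lands exactly on 0x3f. A minimal standalone sketch of the same check, with illustrative values (the helper name and test values are ours, not from the source):

    #include <cassert>
    #include <cstdint>

    // Hypothetical mirror of the check in fixupNeedsRelaxation: Value is the
    // byte distance of the branch fixup; /4 converts bytes to dwords, and -1
    // accounts for the offset being counted from the following instruction.
    static bool hitsOffset3fBug(uint64_t Value) {
      return ((int64_t(Value) / 4) - 1) == 0x3f;
    }

    int main() {
      assert(hitsOffset3fBug(0x100));  // (0x100/4)-1 == 0x3f: insert an s_nop
      assert(!hitsOffset3fBug(0x104)); // (0x104/4)-1 == 0x40: left alone
    }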

bool AMDGPUAsmBackend::mayNeedRelaxation(const MCInst &Inst,
                                         const MCSubtargetInfo &STI) const {
  if (!STI.getFeatureBits()[AMDGPU::FeatureOffset3fBug])
    return false;

  if (AMDGPU::getSOPPWithRelaxation(Inst.getOpcode()) >= 0)
    return true;

  return false;
}
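
Taken together, the three hooks divide the work: mayNeedRelaxation is a cheap static filter (only relaxable SOPP branches on parts with the offset-0x3f bug qualify), fixupNeedsRelaxation does the layout-dependent distance check, and relaxInstruction performs the rewrite. A rough, illustrative sketch of how a driver would consult them (the driver shape and names are ours; the real loop lives in the generic MC layer):

    // Illustrative only: how the assembler's relaxation pass consults the
    // three hooks declared above. Uses only the virtual methods this file
    // overrides; the surrounding plumbing is simplified.
    static bool maybeRelax(MCAsmBackend &AB, const MCSubtargetInfo &STI,
                           const MCAsmLayout &Layout,
                           const MCRelaxableFragment *DF, const MCFixup &Fixup,
                           uint64_t Value, MCInst &Inst) {
      if (!AB.mayNeedRelaxation(Inst, STI))                   // static filter
        return false;
      if (!AB.fixupNeedsRelaxation(Fixup, Value, DF, Layout)) // distance check
        return false;
      MCInst Relaxed;
      AB.relaxInstruction(Inst, STI, Relaxed);                // do the rewrite
      Inst = Relaxed;
      return true;
    }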

static unsigned getFixupKindNumBytes(unsigned Kind) {
  switch (Kind) {
  case AMDGPU::fixup_si_sopp_br:
    return 2;
  case FK_SecRel_1:
  case FK_Data_1:
    return 1;
  case FK_SecRel_2:
  case FK_Data_2:
    return 2;
  case FK_SecRel_4:
  case FK_Data_4:
  case FK_PCRel_4:
    return 4;
  case FK_SecRel_8:
  case FK_Data_8:
    return 8;
  default:
    llvm_unreachable("Unknown fixup kind!");
  }
}

static uint64_t adjustFixupValue(const MCFixup &Fixup, uint64_t Value,
                                 MCContext *Ctx) {
  int64_t SignedValue = static_cast<int64_t>(Value);

  switch (static_cast<unsigned>(Fixup.getKind())) {
  case AMDGPU::fixup_si_sopp_br: {
    int64_t BrImm = (SignedValue - 4) / 4;

    if (Ctx && !isInt<16>(BrImm))
      Ctx->reportError(Fixup.getLoc(), "branch size exceeds simm16");

    return BrImm;
  }
  case FK_Data_1:
  case FK_Data_2:
  case FK_Data_4:
  case FK_Data_8:
  case FK_PCRel_4:
  case FK_SecRel_4:
    return Value;
  default:
    llvm_unreachable("unhandled fixup kind");
  }
}
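
As a worked example of the fixup_si_sopp_br case: SOPP branch immediates count 4-byte words from the instruction after the branch, so a fixup value of 132 bytes becomes (132 - 4) / 4 = 32, and any scaled value outside the signed 16-bit range produces the "branch size exceeds simm16" error. A hedged standalone restatement (helper name ours):

    #include <cstdint>
    #include <limits>

    // Hypothetical mirror of the sopp branch scaling above: convert a byte
    // distance to a word-granular immediate and range-check it as simm16.
    static bool encodeSoppBranch(int64_t ByteDistance, int16_t &Imm) {
      int64_t BrImm = (ByteDistance - 4) / 4;
      if (BrImm < std::numeric_limits<int16_t>::min() ||
          BrImm > std::numeric_limits<int16_t>::max())
        return false; // would trigger "branch size exceeds simm16"
      Imm = static_cast<int16_t>(BrImm);
      return true;
    }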

void AMDGPUAsmBackend::applyFixup(const MCAssembler &Asm, const MCFixup &Fixup,
                                  const MCValue &Target,
                                  MutableArrayRef<char> Data, uint64_t Value,
                                  bool IsResolved,
                                  const MCSubtargetInfo *STI) const {
  Value = adjustFixupValue(Fixup, Value, &Asm.getContext());
  if (!Value)
    return; // Doesn't change encoding.

  MCFixupKindInfo Info = getFixupKindInfo(Fixup.getKind());

  // Shift the value into position.
  Value <<= Info.TargetOffset;

  unsigned NumBytes = getFixupKindNumBytes(Fixup.getKind());
  uint32_t Offset = Fixup.getOffset();
  assert(Offset + NumBytes <= Data.size() && "Invalid fixup offset!");

  // For each byte of the fragment that the fixup touches, mask in the bits
  // from the fixup value.
  for (unsigned i = 0; i != NumBytes; ++i)
    Data[Offset + i] |= static_cast<uint8_t>((Value >> (i * 8)) & 0xff);
}
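
The patch loop ORs the shifted value into the fragment one byte at a time, least-significant byte first, matching the little-endian layout the backend was constructed with; OR-ing rather than assigning preserves instruction bits already encoded in the buffer. A self-contained sketch of that byte-masking step (function name ours):

    #include <cstddef>
    #include <cstdint>

    // Hypothetical restatement of the applyFixup patch loop: merge NumBytes
    // of Value into Data at Offset, little-endian, without disturbing bits
    // that are already set.
    static void patchLittleEndian(uint8_t *Data, size_t Offset,
                                  unsigned NumBytes, uint64_t Value) {
      for (unsigned i = 0; i != NumBytes; ++i)
        Data[Offset + i] |= static_cast<uint8_t>((Value >> (i * 8)) & 0xff);
    }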

const MCFixupKindInfo &AMDGPUAsmBackend::getFixupKindInfo(
                                                       MCFixupKind Kind) const {
  static const MCFixupKindInfo Infos[AMDGPU::NumTargetFixupKinds] = {
    // name                   offset bits  flags
    { "fixup_si_sopp_br",     0,     16,   MCFixupKindInfo::FKF_IsPCRel },
  };

  if (Kind < FirstTargetFixupKind)
    return MCAsmBackend::getFixupKindInfo(Kind);

  return Infos[Kind - FirstTargetFixupKind];
}

unsigned AMDGPUAsmBackend::getMinimumNopSize() const {
  return 4;
}

bool AMDGPUAsmBackend::writeNopData(raw_ostream &OS, uint64_t Count) const {
  // If the count is not 4-byte aligned, we must be writing data into the text
  // section (otherwise we have unaligned instructions, and thus have far
  // bigger problems), so just write zeros instead.
  OS.write_zeros(Count % 4);

  // We are properly aligned, so write NOPs as requested.
  Count /= 4;

  // FIXME: R600 support.
  // s_nop 0
  const uint32_t Encoded_S_NOP_0 = 0xbf800000;

  for (uint64_t I = 0; I != Count; ++I)
    support::endian::write<uint32_t>(OS, Encoded_S_NOP_0, Endian);

  return true;
}
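
In other words, padding is emitted as raw zero bytes up to the next 4-byte boundary, then as whole s_nop 0 words (0xbf800000): a request for 10 bytes becomes 2 zero bytes followed by 2 nops. A minimal sketch of that split (names ours):

    #include <cstdint>
    #include <utility>

    // Hypothetical restatement of the writeNopData layout: how many raw zero
    // bytes and how many 4-byte s_nop instructions a padding request yields.
    static std::pair<uint64_t, uint64_t> splitPadding(uint64_t Count) {
      return {Count % 4, Count / 4}; // {zero bytes, s_nop count}
    }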

//===----------------------------------------------------------------------===//
// ELFAMDGPUAsmBackend class
//===----------------------------------------------------------------------===//

namespace {

class ELFAMDGPUAsmBackend : public AMDGPUAsmBackend {
  bool Is64Bit;
  bool HasRelocationAddend;
  uint8_t OSABI = ELF::ELFOSABI_NONE;
  uint8_t ABIVersion = 0;

public:
  ELFAMDGPUAsmBackend(const Target &T, const Triple &TT, uint8_t ABIVersion) :
      AMDGPUAsmBackend(T), Is64Bit(TT.getArch() == Triple::amdgcn),
      HasRelocationAddend(TT.getOS() == Triple::AMDHSA),
      ABIVersion(ABIVersion) {
    switch (TT.getOS()) {
    case Triple::AMDHSA:
      OSABI = ELF::ELFOSABI_AMDGPU_HSA;
      break;
    case Triple::AMDPAL:
      OSABI = ELF::ELFOSABI_AMDGPU_PAL;
      break;
    case Triple::Mesa3D:
      OSABI = ELF::ELFOSABI_AMDGPU_MESA3D;
      break;
    default:
      break;
    }
  }

  std::unique_ptr<MCObjectTargetWriter>
  createObjectTargetWriter() const override {
    return createAMDGPUELFObjectWriter(Is64Bit, OSABI, HasRelocationAddend,
                                       ABIVersion);
  }
};

} // end anonymous namespace

MCAsmBackend *llvm::createAMDGPUAsmBackend(const Target &T,
                                           const MCSubtargetInfo &STI,
                                           const MCRegisterInfo &MRI,
                                           const MCTargetOptions &Options) {
  // Use 64-bit ELF for amdgcn.
  return new ELFAMDGPUAsmBackend(T, STI.getTargetTriple(),
                                 IsaInfo::hasCodeObjectV3(&STI) ? 1 : 0);
}
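
For context, a factory with this signature is what the target registry expects; registration conventionally happens in the target's MC initializer. A hedged sketch (the function name here is illustrative, not quoted from the source tree):

    // Hedged sketch: wiring the factory into the TargetRegistry, as target MC
    // initializers conventionally do for both the R600 and GCN targets.
    extern "C" void ExampleInitializeAMDGPUAsmBackend() {
      for (Target *T : {&getTheAMDGPUTarget(), &getTheGCNTarget()})
        TargetRegistry::RegisterMCAsmBackend(*T, createAMDGPUAsmBackend);
    }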