// Provenance note: this file was recovered from an llvm-cov coverage report
// (original path: llvm/lib/Target/AMDGPU/MCTargetDesc/SIMCCodeEmitter.cpp);
// the report's "Line | Count | Source" annotations have been stripped.
1 | | //===-- SIMCCodeEmitter.cpp - SI Code Emitter -----------------------------===// |
2 | | // |
3 | | // The LLVM Compiler Infrastructure |
4 | | // |
5 | | // This file is distributed under the University of Illinois Open Source |
6 | | // License. See LICENSE.TXT for details. |
7 | | // |
8 | | //===----------------------------------------------------------------------===// |
9 | | // |
10 | | /// \file |
11 | | /// \brief The SI code emitter produces machine code that can be executed |
12 | | /// directly on the GPU device. |
13 | | // |
14 | | //===----------------------------------------------------------------------===// |
15 | | |
16 | | #include "AMDGPU.h" |
17 | | #include "MCTargetDesc/AMDGPUFixupKinds.h" |
18 | | #include "MCTargetDesc/AMDGPUMCCodeEmitter.h" |
19 | | #include "MCTargetDesc/AMDGPUMCTargetDesc.h" |
20 | | #include "Utils/AMDGPUBaseInfo.h" |
21 | | #include "llvm/MC/MCCodeEmitter.h" |
22 | | #include "llvm/MC/MCContext.h" |
23 | | #include "llvm/MC/MCExpr.h" |
24 | | #include "llvm/MC/MCFixup.h" |
25 | | #include "llvm/MC/MCInst.h" |
26 | | #include "llvm/MC/MCInstrDesc.h" |
27 | | #include "llvm/MC/MCInstrInfo.h" |
28 | | #include "llvm/MC/MCRegisterInfo.h" |
29 | | #include "llvm/MC/MCSubtargetInfo.h" |
30 | | #include "llvm/MC/MCSymbol.h" |
31 | | #include "llvm/Support/Casting.h" |
32 | | #include "llvm/Support/ErrorHandling.h" |
33 | | #include "llvm/Support/MathExtras.h" |
34 | | #include "llvm/Support/raw_ostream.h" |
35 | | #include <cassert> |
36 | | #include <cstdint> |
37 | | #include <cstdlib> |
38 | | |
39 | | using namespace llvm; |
40 | | |
41 | | namespace { |
42 | | |
43 | | class SIMCCodeEmitter : public AMDGPUMCCodeEmitter { |
44 | | const MCRegisterInfo &MRI; |
45 | | |
46 | | /// \brief Encode an fp or int literal |
47 | | uint32_t getLitEncoding(const MCOperand &MO, const MCOperandInfo &OpInfo, |
48 | | const MCSubtargetInfo &STI) const; |
49 | | |
50 | | public: |
51 | | SIMCCodeEmitter(const MCInstrInfo &mcii, const MCRegisterInfo &mri, |
52 | | MCContext &ctx) |
53 | 337 | : AMDGPUMCCodeEmitter(mcii), MRI(mri) {} |
54 | | SIMCCodeEmitter(const SIMCCodeEmitter &) = delete; |
55 | | SIMCCodeEmitter &operator=(const SIMCCodeEmitter &) = delete; |
56 | | |
57 | | /// \brief Encode the instruction and write it to the OS. |
58 | | void encodeInstruction(const MCInst &MI, raw_ostream &OS, |
59 | | SmallVectorImpl<MCFixup> &Fixups, |
60 | | const MCSubtargetInfo &STI) const override; |
61 | | |
62 | | /// \returns the encoding for an MCOperand. |
63 | | uint64_t getMachineOpValue(const MCInst &MI, const MCOperand &MO, |
64 | | SmallVectorImpl<MCFixup> &Fixups, |
65 | | const MCSubtargetInfo &STI) const override; |
66 | | |
67 | | /// \brief Use a fixup to encode the simm16 field for SOPP branch |
68 | | /// instructions. |
69 | | unsigned getSOPPBrEncoding(const MCInst &MI, unsigned OpNo, |
70 | | SmallVectorImpl<MCFixup> &Fixups, |
71 | | const MCSubtargetInfo &STI) const override; |
72 | | |
73 | | unsigned getSDWASrcEncoding(const MCInst &MI, unsigned OpNo, |
74 | | SmallVectorImpl<MCFixup> &Fixups, |
75 | | const MCSubtargetInfo &STI) const override; |
76 | | |
77 | | unsigned getSDWAVopcDstEncoding(const MCInst &MI, unsigned OpNo, |
78 | | SmallVectorImpl<MCFixup> &Fixups, |
79 | | const MCSubtargetInfo &STI) const override; |
80 | | }; |
81 | | |
82 | | } // end anonymous namespace |
83 | | |
84 | | MCCodeEmitter *llvm::createSIMCCodeEmitter(const MCInstrInfo &MCII, |
85 | | const MCRegisterInfo &MRI, |
86 | 337 | MCContext &Ctx) { |
87 | 337 | return new SIMCCodeEmitter(MCII, MRI, Ctx); |
88 | 337 | } |
89 | | |
// Returns the hardware encoding to use if the given integer is an integer
// inline immediate value, or 0 if it is not one.
//
// Inline integers 0..64 map to codes 128..192; -1..-16 map to 193..208.
template <typename IntTy>
static uint32_t getIntInlineImmEncoding(IntTy Imm) {
  // Small non-negative values.
  if (Imm >= 0 && Imm <= 64)
    return 128 + Imm;

  // Small negative values; encoded by magnitude above 192.
  if (Imm >= -16 && Imm <= -1)
    return 192 + std::abs(Imm);

  return 0;
}
102 | | |
103 | 8.77k | static uint32_t getLit16Encoding(uint16_t Val, const MCSubtargetInfo &STI) { |
104 | 8.77k | uint16_t IntImm = getIntInlineImmEncoding(static_cast<int16_t>(Val)); |
105 | 8.77k | if (IntImm != 0) |
106 | 3.50k | return IntImm; |
107 | 5.27k | |
108 | 5.27k | if (5.27k Val == 0x38005.27k ) // 0.5 |
109 | 1.75k | return 240; |
110 | 3.52k | |
111 | 3.52k | if (3.52k Val == 0xB8003.52k ) // -0.5 |
112 | 10 | return 241; |
113 | 3.51k | |
114 | 3.51k | if (3.51k Val == 0x3C003.51k ) // 1.0 |
115 | 10 | return 242; |
116 | 3.50k | |
117 | 3.50k | if (3.50k Val == 0xBC003.50k ) // -1.0 |
118 | 12 | return 243; |
119 | 3.48k | |
120 | 3.48k | if (3.48k Val == 0x40003.48k ) // 2.0 |
121 | 8 | return 244; |
122 | 3.48k | |
123 | 3.48k | if (3.48k Val == 0xC0003.48k ) // -2.0 |
124 | 10 | return 245; |
125 | 3.47k | |
126 | 3.47k | if (3.47k Val == 0x44003.47k ) // 4.0 |
127 | 10 | return 246; |
128 | 3.46k | |
129 | 3.46k | if (3.46k Val == 0xC4003.46k ) // -4.0 |
130 | 1.73k | return 247; |
131 | 1.72k | |
132 | 1.72k | if (1.72k Val == 0x3118 && // 1.0 / (2.0 * pi) |
133 | 18 | STI.getFeatureBits()[AMDGPU::FeatureInv2PiInlineImm]) |
134 | 18 | return 248; |
135 | 1.70k | |
136 | 1.70k | return 255; |
137 | 1.70k | } |
138 | | |
139 | 28.1k | static uint32_t getLit32Encoding(uint32_t Val, const MCSubtargetInfo &STI) { |
140 | 28.1k | uint32_t IntImm = getIntInlineImmEncoding(static_cast<int32_t>(Val)); |
141 | 28.1k | if (IntImm != 0) |
142 | 12.2k | return IntImm; |
143 | 15.9k | |
144 | 15.9k | if (15.9k Val == FloatToBits(0.5f)15.9k ) |
145 | 4.86k | return 240; |
146 | 11.0k | |
147 | 11.0k | if (11.0k Val == FloatToBits(-0.5f)11.0k ) |
148 | 3 | return 241; |
149 | 11.0k | |
150 | 11.0k | if (11.0k Val == FloatToBits(1.0f)11.0k ) |
151 | 47 | return 242; |
152 | 10.9k | |
153 | 10.9k | if (10.9k Val == FloatToBits(-1.0f)10.9k ) |
154 | 66 | return 243; |
155 | 10.9k | |
156 | 10.9k | if (10.9k Val == FloatToBits(2.0f)10.9k ) |
157 | 2 | return 244; |
158 | 10.9k | |
159 | 10.9k | if (10.9k Val == FloatToBits(-2.0f)10.9k ) |
160 | 17 | return 245; |
161 | 10.9k | |
162 | 10.9k | if (10.9k Val == FloatToBits(4.0f)10.9k ) |
163 | 48 | return 246; |
164 | 10.8k | |
165 | 10.8k | if (10.8k Val == FloatToBits(-4.0f)10.8k ) |
166 | 4.75k | return 247; |
167 | 6.10k | |
168 | 6.10k | if (6.10k Val == 0x3e22f983 && // 1.0 / (2.0 * pi) |
169 | 74 | STI.getFeatureBits()[AMDGPU::FeatureInv2PiInlineImm]) |
170 | 38 | return 248; |
171 | 6.06k | |
172 | 6.06k | return 255; |
173 | 6.06k | } |
174 | | |
175 | 12.7k | static uint32_t getLit64Encoding(uint64_t Val, const MCSubtargetInfo &STI) { |
176 | 12.7k | uint32_t IntImm = getIntInlineImmEncoding(static_cast<int64_t>(Val)); |
177 | 12.7k | if (IntImm != 0) |
178 | 4.90k | return IntImm; |
179 | 7.81k | |
180 | 7.81k | if (7.81k Val == DoubleToBits(0.5)7.81k ) |
181 | 2.37k | return 240; |
182 | 5.43k | |
183 | 5.43k | if (5.43k Val == DoubleToBits(-0.5)5.43k ) |
184 | 1 | return 241; |
185 | 5.43k | |
186 | 5.43k | if (5.43k Val == DoubleToBits(1.0)5.43k ) |
187 | 4 | return 242; |
188 | 5.42k | |
189 | 5.42k | if (5.42k Val == DoubleToBits(-1.0)5.42k ) |
190 | 35 | return 243; |
191 | 5.39k | |
192 | 5.39k | if (5.39k Val == DoubleToBits(2.0)5.39k ) |
193 | 2 | return 244; |
194 | 5.39k | |
195 | 5.39k | if (5.39k Val == DoubleToBits(-2.0)5.39k ) |
196 | 1 | return 245; |
197 | 5.39k | |
198 | 5.39k | if (5.39k Val == DoubleToBits(4.0)5.39k ) |
199 | 32 | return 246; |
200 | 5.35k | |
201 | 5.35k | if (5.35k Val == DoubleToBits(-4.0)5.35k ) |
202 | 2.33k | return 247; |
203 | 3.02k | |
204 | 3.02k | if (3.02k Val == 0x3fc45f306dc9c882 && // 1.0 / (2.0 * pi) |
205 | 21 | STI.getFeatureBits()[AMDGPU::FeatureInv2PiInlineImm]) |
206 | 21 | return 248; |
207 | 3.00k | |
208 | 3.00k | return 255; |
209 | 3.00k | } |
210 | | |
211 | | uint32_t SIMCCodeEmitter::getLitEncoding(const MCOperand &MO, |
212 | | const MCOperandInfo &OpInfo, |
213 | 100k | const MCSubtargetInfo &STI) const { |
214 | 100k | int64_t Imm; |
215 | 100k | if (MO.isExpr()100k ) { |
216 | 228 | const auto *C = dyn_cast<MCConstantExpr>(MO.getExpr()); |
217 | 228 | if (!C) |
218 | 224 | return 255; |
219 | 4 | |
220 | 4 | Imm = C->getValue(); |
221 | 100k | } else { |
222 | 99.9k | |
223 | 99.9k | assert(!MO.isFPImm()); |
224 | 99.9k | |
225 | 99.9k | if (!MO.isImm()) |
226 | 50.3k | return ~0; |
227 | 49.6k | |
228 | 49.6k | Imm = MO.getImm(); |
229 | 49.6k | } |
230 | 100k | |
231 | 49.6k | switch (OpInfo.OperandType) { |
232 | 28.1k | case AMDGPU::OPERAND_REG_IMM_INT32: |
233 | 28.1k | case AMDGPU::OPERAND_REG_IMM_FP32: |
234 | 28.1k | case AMDGPU::OPERAND_REG_INLINE_C_INT32: |
235 | 28.1k | case AMDGPU::OPERAND_REG_INLINE_C_FP32: |
236 | 28.1k | return getLit32Encoding(static_cast<uint32_t>(Imm), STI); |
237 | 28.1k | |
238 | 12.7k | case AMDGPU::OPERAND_REG_IMM_INT64: |
239 | 12.7k | case AMDGPU::OPERAND_REG_IMM_FP64: |
240 | 12.7k | case AMDGPU::OPERAND_REG_INLINE_C_INT64: |
241 | 12.7k | case AMDGPU::OPERAND_REG_INLINE_C_FP64: |
242 | 12.7k | return getLit64Encoding(static_cast<uint64_t>(Imm), STI); |
243 | 12.7k | |
244 | 8.62k | case AMDGPU::OPERAND_REG_IMM_INT16: |
245 | 8.62k | case AMDGPU::OPERAND_REG_IMM_FP16: |
246 | 8.62k | case AMDGPU::OPERAND_REG_INLINE_C_INT16: |
247 | 8.62k | case AMDGPU::OPERAND_REG_INLINE_C_FP16: |
248 | 8.62k | // FIXME Is this correct? What do inline immediates do on SI for f16 src |
249 | 8.62k | // which does not have f16 support? |
250 | 8.62k | return getLit16Encoding(static_cast<uint16_t>(Imm), STI); |
251 | 8.62k | |
252 | 153 | case AMDGPU::OPERAND_REG_INLINE_C_V2INT16: |
253 | 153 | case AMDGPU::OPERAND_REG_INLINE_C_V2FP16: { |
254 | 153 | uint16_t Lo16 = static_cast<uint16_t>(Imm); |
255 | 153 | uint32_t Encoding = getLit16Encoding(Lo16, STI); |
256 | 153 | return Encoding; |
257 | 153 | } |
258 | 0 | default: |
259 | 0 | llvm_unreachable("invalid operand size"); |
260 | 0 | } |
261 | 0 | } |
262 | | |
263 | | void SIMCCodeEmitter::encodeInstruction(const MCInst &MI, raw_ostream &OS, |
264 | | SmallVectorImpl<MCFixup> &Fixups, |
265 | 186k | const MCSubtargetInfo &STI) const { |
266 | 186k | verifyInstructionPredicates(MI, |
267 | 186k | computeAvailableFeatures(STI.getFeatureBits())); |
268 | 186k | |
269 | 186k | uint64_t Encoding = getBinaryCodeForInstr(MI, Fixups, STI); |
270 | 186k | const MCInstrDesc &Desc = MCII.get(MI.getOpcode()); |
271 | 186k | unsigned bytes = Desc.getSize(); |
272 | 186k | |
273 | 1.44M | for (unsigned i = 0; i < bytes1.44M ; i++1.25M ) { |
274 | 1.25M | OS.write((uint8_t) ((Encoding >> (8 * i)) & 0xff)); |
275 | 1.25M | } |
276 | 186k | |
277 | 186k | if (bytes > 4) |
278 | 127k | return; |
279 | 58.5k | |
280 | 58.5k | // Check for additional literals in SRC0/1/2 (Op 1/2/3) |
281 | 181k | for (unsigned i = 0, e = Desc.getNumOperands(); 58.5k i < e181k ; ++i123k ) { |
282 | 128k | |
283 | 128k | // Check if this operand should be encoded as [SV]Src |
284 | 128k | if (!AMDGPU::isSISrcOperand(Desc, i)) |
285 | 62.0k | continue; |
286 | 66.8k | |
287 | 66.8k | // Is this operand a literal immediate? |
288 | 66.8k | const MCOperand &Op = MI.getOperand(i); |
289 | 66.8k | if (getLitEncoding(Op, Desc.OpInfo[i], STI) != 255) |
290 | 61.3k | continue; |
291 | 5.50k | |
292 | 5.50k | // Yes! Encode it |
293 | 5.50k | int64_t Imm = 0; |
294 | 5.50k | |
295 | 5.50k | if (Op.isImm()) |
296 | 5.39k | Imm = Op.getImm(); |
297 | 113 | else if (113 Op.isExpr()113 ) { |
298 | 113 | if (const auto *C = dyn_cast<MCConstantExpr>(Op.getExpr())) |
299 | 1 | Imm = C->getValue(); |
300 | 113 | |
301 | 0 | } else if (0 !Op.isExpr()0 ) // Exprs will be replaced with a fixup value. |
302 | 0 | llvm_unreachable("Must be immediate or expr"); |
303 | 5.50k | |
304 | 27.5k | for (unsigned j = 0; 5.50k j < 427.5k ; j++22.0k ) { |
305 | 22.0k | OS.write((uint8_t) ((Imm >> (8 * j)) & 0xff)); |
306 | 22.0k | } |
307 | 5.50k | |
308 | 5.50k | // Only one literal value allowed |
309 | 5.50k | break; |
310 | 128k | } |
311 | 186k | } |
312 | | |
313 | | unsigned SIMCCodeEmitter::getSOPPBrEncoding(const MCInst &MI, unsigned OpNo, |
314 | | SmallVectorImpl<MCFixup> &Fixups, |
315 | 182 | const MCSubtargetInfo &STI) const { |
316 | 182 | const MCOperand &MO = MI.getOperand(OpNo); |
317 | 182 | |
318 | 182 | if (MO.isExpr()182 ) { |
319 | 30 | const MCExpr *Expr = MO.getExpr(); |
320 | 30 | MCFixupKind Kind = (MCFixupKind)AMDGPU::fixup_si_sopp_br; |
321 | 30 | Fixups.push_back(MCFixup::create(0, Expr, Kind, MI.getLoc())); |
322 | 30 | return 0; |
323 | 30 | } |
324 | 152 | |
325 | 152 | return getMachineOpValue(MI, MO, Fixups, STI); |
326 | 152 | } |
327 | | |
328 | | unsigned |
329 | | SIMCCodeEmitter::getSDWASrcEncoding(const MCInst &MI, unsigned OpNo, |
330 | | SmallVectorImpl<MCFixup> &Fixups, |
331 | 37.2k | const MCSubtargetInfo &STI) const { |
332 | 37.2k | using namespace AMDGPU::SDWA; |
333 | 37.2k | |
334 | 37.2k | uint64_t RegEnc = 0; |
335 | 37.2k | |
336 | 37.2k | const MCOperand &MO = MI.getOperand(OpNo); |
337 | 37.2k | |
338 | 37.2k | unsigned Reg = MO.getReg(); |
339 | 37.2k | RegEnc |= MRI.getEncodingValue(Reg); |
340 | 37.2k | RegEnc &= SDWA9EncValues::SRC_VGPR_MASK; |
341 | 37.2k | if (AMDGPU::isSGPR(AMDGPU::mc2PseudoReg(Reg), &MRI)37.2k ) { |
342 | 2.90k | RegEnc |= SDWA9EncValues::SRC_SGPR_MASK; |
343 | 2.90k | } |
344 | 37.2k | return RegEnc; |
345 | 37.2k | } |
346 | | |
347 | | unsigned |
348 | | SIMCCodeEmitter::getSDWAVopcDstEncoding(const MCInst &MI, unsigned OpNo, |
349 | | SmallVectorImpl<MCFixup> &Fixups, |
350 | 4.41k | const MCSubtargetInfo &STI) const { |
351 | 4.41k | using namespace AMDGPU::SDWA; |
352 | 4.41k | |
353 | 4.41k | uint64_t RegEnc = 0; |
354 | 4.41k | |
355 | 4.41k | const MCOperand &MO = MI.getOperand(OpNo); |
356 | 4.41k | |
357 | 4.41k | unsigned Reg = MO.getReg(); |
358 | 4.41k | if (Reg != AMDGPU::VCC4.41k ) { |
359 | 4.23k | RegEnc |= MRI.getEncodingValue(Reg); |
360 | 4.23k | RegEnc &= SDWA9EncValues::VOPC_DST_SGPR_MASK; |
361 | 4.23k | RegEnc |= SDWA9EncValues::VOPC_DST_VCC_MASK; |
362 | 4.23k | } |
363 | 4.41k | return RegEnc; |
364 | 4.41k | } |
365 | | |
366 | 138 | static bool needsPCRel(const MCExpr *Expr) { |
367 | 138 | switch (Expr->getKind()) { |
368 | 97 | case MCExpr::SymbolRef: |
369 | 97 | return true; |
370 | 41 | case MCExpr::Binary: { |
371 | 41 | auto *BE = cast<MCBinaryExpr>(Expr); |
372 | 41 | if (BE->getOpcode() == MCBinaryExpr::Sub) |
373 | 15 | return false; |
374 | 26 | return needsPCRel(BE->getLHS()) || 26 needsPCRel(BE->getRHS())0 ; |
375 | 26 | } |
376 | 0 | case MCExpr::Unary: |
377 | 0 | return needsPCRel(cast<MCUnaryExpr>(Expr)->getSubExpr()); |
378 | 0 | case MCExpr::Target: |
379 | 0 | case MCExpr::Constant: |
380 | 0 | return false; |
381 | 0 | } |
382 | 0 | llvm_unreachable0 ("invalid kind"); |
383 | 0 | } |
384 | | |
385 | | uint64_t SIMCCodeEmitter::getMachineOpValue(const MCInst &MI, |
386 | | const MCOperand &MO, |
387 | | SmallVectorImpl<MCFixup> &Fixups, |
388 | 840k | const MCSubtargetInfo &STI) const { |
389 | 840k | if (MO.isReg()) |
390 | 424k | return MRI.getEncodingValue(MO.getReg()); |
391 | 416k | |
392 | 416k | if (416k MO.isExpr() && 416k MO.getExpr()->getKind() != MCExpr::Constant114 ) { |
393 | 112 | // FIXME: If this is expression is PCRel or not should not depend on what |
394 | 112 | // the expression looks like. Given that this is just a general expression, |
395 | 112 | // it should probably be FK_Data_4 and whatever is producing |
396 | 112 | // |
397 | 112 | // s_add_u32 s2, s2, (extern_const_addrspace+16 |
398 | 112 | // |
399 | 112 | // And expecting a PCRel should instead produce |
400 | 112 | // |
401 | 112 | // .Ltmp1: |
402 | 112 | // s_add_u32 s2, s2, (extern_const_addrspace+16)-.Ltmp1 |
403 | 112 | MCFixupKind Kind; |
404 | 112 | if (needsPCRel(MO.getExpr())) |
405 | 97 | Kind = FK_PCRel_4; |
406 | 112 | else |
407 | 15 | Kind = FK_Data_4; |
408 | 112 | Fixups.push_back(MCFixup::create(4, MO.getExpr(), Kind, MI.getLoc())); |
409 | 112 | } |
410 | 416k | |
411 | 416k | // Figure out the operand number, needed for isSrcOperand check |
412 | 416k | unsigned OpNo = 0; |
413 | 2.13M | for (unsigned e = MI.getNumOperands(); OpNo < e2.13M ; ++OpNo1.71M ) { |
414 | 2.13M | if (&MO == &MI.getOperand(OpNo)) |
415 | 416k | break; |
416 | 2.13M | } |
417 | 416k | |
418 | 416k | const MCInstrDesc &Desc = MCII.get(MI.getOpcode()); |
419 | 416k | if (AMDGPU::isSISrcOperand(Desc, OpNo)416k ) { |
420 | 33.3k | uint32_t Enc = getLitEncoding(MO, Desc.OpInfo[OpNo], STI); |
421 | 33.3k | if (Enc != ~0U && 33.3k (Enc != 255 || 33.3k Desc.getSize() == 45.50k )) |
422 | 33.3k | return Enc; |
423 | 416k | |
424 | 382k | } else if (382k MO.isImm()382k ) |
425 | 382k | return MO.getImm(); |
426 | 0 |
|
427 | 0 | llvm_unreachable0 ("Encoding of this operand type is not supported yet."); |
428 | 0 | return 0; |
429 | 840k | } |