/Users/buildslave/jenkins/workspace/clang-stage2-coverage-R/llvm/lib/Target/X86/MCTargetDesc/X86AsmBackend.cpp
Line | Count | Source (jump to first uncovered line) |
1 | | //===-- X86AsmBackend.cpp - X86 Assembler Backend -------------------------===// |
2 | | // |
3 | | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. |
4 | | // See https://llvm.org/LICENSE.txt for license information. |
5 | | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception |
6 | | // |
7 | | //===----------------------------------------------------------------------===// |
8 | | |
9 | | #include "MCTargetDesc/X86BaseInfo.h" |
10 | | #include "MCTargetDesc/X86FixupKinds.h" |
11 | | #include "llvm/ADT/StringSwitch.h" |
12 | | #include "llvm/BinaryFormat/ELF.h" |
13 | | #include "llvm/BinaryFormat/MachO.h" |
14 | | #include "llvm/MC/MCAsmBackend.h" |
15 | | #include "llvm/MC/MCDwarf.h" |
16 | | #include "llvm/MC/MCELFObjectWriter.h" |
17 | | #include "llvm/MC/MCExpr.h" |
18 | | #include "llvm/MC/MCFixupKindInfo.h" |
19 | | #include "llvm/MC/MCInst.h" |
20 | | #include "llvm/MC/MCMachObjectWriter.h" |
21 | | #include "llvm/MC/MCObjectWriter.h" |
22 | | #include "llvm/MC/MCRegisterInfo.h" |
23 | | #include "llvm/MC/MCSectionMachO.h" |
24 | | #include "llvm/MC/MCSubtargetInfo.h" |
25 | | #include "llvm/Support/ErrorHandling.h" |
26 | | #include "llvm/Support/raw_ostream.h" |
27 | | using namespace llvm; |
28 | | |
29 | 1.15M | static unsigned getFixupKindSize(unsigned Kind) { |
30 | 1.15M | switch (Kind) { |
31 | 1.15M | default: |
32 | 0 | llvm_unreachable("invalid fixup kind!"); |
33 | 1.15M | case FK_NONE: |
34 | 16 | return 0; |
35 | 1.15M | case FK_PCRel_1: |
36 | 118k | case FK_SecRel_1: |
37 | 118k | case FK_Data_1: |
38 | 118k | return 1; |
39 | 118k | case FK_PCRel_2: |
40 | 2.60k | case FK_SecRel_2: |
41 | 2.60k | case FK_Data_2: |
42 | 2.60k | return 2; |
43 | 492k | case FK_PCRel_4: |
44 | 492k | case X86::reloc_riprel_4byte: |
45 | 492k | case X86::reloc_riprel_4byte_relax: |
46 | 492k | case X86::reloc_riprel_4byte_relax_rex: |
47 | 492k | case X86::reloc_riprel_4byte_movq_load: |
48 | 492k | case X86::reloc_signed_4byte: |
49 | 492k | case X86::reloc_signed_4byte_relax: |
50 | 492k | case X86::reloc_global_offset_table: |
51 | 492k | case X86::reloc_branch_4byte_pcrel: |
52 | 492k | case FK_SecRel_4: |
53 | 492k | case FK_Data_4: |
54 | 492k | return 4; |
55 | 538k | case FK_PCRel_8: |
56 | 538k | case FK_SecRel_8: |
57 | 538k | case FK_Data_8: |
58 | 538k | case X86::reloc_global_offset_table8: |
59 | 538k | return 8; |
60 | 1.15M | } |
61 | 1.15M | } |
62 | | |
63 | | namespace { |
64 | | |
65 | | class X86ELFObjectWriter : public MCELFObjectTargetWriter { |
66 | | public: |
67 | | X86ELFObjectWriter(bool is64Bit, uint8_t OSABI, uint16_t EMachine, |
68 | | bool HasRelocationAddend, bool foobar) |
69 | 0 | : MCELFObjectTargetWriter(is64Bit, OSABI, EMachine, HasRelocationAddend) {} |
70 | | }; |
71 | | |
72 | | class X86AsmBackend : public MCAsmBackend { |
73 | | const MCSubtargetInfo &STI; |
74 | | public: |
75 | | X86AsmBackend(const Target &T, const MCSubtargetInfo &STI) |
76 | 15.2k | : MCAsmBackend(support::little), STI(STI) {} |
77 | | |
78 | 0 | unsigned getNumFixupKinds() const override { |
79 | 0 | return X86::NumTargetFixupKinds; |
80 | 0 | } |
81 | | |
82 | | Optional<MCFixupKind> getFixupKind(StringRef Name) const override; |
83 | | |
84 | 4.02M | const MCFixupKindInfo &getFixupKindInfo(MCFixupKind Kind) const override { |
85 | 4.02M | const static MCFixupKindInfo Infos[X86::NumTargetFixupKinds] = { |
86 | 4.02M | {"reloc_riprel_4byte", 0, 32, MCFixupKindInfo::FKF_IsPCRel}, |
87 | 4.02M | {"reloc_riprel_4byte_movq_load", 0, 32, MCFixupKindInfo::FKF_IsPCRel}, |
88 | 4.02M | {"reloc_riprel_4byte_relax", 0, 32, MCFixupKindInfo::FKF_IsPCRel}, |
89 | 4.02M | {"reloc_riprel_4byte_relax_rex", 0, 32, MCFixupKindInfo::FKF_IsPCRel}, |
90 | 4.02M | {"reloc_signed_4byte", 0, 32, 0}, |
91 | 4.02M | {"reloc_signed_4byte_relax", 0, 32, 0}, |
92 | 4.02M | {"reloc_global_offset_table", 0, 32, 0}, |
93 | 4.02M | {"reloc_global_offset_table8", 0, 64, 0}, |
94 | 4.02M | {"reloc_branch_4byte_pcrel", 0, 32, MCFixupKindInfo::FKF_IsPCRel}, |
95 | 4.02M | }; |
96 | 4.02M | |
97 | 4.02M | if (Kind < FirstTargetFixupKind) |
98 | 3.43M | return MCAsmBackend::getFixupKindInfo(Kind); |
99 | 594k | |
100 | 594k | assert(unsigned(Kind - FirstTargetFixupKind) < getNumFixupKinds() && |
101 | 594k | "Invalid kind!"); |
102 | 594k | assert(Infos[Kind - FirstTargetFixupKind].Name && "Empty fixup name!"); |
103 | 594k | return Infos[Kind - FirstTargetFixupKind]; |
104 | 594k | } |
105 | | |
106 | | bool shouldForceRelocation(const MCAssembler &Asm, const MCFixup &Fixup, |
107 | | const MCValue &Target) override; |
108 | | |
109 | | void applyFixup(const MCAssembler &Asm, const MCFixup &Fixup, |
110 | | const MCValue &Target, MutableArrayRef<char> Data, |
111 | | uint64_t Value, bool IsResolved, |
112 | 1.15M | const MCSubtargetInfo *STI) const override { |
113 | 1.15M | unsigned Size = getFixupKindSize(Fixup.getKind()); |
114 | 1.15M | |
115 | 1.15M | assert(Fixup.getOffset() + Size <= Data.size() && "Invalid fixup offset!"); |
116 | 1.15M | |
117 | 1.15M | // Check that uppper bits are either all zeros or all ones. |
118 | 1.15M | // Specifically ignore overflow/underflow as long as the leakage is |
119 | 1.15M | // limited to the lower bits. This is to remain compatible with |
120 | 1.15M | // other assemblers. |
121 | 1.15M | assert((Size == 0 || isIntN(Size * 8 + 1, Value)) && |
122 | 1.15M | "Value does not fit in the Fixup field"); |
123 | 1.15M | |
124 | 7.55M | for (unsigned i = 0; i != Size; ++i6.40M ) |
125 | 6.40M | Data[Fixup.getOffset() + i] = uint8_t(Value >> (i * 8)); |
126 | 1.15M | } |
127 | | |
128 | | bool mayNeedRelaxation(const MCInst &Inst, |
129 | | const MCSubtargetInfo &STI) const override; |
130 | | |
131 | | bool fixupNeedsRelaxation(const MCFixup &Fixup, uint64_t Value, |
132 | | const MCRelaxableFragment *DF, |
133 | | const MCAsmLayout &Layout) const override; |
134 | | |
135 | | void relaxInstruction(const MCInst &Inst, const MCSubtargetInfo &STI, |
136 | | MCInst &Res) const override; |
137 | | |
138 | | bool writeNopData(raw_ostream &OS, uint64_t Count) const override; |
139 | | }; |
140 | | } // end anonymous namespace |
141 | | |
142 | 2.41M | static unsigned getRelaxedOpcodeBranch(const MCInst &Inst, bool is16BitMode) { |
143 | 2.41M | unsigned Op = Inst.getOpcode(); |
144 | 2.41M | switch (Op) { |
145 | 2.41M | default: |
146 | 1.63M | return Op; |
147 | 2.41M | case X86::JCC_1: |
148 | 633k | return (is16BitMode) ? X86::JCC_20 : X86::JCC_4; |
149 | 2.41M | case X86::JMP_1: |
150 | 148k | return (is16BitMode) ? X86::JMP_23 : X86::JMP_4148k ; |
151 | 2.41M | } |
152 | 2.41M | } |
153 | | |
154 | 1.70M | static unsigned getRelaxedOpcodeArith(const MCInst &Inst) { |
155 | 1.70M | unsigned Op = Inst.getOpcode(); |
156 | 1.70M | switch (Op) { |
157 | 1.70M | default: |
158 | 1.60M | return Op; |
159 | 1.70M | |
160 | 1.70M | // IMUL |
161 | 1.70M | case X86::IMUL16rri8: return X86::IMUL16rri4 ; |
162 | 1.70M | case X86::IMUL16rmi8: return X86::IMUL16rmi7 ; |
163 | 1.70M | case X86::IMUL32rri8: return X86::IMUL32rri34 ; |
164 | 1.70M | case X86::IMUL32rmi8: return X86::IMUL32rmi7 ; |
165 | 1.70M | case X86::IMUL64rri8: return X86::IMUL64rri3257 ; |
166 | 1.70M | case X86::IMUL64rmi8: return X86::IMUL64rmi3213 ; |
167 | 1.70M | |
168 | 1.70M | // AND |
169 | 1.70M | case X86::AND16ri8: return X86::AND16ri4 ; |
170 | 1.70M | case X86::AND16mi8: return X86::AND16mi7 ; |
171 | 1.70M | case X86::AND32ri8: return X86::AND32ri6.53k ; |
172 | 1.70M | case X86::AND32mi8: return X86::AND32mi52 ; |
173 | 1.70M | case X86::AND64ri8: return X86::AND64ri321.65k ; |
174 | 1.70M | case X86::AND64mi8: return X86::AND64mi329 ; |
175 | 1.70M | |
176 | 1.70M | // OR |
177 | 1.70M | case X86::OR16ri8: return X86::OR16ri4 ; |
178 | 1.70M | case X86::OR16mi8: return X86::OR16mi7 ; |
179 | 1.70M | case X86::OR32ri8: return X86::OR32ri492 ; |
180 | 1.70M | case X86::OR32mi8: return X86::OR32mi7 ; |
181 | 1.70M | case X86::OR64ri8: return X86::OR64ri32353 ; |
182 | 1.70M | case X86::OR64mi8: return X86::OR64mi327 ; |
183 | 1.70M | |
184 | 1.70M | // XOR |
185 | 1.70M | case X86::XOR16ri8: return X86::XOR16ri4 ; |
186 | 1.70M | case X86::XOR16mi8: return X86::XOR16mi7 ; |
187 | 1.70M | case X86::XOR32ri8: return X86::XOR32ri298 ; |
188 | 1.70M | case X86::XOR32mi8: return X86::XOR32mi7 ; |
189 | 1.70M | case X86::XOR64ri8: return X86::XOR64ri32229 ; |
190 | 1.70M | case X86::XOR64mi8: return X86::XOR64mi327 ; |
191 | 1.70M | |
192 | 1.70M | // ADD |
193 | 1.70M | case X86::ADD16ri8: return X86::ADD16ri4 ; |
194 | 1.70M | case X86::ADD16mi8: return X86::ADD16mi7 ; |
195 | 1.70M | case X86::ADD32ri8: return X86::ADD32ri14.6k ; |
196 | 1.70M | case X86::ADD32mi8: return X86::ADD32mi162 ; |
197 | 1.70M | case X86::ADD64ri8: return X86::ADD64ri3223.9k ; |
198 | 1.70M | case X86::ADD64mi8: return X86::ADD64mi32208 ; |
199 | 1.70M | |
200 | 1.70M | // ADC |
201 | 1.70M | case X86::ADC16ri8: return X86::ADC16ri3 ; |
202 | 1.70M | case X86::ADC16mi8: return X86::ADC16mi3 ; |
203 | 1.70M | case X86::ADC32ri8: return X86::ADC32ri264 ; |
204 | 1.70M | case X86::ADC32mi8: return X86::ADC32mi10 ; |
205 | 1.70M | case X86::ADC64ri8: return X86::ADC64ri32126 ; |
206 | 1.70M | case X86::ADC64mi8: return X86::ADC64mi323 ; |
207 | 1.70M | |
208 | 1.70M | // SUB |
209 | 1.70M | case X86::SUB16ri8: return X86::SUB16ri4 ; |
210 | 1.70M | case X86::SUB16mi8: return X86::SUB16mi7 ; |
211 | 1.70M | case X86::SUB32ri8: return X86::SUB32ri10.0k ; |
212 | 1.70M | case X86::SUB32mi8: return X86::SUB32mi7 ; |
213 | 1.70M | case X86::SUB64ri8: return X86::SUB64ri326.48k ; |
214 | 1.70M | case X86::SUB64mi8: return X86::SUB64mi327 ; |
215 | 1.70M | |
216 | 1.70M | // SBB |
217 | 1.70M | case X86::SBB16ri8: return X86::SBB16ri3 ; |
218 | 1.70M | case X86::SBB16mi8: return X86::SBB16mi3 ; |
219 | 1.70M | case X86::SBB32ri8: return X86::SBB32ri117 ; |
220 | 1.70M | case X86::SBB32mi8: return X86::SBB32mi6 ; |
221 | 1.70M | case X86::SBB64ri8: return X86::SBB64ri3284 ; |
222 | 1.70M | case X86::SBB64mi8: return X86::SBB64mi323 ; |
223 | 1.70M | |
224 | 1.70M | // CMP |
225 | 1.70M | case X86::CMP16ri8: return X86::CMP16ri50 ; |
226 | 1.70M | case X86::CMP16mi8: return X86::CMP16mi197 ; |
227 | 1.70M | case X86::CMP32ri8: return X86::CMP32ri7.04k ; |
228 | 1.70M | case X86::CMP32mi8: return X86::CMP32mi8.14k ; |
229 | 1.70M | case X86::CMP64ri8: return X86::CMP64ri326.46k ; |
230 | 1.70M | case X86::CMP64mi8: return X86::CMP64mi321.97k ; |
231 | 1.70M | |
232 | 1.70M | // PUSH |
233 | 1.70M | case X86::PUSH32i8: return X86::PUSHi329.55k ; |
234 | 1.70M | case X86::PUSH16i8: return X86::PUSHi1613 ; |
235 | 1.70M | case X86::PUSH64i8: return X86::PUSH64i322.73k ; |
236 | 1.70M | } |
237 | 1.70M | } |
238 | | |
239 | 67.4k | static unsigned getRelaxedOpcode(const MCInst &Inst, bool is16BitMode) { |
240 | 67.4k | unsigned R = getRelaxedOpcodeArith(Inst); |
241 | 67.4k | if (R != Inst.getOpcode()) |
242 | 99 | return R; |
243 | 67.3k | return getRelaxedOpcodeBranch(Inst, is16BitMode); |
244 | 67.3k | } |
245 | | |
246 | 10 | Optional<MCFixupKind> X86AsmBackend::getFixupKind(StringRef Name) const { |
247 | 10 | if (STI.getTargetTriple().isOSBinFormatELF()) { |
248 | 10 | if (STI.getTargetTriple().getArch() == Triple::x86_64) { |
249 | 6 | if (Name == "R_X86_64_NONE") |
250 | 6 | return FK_NONE; |
251 | 4 | } else { |
252 | 4 | if (Name == "R_386_NONE") |
253 | 4 | return FK_NONE; |
254 | 0 | } |
255 | 10 | } |
256 | 0 | return MCAsmBackend::getFixupKind(Name); |
257 | 0 | } |
258 | | |
259 | | bool X86AsmBackend::shouldForceRelocation(const MCAssembler &, |
260 | | const MCFixup &Fixup, |
261 | 1.02M | const MCValue &) { |
262 | 1.02M | return Fixup.getKind() == FK_NONE; |
263 | 1.02M | } |
264 | | |
265 | | bool X86AsmBackend::mayNeedRelaxation(const MCInst &Inst, |
266 | 2.35M | const MCSubtargetInfo &STI) const { |
267 | 2.35M | // Branches can always be relaxed in either mode. |
268 | 2.35M | if (getRelaxedOpcodeBranch(Inst, false) != Inst.getOpcode()) |
269 | 714k | return true; |
270 | 1.63M | |
271 | 1.63M | // Check if this instruction is ever relaxable. |
272 | 1.63M | if (getRelaxedOpcodeArith(Inst) == Inst.getOpcode()) |
273 | 1.53M | return false; |
274 | 102k | |
275 | 102k | |
276 | 102k | // Check if the relaxable operand has an expression. For the current set of |
277 | 102k | // relaxable instructions, the relaxable operand is always the last operand. |
278 | 102k | unsigned RelaxableOp = Inst.getNumOperands() - 1; |
279 | 102k | if (Inst.getOperand(RelaxableOp).isExpr()) |
280 | 202 | return true; |
281 | 101k | |
282 | 101k | return false; |
283 | 101k | } |
284 | | |
285 | | bool X86AsmBackend::fixupNeedsRelaxation(const MCFixup &Fixup, |
286 | | uint64_t Value, |
287 | | const MCRelaxableFragment *DF, |
288 | 523k | const MCAsmLayout &Layout) const { |
289 | 523k | // Relax if the value is too big for a (signed) i8. |
290 | 523k | return int64_t(Value) != int64_t(int8_t(Value)); |
291 | 523k | } |
292 | | |
293 | | // FIXME: Can tblgen help at all here to verify there aren't other instructions |
294 | | // we can relax? |
295 | | void X86AsmBackend::relaxInstruction(const MCInst &Inst, |
296 | | const MCSubtargetInfo &STI, |
297 | 67.4k | MCInst &Res) const { |
298 | 67.4k | // The only relaxations X86 does is from a 1byte pcrel to a 4byte pcrel. |
299 | 67.4k | bool is16BitMode = STI.getFeatureBits()[X86::Mode16Bit]; |
300 | 67.4k | unsigned RelaxedOp = getRelaxedOpcode(Inst, is16BitMode); |
301 | 67.4k | |
302 | 67.4k | if (RelaxedOp == Inst.getOpcode()) { |
303 | 0 | SmallString<256> Tmp; |
304 | 0 | raw_svector_ostream OS(Tmp); |
305 | 0 | Inst.dump_pretty(OS); |
306 | 0 | OS << "\n"; |
307 | 0 | report_fatal_error("unexpected instruction to relax: " + OS.str()); |
308 | 0 | } |
309 | 67.4k | |
310 | 67.4k | Res = Inst; |
311 | 67.4k | Res.setOpcode(RelaxedOp); |
312 | 67.4k | } |
313 | | |
314 | | /// Write a sequence of optimal nops to the output, covering \p Count |
315 | | /// bytes. |
316 | | /// \return - true on success, false on failure |
317 | 41.7k | bool X86AsmBackend::writeNopData(raw_ostream &OS, uint64_t Count) const { |
318 | 41.7k | static const char Nops[10][11] = { |
319 | 41.7k | // nop |
320 | 41.7k | "\x90", |
321 | 41.7k | // xchg %ax,%ax |
322 | 41.7k | "\x66\x90", |
323 | 41.7k | // nopl (%[re]ax) |
324 | 41.7k | "\x0f\x1f\x00", |
325 | 41.7k | // nopl 0(%[re]ax) |
326 | 41.7k | "\x0f\x1f\x40\x00", |
327 | 41.7k | // nopl 0(%[re]ax,%[re]ax,1) |
328 | 41.7k | "\x0f\x1f\x44\x00\x00", |
329 | 41.7k | // nopw 0(%[re]ax,%[re]ax,1) |
330 | 41.7k | "\x66\x0f\x1f\x44\x00\x00", |
331 | 41.7k | // nopl 0L(%[re]ax) |
332 | 41.7k | "\x0f\x1f\x80\x00\x00\x00\x00", |
333 | 41.7k | // nopl 0L(%[re]ax,%[re]ax,1) |
334 | 41.7k | "\x0f\x1f\x84\x00\x00\x00\x00\x00", |
335 | 41.7k | // nopw 0L(%[re]ax,%[re]ax,1) |
336 | 41.7k | "\x66\x0f\x1f\x84\x00\x00\x00\x00\x00", |
337 | 41.7k | // nopw %cs:0L(%[re]ax,%[re]ax,1) |
338 | 41.7k | "\x66\x2e\x0f\x1f\x84\x00\x00\x00\x00\x00", |
339 | 41.7k | }; |
340 | 41.7k | |
341 | 41.7k | // This CPU doesn't support long nops. If needed add more. |
342 | 41.7k | // FIXME: We could generated something better than plain 0x90. |
343 | 41.7k | if (!STI.getFeatureBits()[X86::FeatureNOPL]) { |
344 | 22.4k | for (uint64_t i = 0; i < Count; ++i15.4k ) |
345 | 15.4k | OS << '\x90'; |
346 | 6.92k | return true; |
347 | 6.92k | } |
348 | 34.7k | |
349 | 34.7k | // 15-bytes is the longest single NOP instruction, but 10-bytes is |
350 | 34.7k | // commonly the longest that can be efficiently decoded. |
351 | 34.7k | uint64_t MaxNopLength = 10; |
352 | 34.7k | if (STI.getFeatureBits()[X86::ProcIntelSLM]) |
353 | 6 | MaxNopLength = 7; |
354 | 34.7k | else if (STI.getFeatureBits()[X86::FeatureFast15ByteNOP]) |
355 | 28 | MaxNopLength = 15; |
356 | 34.7k | else if (STI.getFeatureBits()[X86::FeatureFast11ByteNOP]) |
357 | 4 | MaxNopLength = 11; |
358 | 34.7k | |
359 | 34.7k | // Emit as many MaxNopLength NOPs as needed, then emit a NOP of the remaining |
360 | 34.7k | // length. |
361 | 110k | do { |
362 | 110k | const uint8_t ThisNopLength = (uint8_t) std::min(Count, MaxNopLength); |
363 | 110k | const uint8_t Prefixes = ThisNopLength <= 10 ? 0110k : ThisNopLength - 1012 ; |
364 | 110k | for (uint8_t i = 0; i < Prefixes; i++72 ) |
365 | 72 | OS << '\x66'; |
366 | 110k | const uint8_t Rest = ThisNopLength - Prefixes; |
367 | 110k | if (Rest != 0) |
368 | 105k | OS.write(Nops[Rest - 1], Rest); |
369 | 110k | Count -= ThisNopLength; |
370 | 110k | } while (Count != 0); |
371 | 34.7k | |
372 | 34.7k | return true; |
373 | 34.7k | } |
374 | | |
375 | | /* *** */ |
376 | | |
377 | | namespace { |
378 | | |
379 | | class ELFX86AsmBackend : public X86AsmBackend { |
380 | | public: |
381 | | uint8_t OSABI; |
382 | | ELFX86AsmBackend(const Target &T, uint8_t OSABI, const MCSubtargetInfo &STI) |
383 | 8.93k | : X86AsmBackend(T, STI), OSABI(OSABI) {} |
384 | | }; |
385 | | |
386 | | class ELFX86_32AsmBackend : public ELFX86AsmBackend { |
387 | | public: |
388 | | ELFX86_32AsmBackend(const Target &T, uint8_t OSABI, |
389 | | const MCSubtargetInfo &STI) |
390 | 2.00k | : ELFX86AsmBackend(T, OSABI, STI) {} |
391 | | |
392 | | std::unique_ptr<MCObjectTargetWriter> |
393 | 2.00k | createObjectTargetWriter() const override { |
394 | 2.00k | return createX86ELFObjectWriter(/*IsELF64*/ false, OSABI, ELF::EM_386); |
395 | 2.00k | } |
396 | | }; |
397 | | |
398 | | class ELFX86_X32AsmBackend : public ELFX86AsmBackend { |
399 | | public: |
400 | | ELFX86_X32AsmBackend(const Target &T, uint8_t OSABI, |
401 | | const MCSubtargetInfo &STI) |
402 | 61 | : ELFX86AsmBackend(T, OSABI, STI) {} |
403 | | |
404 | | std::unique_ptr<MCObjectTargetWriter> |
405 | 61 | createObjectTargetWriter() const override { |
406 | 61 | return createX86ELFObjectWriter(/*IsELF64*/ false, OSABI, |
407 | 61 | ELF::EM_X86_64); |
408 | 61 | } |
409 | | }; |
410 | | |
411 | | class ELFX86_IAMCUAsmBackend : public ELFX86AsmBackend { |
412 | | public: |
413 | | ELFX86_IAMCUAsmBackend(const Target &T, uint8_t OSABI, |
414 | | const MCSubtargetInfo &STI) |
415 | 6 | : ELFX86AsmBackend(T, OSABI, STI) {} |
416 | | |
417 | | std::unique_ptr<MCObjectTargetWriter> |
418 | 6 | createObjectTargetWriter() const override { |
419 | 6 | return createX86ELFObjectWriter(/*IsELF64*/ false, OSABI, |
420 | 6 | ELF::EM_IAMCU); |
421 | 6 | } |
422 | | }; |
423 | | |
424 | | class ELFX86_64AsmBackend : public ELFX86AsmBackend { |
425 | | public: |
426 | | ELFX86_64AsmBackend(const Target &T, uint8_t OSABI, |
427 | | const MCSubtargetInfo &STI) |
428 | 6.86k | : ELFX86AsmBackend(T, OSABI, STI) {} |
429 | | |
430 | | std::unique_ptr<MCObjectTargetWriter> |
431 | 6.87k | createObjectTargetWriter() const override { |
432 | 6.87k | return createX86ELFObjectWriter(/*IsELF64*/ true, OSABI, ELF::EM_X86_64); |
433 | 6.87k | } |
434 | | }; |
435 | | |
436 | | class WindowsX86AsmBackend : public X86AsmBackend { |
437 | | bool Is64Bit; |
438 | | |
439 | | public: |
440 | | WindowsX86AsmBackend(const Target &T, bool is64Bit, |
441 | | const MCSubtargetInfo &STI) |
442 | | : X86AsmBackend(T, STI) |
443 | 1.08k | , Is64Bit(is64Bit) { |
444 | 1.08k | } |
445 | | |
446 | 10 | Optional<MCFixupKind> getFixupKind(StringRef Name) const override { |
447 | 10 | return StringSwitch<Optional<MCFixupKind>>(Name) |
448 | 10 | .Case("dir32", FK_Data_4) |
449 | 10 | .Case("secrel32", FK_SecRel_4) |
450 | 10 | .Case("secidx", FK_SecRel_2) |
451 | 10 | .Default(MCAsmBackend::getFixupKind(Name)); |
452 | 10 | } |
453 | | |
454 | | std::unique_ptr<MCObjectTargetWriter> |
455 | 1.08k | createObjectTargetWriter() const override { |
456 | 1.08k | return createX86WinCOFFObjectWriter(Is64Bit); |
457 | 1.08k | } |
458 | | }; |
459 | | |
namespace CU {

/// Compact unwind encoding values.
enum CompactUnwindEncodings {
  /// [RE]BP based frame where [RE]BP is pushed on the stack immediately after
  /// the return address, then [RE]SP is moved to [RE]BP.
  UNWIND_MODE_BP_FRAME = 0x01000000,

  /// A frameless function with a small constant stack size.
  UNWIND_MODE_STACK_IMMD = 0x02000000,

  /// A frameless function with a large constant stack size.
  UNWIND_MODE_STACK_IND = 0x03000000,

  /// No compact unwind encoding is available.
  UNWIND_MODE_DWARF = 0x04000000,

  /// Mask for encoding the frame registers.
  UNWIND_BP_FRAME_REGISTERS = 0x00007FFF,

  /// Mask for encoding the frameless registers.
  UNWIND_FRAMELESS_STACK_REG_PERMUTATION = 0x000003FF
};

} // end CU namespace
485 | | |
486 | | class DarwinX86AsmBackend : public X86AsmBackend { |
487 | | const MCRegisterInfo &MRI; |
488 | | |
489 | | /// Number of registers that can be saved in a compact unwind encoding. |
490 | | enum { CU_NUM_SAVED_REGS = 6 }; |
491 | | |
492 | | mutable unsigned SavedRegs[CU_NUM_SAVED_REGS]; |
493 | | bool Is64Bit; |
494 | | |
495 | | unsigned OffsetSize; ///< Offset of a "push" instruction. |
496 | | unsigned MoveInstrSize; ///< Size of a "move" instruction. |
497 | | unsigned StackDivide; ///< Amount to adjust stack size by. |
498 | | protected: |
499 | | /// Size of a "push" instruction for the given register. |
500 | 69.3k | unsigned PushInstrSize(unsigned Reg) const { |
501 | 69.3k | switch (Reg) { |
502 | 69.3k | case X86::EBX: |
503 | 38.8k | case X86::ECX: |
504 | 38.8k | case X86::EDX: |
505 | 38.8k | case X86::EDI: |
506 | 38.8k | case X86::ESI: |
507 | 38.8k | case X86::EBP: |
508 | 38.8k | case X86::RBX: |
509 | 38.8k | case X86::RBP: |
510 | 38.8k | return 1; |
511 | 38.8k | case X86::R12: |
512 | 30.5k | case X86::R13: |
513 | 30.5k | case X86::R14: |
514 | 30.5k | case X86::R15: |
515 | 30.5k | return 2; |
516 | 0 | } |
517 | 0 | return 1; |
518 | 0 | } |
519 | | |
520 | | /// Implementation of algorithm to generate the compact unwind encoding |
521 | | /// for the CFI instructions. |
522 | | uint32_t |
523 | 22.7k | generateCompactUnwindEncodingImpl(ArrayRef<MCCFIInstruction> Instrs) const { |
524 | 22.7k | if (Instrs.empty()) return 0663 ; |
525 | 22.0k | |
526 | 22.0k | // Reset the saved registers. |
527 | 22.0k | unsigned SavedRegIdx = 0; |
528 | 22.0k | memset(SavedRegs, 0, sizeof(SavedRegs)); |
529 | 22.0k | |
530 | 22.0k | bool HasFP = false; |
531 | 22.0k | |
532 | 22.0k | // Encode that we are using EBP/RBP as the frame pointer. |
533 | 22.0k | uint32_t CompactUnwindEncoding = 0; |
534 | 22.0k | |
535 | 22.0k | unsigned SubtractInstrIdx = Is64Bit ? 319.0k : 23.04k ; |
536 | 22.0k | unsigned InstrOffset = 0; |
537 | 22.0k | unsigned StackAdjust = 0; |
538 | 22.0k | unsigned StackSize = 0; |
539 | 22.0k | unsigned NumDefCFAOffsets = 0; |
540 | 22.0k | |
541 | 135k | for (unsigned i = 0, e = Instrs.size(); i != e; ++i113k ) { |
542 | 113k | const MCCFIInstruction &Inst = Instrs[i]; |
543 | 113k | |
544 | 113k | switch (Inst.getOperation()) { |
545 | 113k | default: |
546 | 76 | // Any other CFI directives indicate a frame that we aren't prepared |
547 | 76 | // to represent via compact unwind, so just bail out. |
548 | 76 | return 0; |
549 | 113k | case MCCFIInstruction::OpDefCfaRegister: { |
550 | 21.4k | // Defines a frame pointer. E.g. |
551 | 21.4k | // |
552 | 21.4k | // movq %rsp, %rbp |
553 | 21.4k | // L0: |
554 | 21.4k | // .cfi_def_cfa_register %rbp |
555 | 21.4k | // |
556 | 21.4k | HasFP = true; |
557 | 21.4k | |
558 | 21.4k | // If the frame pointer is other than esp/rsp, we do not have a way to |
559 | 21.4k | // generate a compact unwinding representation, so bail out. |
560 | 21.4k | if (MRI.getLLVMRegNum(Inst.getRegister(), true) != |
561 | 21.4k | (Is64Bit ? X86::RBP18.4k : X86::EBP2.95k )) |
562 | 1 | return 0; |
563 | 21.4k | |
564 | 21.4k | // Reset the counts. |
565 | 21.4k | memset(SavedRegs, 0, sizeof(SavedRegs)); |
566 | 21.4k | StackAdjust = 0; |
567 | 21.4k | SavedRegIdx = 0; |
568 | 21.4k | InstrOffset += MoveInstrSize; |
569 | 21.4k | break; |
570 | 21.4k | } |
571 | 22.4k | case MCCFIInstruction::OpDefCfaOffset: { |
572 | 22.4k | // Defines a new offset for the CFA. E.g. |
573 | 22.4k | // |
574 | 22.4k | // With frame: |
575 | 22.4k | // |
576 | 22.4k | // pushq %rbp |
577 | 22.4k | // L0: |
578 | 22.4k | // .cfi_def_cfa_offset 16 |
579 | 22.4k | // |
580 | 22.4k | // Without frame: |
581 | 22.4k | // |
582 | 22.4k | // subq $72, %rsp |
583 | 22.4k | // L0: |
584 | 22.4k | // .cfi_def_cfa_offset 80 |
585 | 22.4k | // |
586 | 22.4k | StackSize = std::abs(Inst.getOffset()) / StackDivide; |
587 | 22.4k | ++NumDefCFAOffsets; |
588 | 22.4k | break; |
589 | 21.4k | } |
590 | 69.3k | case MCCFIInstruction::OpOffset: { |
591 | 69.3k | // Defines a "push" of a callee-saved register. E.g. |
592 | 69.3k | // |
593 | 69.3k | // pushq %r15 |
594 | 69.3k | // pushq %r14 |
595 | 69.3k | // pushq %rbx |
596 | 69.3k | // L0: |
597 | 69.3k | // subq $120, %rsp |
598 | 69.3k | // L1: |
599 | 69.3k | // .cfi_offset %rbx, -40 |
600 | 69.3k | // .cfi_offset %r14, -32 |
601 | 69.3k | // .cfi_offset %r15, -24 |
602 | 69.3k | // |
603 | 69.3k | if (SavedRegIdx == CU_NUM_SAVED_REGS) |
604 | 0 | // If there are too many saved registers, we cannot use a compact |
605 | 0 | // unwind encoding. |
606 | 0 | return CU::UNWIND_MODE_DWARF; |
607 | 69.3k | |
608 | 69.3k | unsigned Reg = MRI.getLLVMRegNum(Inst.getRegister(), true); |
609 | 69.3k | SavedRegs[SavedRegIdx++] = Reg; |
610 | 69.3k | StackAdjust += OffsetSize; |
611 | 69.3k | InstrOffset += PushInstrSize(Reg); |
612 | 69.3k | break; |
613 | 69.3k | } |
614 | 113k | } |
615 | 113k | } |
616 | 22.0k | |
617 | 22.0k | StackAdjust /= StackDivide; |
618 | 21.9k | |
619 | 21.9k | if (HasFP) { |
620 | 21.4k | if ((StackAdjust & 0xFF) != StackAdjust) |
621 | 0 | // Offset was too big for a compact unwind encoding. |
622 | 0 | return CU::UNWIND_MODE_DWARF; |
623 | 21.4k | |
624 | 21.4k | // Get the encoding of the saved registers when we have a frame pointer. |
625 | 21.4k | uint32_t RegEnc = encodeCompactUnwindRegistersWithFrame(); |
626 | 21.4k | if (RegEnc == ~0U) return CU::UNWIND_MODE_DWARF0 ; |
627 | 21.4k | |
628 | 21.4k | CompactUnwindEncoding |= CU::UNWIND_MODE_BP_FRAME; |
629 | 21.4k | CompactUnwindEncoding |= (StackAdjust & 0xFF) << 16; |
630 | 21.4k | CompactUnwindEncoding |= RegEnc & CU::UNWIND_BP_FRAME_REGISTERS; |
631 | 21.4k | } else { |
632 | 534 | SubtractInstrIdx += InstrOffset; |
633 | 534 | ++StackAdjust; |
634 | 534 | |
635 | 534 | if ((StackSize & 0xFF) == StackSize) { |
636 | 526 | // Frameless stack with a small stack size. |
637 | 526 | CompactUnwindEncoding |= CU::UNWIND_MODE_STACK_IMMD; |
638 | 526 | |
639 | 526 | // Encode the stack size. |
640 | 526 | CompactUnwindEncoding |= (StackSize & 0xFF) << 16; |
641 | 526 | } else { |
642 | 8 | if ((StackAdjust & 0x7) != StackAdjust) |
643 | 0 | // The extra stack adjustments are too big for us to handle. |
644 | 0 | return CU::UNWIND_MODE_DWARF; |
645 | 8 | |
646 | 8 | // Frameless stack with an offset too large for us to encode compactly. |
647 | 8 | CompactUnwindEncoding |= CU::UNWIND_MODE_STACK_IND; |
648 | 8 | |
649 | 8 | // Encode the offset to the nnnnnn value in the 'subl $nnnnnn, ESP' |
650 | 8 | // instruction. |
651 | 8 | CompactUnwindEncoding |= (SubtractInstrIdx & 0xFF) << 16; |
652 | 8 | |
653 | 8 | // Encode any extra stack adjustments (done via push instructions). |
654 | 8 | CompactUnwindEncoding |= (StackAdjust & 0x7) << 13; |
655 | 8 | } |
656 | 534 | |
657 | 534 | // Encode the number of registers saved. (Reverse the list first.) |
658 | 534 | std::reverse(&SavedRegs[0], &SavedRegs[SavedRegIdx]); |
659 | 534 | CompactUnwindEncoding |= (SavedRegIdx & 0x7) << 10; |
660 | 534 | |
661 | 534 | // Get the encoding of the saved registers when we don't have a frame |
662 | 534 | // pointer. |
663 | 534 | uint32_t RegEnc = encodeCompactUnwindRegistersWithoutFrame(SavedRegIdx); |
664 | 534 | if (RegEnc == ~0U) return CU::UNWIND_MODE_DWARF0 ; |
665 | 534 | |
666 | 534 | // Encode the register encoding. |
667 | 534 | CompactUnwindEncoding |= |
668 | 534 | RegEnc & CU::UNWIND_FRAMELESS_STACK_REG_PERMUTATION; |
669 | 534 | } |
670 | 21.9k | |
671 | 21.9k | return CompactUnwindEncoding; |
672 | 21.9k | } |
673 | | |
674 | | private: |
675 | | /// Get the compact unwind number for a given register. The number |
676 | | /// corresponds to the enum lists in compact_unwind_encoding.h. |
677 | 47.8k | int getCompactUnwindRegNum(unsigned Reg) const { |
678 | 47.8k | static const MCPhysReg CU32BitRegs[7] = { |
679 | 47.8k | X86::EBX, X86::ECX, X86::EDX, X86::EDI, X86::ESI, X86::EBP, 0 |
680 | 47.8k | }; |
681 | 47.8k | static const MCPhysReg CU64BitRegs[] = { |
682 | 47.8k | X86::RBX, X86::R12, X86::R13, X86::R14, X86::R15, X86::RBP, 0 |
683 | 47.8k | }; |
684 | 47.8k | const MCPhysReg *CURegs = Is64Bit ? CU64BitRegs42.8k : CU32BitRegs5.03k ; |
685 | 143k | for (int Idx = 1; *CURegs; ++CURegs, ++Idx95.3k ) |
686 | 143k | if (*CURegs == Reg) |
687 | 47.8k | return Idx; |
688 | 47.8k | |
689 | 47.8k | return -10 ; |
690 | 47.8k | } |
691 | | |
692 | | /// Return the registers encoded for a compact encoding with a frame |
693 | | /// pointer. |
694 | 21.4k | uint32_t encodeCompactUnwindRegistersWithFrame() const { |
695 | 21.4k | // Encode the registers in the order they were saved --- 3-bits per |
696 | 21.4k | // register. The list of saved registers is assumed to be in reverse |
697 | 21.4k | // order. The registers are numbered from 1 to CU_NUM_SAVED_REGS. |
698 | 21.4k | uint32_t RegEnc = 0; |
699 | 68.8k | for (int i = 0, Idx = 0; i != CU_NUM_SAVED_REGS; ++i47.4k ) { |
700 | 68.8k | unsigned Reg = SavedRegs[i]; |
701 | 68.8k | if (Reg == 0) break21.4k ; |
702 | 47.4k | |
703 | 47.4k | int CURegNum = getCompactUnwindRegNum(Reg); |
704 | 47.4k | if (CURegNum == -1) return ~0U0 ; |
705 | 47.4k | |
706 | 47.4k | // Encode the 3-bit register number in order, skipping over 3-bits for |
707 | 47.4k | // each register. |
708 | 47.4k | RegEnc |= (CURegNum & 0x7) << (Idx++ * 3); |
709 | 47.4k | } |
710 | 21.4k | |
711 | 21.4k | assert((RegEnc & 0x3FFFF) == RegEnc && |
712 | 21.4k | "Invalid compact register encoding!"); |
713 | 21.4k | return RegEnc; |
714 | 21.4k | } |
715 | | |
  /// Create the permutation encoding used with frameless stacks. It is
  /// passed the number of registers to be saved and an array of the registers
  /// saved (the SavedRegs member, filled in by the caller).
  ///
  /// \param RegCount  Number of live entries at the front of SavedRegs
  ///                  (0..CU_NUM_SAVED_REGS).
  /// \returns the 10-bit permutation encoding, or ~0U if any saved register
  ///          has no compact-unwind number.
  ///
  /// NOTE(review): this const method rewrites and reverses SavedRegs in
  /// place — SavedRegs is presumably declared mutable; callers must not rely
  /// on its previous contents afterwards.
  uint32_t encodeCompactUnwindRegistersWithoutFrame(unsigned RegCount) const {
    // The saved registers are numbered from 1 to 6. In order to encode the
    // order in which they were saved, we re-number them according to their
    // place in the register order. The re-numbering is relative to the last
    // re-numbered register. E.g., if we have registers {6, 2, 4, 5} saved in
    // that order:
    //
    //    Orig  Re-Num
    //    ----  ------
    //     6       6
    //     2       2
    //     4       3
    //     5       3
    //
    // Map each DWARF register to its compact-unwind number (1..6); bail out
    // with the all-ones sentinel if any register is not encodable.
    for (unsigned i = 0; i < RegCount; ++i) {
      int CUReg = getCompactUnwindRegNum(SavedRegs[i]);
      if (CUReg == -1) return ~0U;
      SavedRegs[i] = CUReg;
    }

    // Reverse the list. The full CU_NUM_SAVED_REGS array is reversed, so the
    // RegCount live entries end up in the TAIL of SavedRegs; that is why all
    // indexing below starts at CU_NUM_SAVED_REGS - RegCount.
    std::reverse(&SavedRegs[0], &SavedRegs[CU_NUM_SAVED_REGS]);

    // Compute a Lehmer-style code: each register's renumbered value is its
    // original value minus the count of smaller registers already seen
    // (minus 1 to make it zero-based).
    uint32_t RenumRegs[CU_NUM_SAVED_REGS];
    for (unsigned i = CU_NUM_SAVED_REGS - RegCount; i < CU_NUM_SAVED_REGS; ++i){
      unsigned Countless = 0;
      for (unsigned j = CU_NUM_SAVED_REGS - RegCount; j < i; ++j)
        if (SavedRegs[j] < SavedRegs[i])
          ++Countless;

      RenumRegs[i] = SavedRegs[i] - Countless - 1;
    }

    // Take the renumbered values and encode them into a 10-bit number.
    // The multipliers are mixed-radix (factorial-number-system) weights; for
    // RegCount == 6 the last digit is implied and therefore omitted.
    uint32_t permutationEncoding = 0;
    switch (RegCount) {
    case 6:
      permutationEncoding |= 120 * RenumRegs[0] + 24 * RenumRegs[1]
                             + 6 * RenumRegs[2] +  2 * RenumRegs[3]
                             +     RenumRegs[4];
      break;
    case 5:
      permutationEncoding |= 120 * RenumRegs[1] + 24 * RenumRegs[2]
                             + 6 * RenumRegs[3] +  2 * RenumRegs[4]
                             +     RenumRegs[5];
      break;
    case 4:
      permutationEncoding |= 60 * RenumRegs[2] + 12 * RenumRegs[3]
                             + 3 * RenumRegs[4] +    RenumRegs[5];
      break;
    case 3:
      permutationEncoding |= 20 * RenumRegs[3] + 4 * RenumRegs[4]
                             +    RenumRegs[5];
      break;
    case 2:
      permutationEncoding |= 5 * RenumRegs[4] + RenumRegs[5];
      break;
    case 1:
      permutationEncoding |= RenumRegs[5];
      break;
    }

    assert((permutationEncoding & 0x3FF) == permutationEncoding &&
           "Invalid compact register encoding!");
    return permutationEncoding;
  }
785 | | |
786 | | public: |
787 | | DarwinX86AsmBackend(const Target &T, const MCRegisterInfo &MRI, |
788 | | const MCSubtargetInfo &STI, bool Is64Bit) |
789 | 5.27k | : X86AsmBackend(T, STI), MRI(MRI), Is64Bit(Is64Bit) { |
790 | 5.27k | memset(SavedRegs, 0, sizeof(SavedRegs)); |
791 | 5.27k | OffsetSize = Is64Bit ? 84.21k : 41.06k ; |
792 | 5.27k | MoveInstrSize = Is64Bit ? 34.21k : 21.06k ; |
793 | 5.27k | StackDivide = Is64Bit ? 84.21k : 41.06k ; |
794 | 5.27k | } |
795 | | }; |
796 | | |
797 | | class DarwinX86_32AsmBackend : public DarwinX86AsmBackend { |
798 | | public: |
799 | | DarwinX86_32AsmBackend(const Target &T, const MCRegisterInfo &MRI, |
800 | | const MCSubtargetInfo &STI) |
801 | 1.06k | : DarwinX86AsmBackend(T, MRI, STI, false) {} |
802 | | |
803 | | std::unique_ptr<MCObjectTargetWriter> |
804 | 1.06k | createObjectTargetWriter() const override { |
805 | 1.06k | return createX86MachObjectWriter(/*Is64Bit=*/false, |
806 | 1.06k | MachO::CPU_TYPE_I386, |
807 | 1.06k | MachO::CPU_SUBTYPE_I386_ALL); |
808 | 1.06k | } |
809 | | |
810 | | /// Generate the compact unwind encoding for the CFI instructions. |
811 | | uint32_t generateCompactUnwindEncoding( |
812 | 3.07k | ArrayRef<MCCFIInstruction> Instrs) const override { |
813 | 3.07k | return generateCompactUnwindEncodingImpl(Instrs); |
814 | 3.07k | } |
815 | | }; |
816 | | |
817 | | class DarwinX86_64AsmBackend : public DarwinX86AsmBackend { |
818 | | const MachO::CPUSubTypeX86 Subtype; |
819 | | public: |
820 | | DarwinX86_64AsmBackend(const Target &T, const MCRegisterInfo &MRI, |
821 | | const MCSubtargetInfo &STI, MachO::CPUSubTypeX86 st) |
822 | 4.21k | : DarwinX86AsmBackend(T, MRI, STI, true), Subtype(st) {} |
823 | | |
824 | | std::unique_ptr<MCObjectTargetWriter> |
825 | 4.21k | createObjectTargetWriter() const override { |
826 | 4.21k | return createX86MachObjectWriter(/*Is64Bit=*/true, MachO::CPU_TYPE_X86_64, |
827 | 4.21k | Subtype); |
828 | 4.21k | } |
829 | | |
830 | | /// Generate the compact unwind encoding for the CFI instructions. |
831 | | uint32_t generateCompactUnwindEncoding( |
832 | 19.6k | ArrayRef<MCCFIInstruction> Instrs) const override { |
833 | 19.6k | return generateCompactUnwindEncodingImpl(Instrs); |
834 | 19.6k | } |
835 | | }; |
836 | | |
837 | | } // end anonymous namespace |
838 | | |
839 | | MCAsmBackend *llvm::createX86_32AsmBackend(const Target &T, |
840 | | const MCSubtargetInfo &STI, |
841 | | const MCRegisterInfo &MRI, |
842 | 3.48k | const MCTargetOptions &Options) { |
843 | 3.48k | const Triple &TheTriple = STI.getTargetTriple(); |
844 | 3.48k | if (TheTriple.isOSBinFormatMachO()) |
845 | 1.06k | return new DarwinX86_32AsmBackend(T, MRI, STI); |
846 | 2.42k | |
847 | 2.42k | if (TheTriple.isOSWindows() && TheTriple.isOSBinFormatCOFF()415 ) |
848 | 414 | return new WindowsX86AsmBackend(T, false, STI); |
849 | 2.00k | |
850 | 2.00k | uint8_t OSABI = MCELFObjectTargetWriter::getOSABI(TheTriple.getOS()); |
851 | 2.00k | |
852 | 2.00k | if (TheTriple.isOSIAMCU()) |
853 | 6 | return new ELFX86_IAMCUAsmBackend(T, OSABI, STI); |
854 | 2.00k | |
855 | 2.00k | return new ELFX86_32AsmBackend(T, OSABI, STI); |
856 | 2.00k | } |
857 | | |
858 | | MCAsmBackend *llvm::createX86_64AsmBackend(const Target &T, |
859 | | const MCSubtargetInfo &STI, |
860 | | const MCRegisterInfo &MRI, |
861 | 11.8k | const MCTargetOptions &Options) { |
862 | 11.8k | const Triple &TheTriple = STI.getTargetTriple(); |
863 | 11.8k | if (TheTriple.isOSBinFormatMachO()) { |
864 | 4.21k | MachO::CPUSubTypeX86 CS = |
865 | 4.21k | StringSwitch<MachO::CPUSubTypeX86>(TheTriple.getArchName()) |
866 | 4.21k | .Case("x86_64h", MachO::CPU_SUBTYPE_X86_64_H) |
867 | 4.21k | .Default(MachO::CPU_SUBTYPE_X86_64_ALL); |
868 | 4.21k | return new DarwinX86_64AsmBackend(T, MRI, STI, CS); |
869 | 4.21k | } |
870 | 7.60k | |
871 | 7.60k | if (TheTriple.isOSWindows() && TheTriple.isOSBinFormatCOFF()674 ) |
872 | 671 | return new WindowsX86AsmBackend(T, true, STI); |
873 | 6.93k | |
874 | 6.93k | uint8_t OSABI = MCELFObjectTargetWriter::getOSABI(TheTriple.getOS()); |
875 | 6.93k | |
876 | 6.93k | if (TheTriple.getEnvironment() == Triple::GNUX32) |
877 | 61 | return new ELFX86_X32AsmBackend(T, OSABI, STI); |
878 | 6.87k | return new ELFX86_64AsmBackend(T, OSABI, STI); |
879 | 6.87k | } |