/Users/buildslave/jenkins/sharedspace/clang-stage2-coverage-R@2/llvm/lib/Target/AArch64/MCTargetDesc/AArch64AsmBackend.cpp
Line | Count | Source (jump to first uncovered line) |
1 | | //===-- AArch64AsmBackend.cpp - AArch64 Assembler Backend -----------------===// |
2 | | // |
3 | | // The LLVM Compiler Infrastructure |
4 | | // |
5 | | // This file is distributed under the University of Illinois Open Source |
6 | | // License. See LICENSE.TXT for details. |
7 | | // |
8 | | //===----------------------------------------------------------------------===// |
9 | | |
10 | | #include "AArch64.h" |
11 | | #include "AArch64RegisterInfo.h" |
12 | | #include "MCTargetDesc/AArch64FixupKinds.h" |
13 | | #include "llvm/ADT/Triple.h" |
14 | | #include "llvm/BinaryFormat/MachO.h" |
15 | | #include "llvm/MC/MCAsmBackend.h" |
16 | | #include "llvm/MC/MCAssembler.h" |
17 | | #include "llvm/MC/MCContext.h" |
18 | | #include "llvm/MC/MCDirectives.h" |
19 | | #include "llvm/MC/MCELFObjectWriter.h" |
20 | | #include "llvm/MC/MCFixupKindInfo.h" |
21 | | #include "llvm/MC/MCObjectWriter.h" |
22 | | #include "llvm/MC/MCSectionELF.h" |
23 | | #include "llvm/MC/MCSectionMachO.h" |
24 | | #include "llvm/MC/MCValue.h" |
25 | | #include "llvm/Support/ErrorHandling.h" |
26 | | using namespace llvm; |
27 | | |
28 | | namespace { |
29 | | |
class AArch64AsmBackend : public MCAsmBackend {
  // Flag set shared by all PC-relative fixups below: the fixup is PC-relative
  // and the patched word is aligned down to a 32-bit boundary.
  static const unsigned PCRelFlagVal =
      MCFixupKindInfo::FKF_IsAlignedDownTo32Bits | MCFixupKindInfo::FKF_IsPCRel;
  Triple TheTriple;

public:
  bool IsLittleEndian;

public:
  AArch64AsmBackend(const Target &T, const Triple &TT, bool IsLittleEndian)
      : MCAsmBackend(), TheTriple(TT), IsLittleEndian(IsLittleEndian) {}

  unsigned getNumFixupKinds() const override {
    return AArch64::NumTargetFixupKinds;
  }

  /// Return the offset/size/flags descriptor for a fixup kind. Generic
  /// (non-target) kinds are delegated to the base class.
  const MCFixupKindInfo &getFixupKindInfo(MCFixupKind Kind) const override {
    const static MCFixupKindInfo Infos[AArch64::NumTargetFixupKinds] = {
        // This table *must* be in the order that the fixup_* kinds are defined
        // in AArch64FixupKinds.h.
        //
        // Name                           Offset (bits) Size (bits)     Flags
        {"fixup_aarch64_pcrel_adr_imm21", 0, 32, PCRelFlagVal},
        {"fixup_aarch64_pcrel_adrp_imm21", 0, 32, PCRelFlagVal},
        {"fixup_aarch64_add_imm12", 10, 12, 0},
        {"fixup_aarch64_ldst_imm12_scale1", 10, 12, 0},
        {"fixup_aarch64_ldst_imm12_scale2", 10, 12, 0},
        {"fixup_aarch64_ldst_imm12_scale4", 10, 12, 0},
        {"fixup_aarch64_ldst_imm12_scale8", 10, 12, 0},
        {"fixup_aarch64_ldst_imm12_scale16", 10, 12, 0},
        {"fixup_aarch64_ldr_pcrel_imm19", 5, 19, PCRelFlagVal},
        {"fixup_aarch64_movw", 5, 16, 0},
        {"fixup_aarch64_pcrel_branch14", 5, 14, PCRelFlagVal},
        {"fixup_aarch64_pcrel_branch19", 5, 19, PCRelFlagVal},
        {"fixup_aarch64_pcrel_branch26", 0, 26, PCRelFlagVal},
        {"fixup_aarch64_pcrel_call26", 0, 26, PCRelFlagVal},
        {"fixup_aarch64_tlsdesc_call", 0, 0, 0}};

    if (Kind < FirstTargetFixupKind)
      return MCAsmBackend::getFixupKindInfo(Kind);

    assert(unsigned(Kind - FirstTargetFixupKind) < getNumFixupKinds() &&
           "Invalid kind!");
    return Infos[Kind - FirstTargetFixupKind];
  }

  void applyFixup(const MCAssembler &Asm, const MCFixup &Fixup,
                  const MCValue &Target, MutableArrayRef<char> Data,
                  uint64_t Value, bool IsResolved) const override;

  bool mayNeedRelaxation(const MCInst &Inst) const override;
  bool fixupNeedsRelaxation(const MCFixup &Fixup, uint64_t Value,
                            const MCRelaxableFragment *DF,
                            const MCAsmLayout &Layout) const override;
  void relaxInstruction(const MCInst &Inst, const MCSubtargetInfo &STI,
                        MCInst &Res) const override;
  bool writeNopData(uint64_t Count, MCObjectWriter *OW) const override;

  void HandleAssemblerFlag(MCAssemblerFlag Flag) {}

  // AArch64 pointers are always 64-bit in this backend.
  unsigned getPointerSize() const { return 8; }

  // NOTE(review): "Containere" looks like a typo for "Container", but the
  // name is referenced by the out-of-line definition below, so it is kept.
  unsigned getFixupKindContainereSizeInBytes(unsigned Kind) const;

  bool shouldForceRelocation(const MCAssembler &Asm, const MCFixup &Fixup,
                             const MCValue &Target) override;
};
97 | | |
98 | | } // end anonymous namespace |
99 | | |
100 | | /// \brief The number of bytes the fixup may change. |
101 | 7.38M | static unsigned getFixupKindNumBytes(unsigned Kind) { |
102 | 7.38M | switch (Kind) { |
103 | 0 | default: |
104 | 0 | llvm_unreachable("Unknown fixup kind!"); |
105 | 7.38M | |
106 | 19 | case AArch64::fixup_aarch64_tlsdesc_call: |
107 | 19 | return 0; |
108 | 7.38M | |
109 | 3 | case FK_Data_1: |
110 | 3 | return 1; |
111 | 7.38M | |
112 | 124 | case AArch64::fixup_aarch64_movw: |
113 | 124 | case FK_Data_2: |
114 | 124 | case FK_SecRel_2: |
115 | 124 | return 2; |
116 | 124 | |
117 | 3.25M | case AArch64::fixup_aarch64_pcrel_branch14: |
118 | 3.25M | case AArch64::fixup_aarch64_add_imm12: |
119 | 3.25M | case AArch64::fixup_aarch64_ldst_imm12_scale1: |
120 | 3.25M | case AArch64::fixup_aarch64_ldst_imm12_scale2: |
121 | 3.25M | case AArch64::fixup_aarch64_ldst_imm12_scale4: |
122 | 3.25M | case AArch64::fixup_aarch64_ldst_imm12_scale8: |
123 | 3.25M | case AArch64::fixup_aarch64_ldst_imm12_scale16: |
124 | 3.25M | case AArch64::fixup_aarch64_ldr_pcrel_imm19: |
125 | 3.25M | case AArch64::fixup_aarch64_pcrel_branch19: |
126 | 3.25M | return 3; |
127 | 3.25M | |
128 | 3.86M | case AArch64::fixup_aarch64_pcrel_adr_imm21: |
129 | 3.86M | case AArch64::fixup_aarch64_pcrel_adrp_imm21: |
130 | 3.86M | case AArch64::fixup_aarch64_pcrel_branch26: |
131 | 3.86M | case AArch64::fixup_aarch64_pcrel_call26: |
132 | 3.86M | case FK_Data_4: |
133 | 3.86M | case FK_SecRel_4: |
134 | 3.86M | return 4; |
135 | 3.86M | |
136 | 259k | case FK_Data_8: |
137 | 259k | return 8; |
138 | 0 | } |
139 | 0 | } |
140 | | |
/// Pack a 21-bit ADR/ADRP displacement into its instruction fields: the low
/// two bits become immlo (bits [30:29]) and the high nineteen bits become
/// immhi (bits [23:5]). Bits of \p Value above bit 20 are ignored.
static unsigned AdrImmBits(unsigned Value) {
  const unsigned ImmLo = Value & 0x3;
  const unsigned ImmHi = (Value >> 2) & 0x7ffff;
  return (ImmLo << 29) | (ImmHi << 5);
}
146 | | |
/// Check range/alignment of a resolved fixup value and shuffle it into the
/// bit positions the target instruction (or data) expects. Errors are
/// reported through \p Ctx; the (possibly truncated) value is still returned
/// so encoding can proceed.
static uint64_t adjustFixupValue(const MCFixup &Fixup, uint64_t Value,
                                 MCContext &Ctx, const Triple &TheTriple,
                                 bool IsResolved) {
  unsigned Kind = Fixup.getKind();
  int64_t SignedValue = static_cast<int64_t>(Value);
  switch (Kind) {
  default:
    llvm_unreachable("Unknown fixup kind!");
  case AArch64::fixup_aarch64_pcrel_adr_imm21:
    // Signed 21-bit byte displacement for ADR.
    if (SignedValue > 2097151 || SignedValue < -2097152)
      Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
    return AdrImmBits(Value & 0x1fffffULL);
  case AArch64::fixup_aarch64_pcrel_adrp_imm21:
    assert(!IsResolved);
    if (TheTriple.isOSBinFormatCOFF())
      return AdrImmBits(Value & 0x1fffffULL);
    // Page (4KiB) displacement for ADRP; drop the low 12 bits.
    return AdrImmBits((Value & 0x1fffff000ULL) >> 12);
  case AArch64::fixup_aarch64_ldr_pcrel_imm19:
  case AArch64::fixup_aarch64_pcrel_branch19:
    // Signed 21-bit immediate
    if (SignedValue > 2097151 || SignedValue < -2097152)
      Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
    if (Value & 0x3)
      Ctx.reportError(Fixup.getLoc(), "fixup not sufficiently aligned");
    // Low two bits are not encoded.
    return (Value >> 2) & 0x7ffff;
  case AArch64::fixup_aarch64_add_imm12:
  case AArch64::fixup_aarch64_ldst_imm12_scale1:
    // On COFF an unresolved :lo12: style fixup keeps only the page offset.
    if (TheTriple.isOSBinFormatCOFF() && !IsResolved)
      Value &= 0xfff;
    // Unsigned 12-bit immediate
    if (Value >= 0x1000)
      Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
    return Value;
  case AArch64::fixup_aarch64_ldst_imm12_scale2:
    if (TheTriple.isOSBinFormatCOFF() && !IsResolved)
      Value &= 0xfff;
    // Unsigned 12-bit immediate which gets multiplied by 2
    if (Value >= 0x2000)
      Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
    if (Value & 0x1)
      Ctx.reportError(Fixup.getLoc(), "fixup must be 2-byte aligned");
    return Value >> 1;
  case AArch64::fixup_aarch64_ldst_imm12_scale4:
    if (TheTriple.isOSBinFormatCOFF() && !IsResolved)
      Value &= 0xfff;
    // Unsigned 12-bit immediate which gets multiplied by 4
    if (Value >= 0x4000)
      Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
    if (Value & 0x3)
      Ctx.reportError(Fixup.getLoc(), "fixup must be 4-byte aligned");
    return Value >> 2;
  case AArch64::fixup_aarch64_ldst_imm12_scale8:
    if (TheTriple.isOSBinFormatCOFF() && !IsResolved)
      Value &= 0xfff;
    // Unsigned 12-bit immediate which gets multiplied by 8
    if (Value >= 0x8000)
      Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
    if (Value & 0x7)
      Ctx.reportError(Fixup.getLoc(), "fixup must be 8-byte aligned");
    return Value >> 3;
  case AArch64::fixup_aarch64_ldst_imm12_scale16:
    if (TheTriple.isOSBinFormatCOFF() && !IsResolved)
      Value &= 0xfff;
    // Unsigned 12-bit immediate which gets multiplied by 16
    if (Value >= 0x10000)
      Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
    if (Value & 0xf)
      Ctx.reportError(Fixup.getLoc(), "fixup must be 16-byte aligned");
    return Value >> 4;
  case AArch64::fixup_aarch64_movw:
    Ctx.reportError(Fixup.getLoc(),
                    "no resolvable MOVZ/MOVK fixups supported yet");
    return Value;
  case AArch64::fixup_aarch64_pcrel_branch14:
    // Signed 16-bit immediate
    if (SignedValue > 32767 || SignedValue < -32768)
      Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
    // Low two bits are not encoded (4-byte alignment assumed).
    if (Value & 0x3)
      Ctx.reportError(Fixup.getLoc(), "fixup not sufficiently aligned");
    return (Value >> 2) & 0x3fff;
  case AArch64::fixup_aarch64_pcrel_branch26:
  case AArch64::fixup_aarch64_pcrel_call26:
    // Signed 28-bit immediate
    if (SignedValue > 134217727 || SignedValue < -134217728)
      Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
    // Low two bits are not encoded (4-byte alignment assumed).
    if (Value & 0x3)
      Ctx.reportError(Fixup.getLoc(), "fixup not sufficiently aligned");
    return (Value >> 2) & 0x3ffffff;
  case FK_Data_1:
  case FK_Data_2:
  case FK_Data_4:
  case FK_Data_8:
  case FK_SecRel_2:
  case FK_SecRel_4:
    // Plain data fixups are emitted verbatim.
    return Value;
  }
}
247 | | |
248 | | /// getFixupKindContainereSizeInBytes - The number of bytes of the |
249 | | /// container involved in big endian or 0 if the item is little endian |
250 | 2.56M | unsigned AArch64AsmBackend::getFixupKindContainereSizeInBytes(unsigned Kind) const { |
251 | 2.56M | if (IsLittleEndian) |
252 | 2.56M | return 0; |
253 | 16 | |
254 | 16 | switch (Kind) { |
255 | 0 | default: |
256 | 0 | llvm_unreachable("Unknown fixup kind!"); |
257 | 16 | |
258 | 0 | case FK_Data_1: |
259 | 0 | return 1; |
260 | 0 | case FK_Data_2: |
261 | 0 | return 2; |
262 | 15 | case FK_Data_4: |
263 | 15 | return 4; |
264 | 0 | case FK_Data_8: |
265 | 0 | return 8; |
266 | 16 | |
267 | 1 | case AArch64::fixup_aarch64_tlsdesc_call: |
268 | 1 | case AArch64::fixup_aarch64_movw: |
269 | 1 | case AArch64::fixup_aarch64_pcrel_branch14: |
270 | 1 | case AArch64::fixup_aarch64_add_imm12: |
271 | 1 | case AArch64::fixup_aarch64_ldst_imm12_scale1: |
272 | 1 | case AArch64::fixup_aarch64_ldst_imm12_scale2: |
273 | 1 | case AArch64::fixup_aarch64_ldst_imm12_scale4: |
274 | 1 | case AArch64::fixup_aarch64_ldst_imm12_scale8: |
275 | 1 | case AArch64::fixup_aarch64_ldst_imm12_scale16: |
276 | 1 | case AArch64::fixup_aarch64_ldr_pcrel_imm19: |
277 | 1 | case AArch64::fixup_aarch64_pcrel_branch19: |
278 | 1 | case AArch64::fixup_aarch64_pcrel_adr_imm21: |
279 | 1 | case AArch64::fixup_aarch64_pcrel_adrp_imm21: |
280 | 1 | case AArch64::fixup_aarch64_pcrel_branch26: |
281 | 1 | case AArch64::fixup_aarch64_pcrel_call26: |
282 | 1 | // Instructions are always little endian |
283 | 1 | return 0; |
284 | 0 | } |
285 | 0 | } |
286 | | |
/// Patch the fixup's value into the fragment bytes. The value is first
/// range-checked and repositioned by adjustFixupValue(), then OR'd into the
/// target bytes, honoring big-endian data containers when required.
void AArch64AsmBackend::applyFixup(const MCAssembler &Asm, const MCFixup &Fixup,
                                   const MCValue &Target,
                                   MutableArrayRef<char> Data, uint64_t Value,
                                   bool IsResolved) const {
  unsigned NumBytes = getFixupKindNumBytes(Fixup.getKind());
  if (!Value)
    return; // Doesn't change encoding.
  MCFixupKindInfo Info = getFixupKindInfo(Fixup.getKind());
  MCContext &Ctx = Asm.getContext();
  // Apply any target-specific value adjustments.
  Value = adjustFixupValue(Fixup, Value, Ctx, TheTriple, IsResolved);

  // Shift the value into position.
  Value <<= Info.TargetOffset;

  unsigned Offset = Fixup.getOffset();
  assert(Offset + NumBytes <= Data.size() && "Invalid fixup offset!");

  // Used to point to big endian bytes.
  unsigned FulleSizeInBytes = getFixupKindContainereSizeInBytes(Fixup.getKind());

  // For each byte of the fragment that the fixup touches, mask in the
  // bits from the fixup value.
  if (FulleSizeInBytes == 0) {
    // Handle as little-endian
    for (unsigned i = 0; i != NumBytes; ++i) {
      Data[Offset + i] |= uint8_t((Value >> (i * 8)) & 0xff);
    }
  } else {
    // Handle as big-endian: write the low-order fixup bytes into the high
    // addresses of the container, last byte first.
    assert((Offset + FulleSizeInBytes) <= Data.size() && "Invalid fixup size!");
    assert(NumBytes <= FulleSizeInBytes && "Invalid fixup size!");
    for (unsigned i = 0; i != NumBytes; ++i) {
      unsigned Idx = FulleSizeInBytes - 1 - i;
      Data[Offset + Idx] |= uint8_t((Value >> (i * 8)) & 0xff);
    }
  }
}
325 | | |
326 | 23.6M | bool AArch64AsmBackend::mayNeedRelaxation(const MCInst &Inst) const { |
327 | 23.6M | return false; |
328 | 23.6M | } |
329 | | |
330 | | bool AArch64AsmBackend::fixupNeedsRelaxation(const MCFixup &Fixup, |
331 | | uint64_t Value, |
332 | | const MCRelaxableFragment *DF, |
333 | 0 | const MCAsmLayout &Layout) const { |
334 | 0 | // FIXME: This isn't correct for AArch64. Just moving the "generic" logic |
335 | 0 | // into the targets for now. |
336 | 0 | // |
337 | 0 | // Relax if the value is too big for a (signed) i8. |
338 | 0 | return int64_t(Value) != int64_t(int8_t(Value)); |
339 | 0 | } |
340 | | |
341 | | void AArch64AsmBackend::relaxInstruction(const MCInst &Inst, |
342 | | const MCSubtargetInfo &STI, |
343 | 0 | MCInst &Res) const { |
344 | 0 | llvm_unreachable("AArch64AsmBackend::relaxInstruction() unimplemented"); |
345 | 0 | } |
346 | | |
347 | 448k | bool AArch64AsmBackend::writeNopData(uint64_t Count, MCObjectWriter *OW) const { |
348 | 448k | // If the count is not 4-byte aligned, we must be writing data into the text |
349 | 448k | // section (otherwise we have unaligned instructions, and thus have far |
350 | 448k | // bigger problems), so just write zeros instead. |
351 | 448k | OW->WriteZeros(Count % 4); |
352 | 448k | |
353 | 448k | // We are properly aligned, so write NOPs as requested. |
354 | 448k | Count /= 4; |
355 | 448k | for (uint64_t i = 0; i != Count448k ; ++i19 ) |
356 | 19 | OW->write32(0xd503201f); |
357 | 448k | return true; |
358 | 448k | } |
359 | | |
360 | | bool AArch64AsmBackend::shouldForceRelocation(const MCAssembler &Asm, |
361 | | const MCFixup &Fixup, |
362 | 2.50M | const MCValue &Target) { |
363 | 2.50M | // The ADRP instruction adds some multiple of 0x1000 to the current PC & |
364 | 2.50M | // ~0xfff. This means that the required offset to reach a symbol can vary by |
365 | 2.50M | // up to one step depending on where the ADRP is in memory. For example: |
366 | 2.50M | // |
367 | 2.50M | // ADRP x0, there |
368 | 2.50M | // there: |
369 | 2.50M | // |
370 | 2.50M | // If the ADRP occurs at address 0xffc then "there" will be at 0x1000 and |
371 | 2.50M | // we'll need that as an offset. At any other address "there" will be in the |
372 | 2.50M | // same page as the ADRP and the instruction should encode 0x0. Assuming the |
373 | 2.50M | // section isn't 0x1000-aligned, we therefore need to delegate this decision |
374 | 2.50M | // to the linker -- a relocation! |
375 | 2.50M | if ((uint32_t)Fixup.getKind() == AArch64::fixup_aarch64_pcrel_adrp_imm21) |
376 | 18 | return true; |
377 | 2.50M | return false; |
378 | 2.50M | } |
379 | | |
380 | | namespace { |
381 | | |
namespace CU {

/// \brief Compact unwind encoding values.
enum CompactUnwindEncodings {
  /// \brief A "frameless" leaf function, where no non-volatile registers are
  /// saved. The return remains in LR throughout the function.
  UNWIND_ARM64_MODE_FRAMELESS = 0x02000000,

  /// \brief No compact unwind encoding available. Instead the low 23-bits of
  /// the compact unwind encoding is the offset of the DWARF FDE in the
  /// __eh_frame section. This mode is never used in object files. It is only
  /// generated by the linker in final linked images, which have only DWARF info
  /// for a function.
  UNWIND_ARM64_MODE_DWARF = 0x03000000,

  /// \brief This is a standard arm64 prologue where FP/LR are immediately
  /// pushed on the stack, then SP is copied to FP. If there are any
  /// non-volatile registers saved, they are copied into the stack frame in
  /// pairs in a contiguous range right below the saved FP/LR pair. Any subset
  /// of the five X pairs and four D pairs can be saved, but the memory layout
  /// must be in register number order.
  UNWIND_ARM64_MODE_FRAME = 0x04000000,

  /// \brief Frame register pair encodings.
  UNWIND_ARM64_FRAME_X19_X20_PAIR = 0x00000001,
  UNWIND_ARM64_FRAME_X21_X22_PAIR = 0x00000002,
  UNWIND_ARM64_FRAME_X23_X24_PAIR = 0x00000004,
  UNWIND_ARM64_FRAME_X25_X26_PAIR = 0x00000008,
  UNWIND_ARM64_FRAME_X27_X28_PAIR = 0x00000010,
  UNWIND_ARM64_FRAME_D8_D9_PAIR = 0x00000100,
  UNWIND_ARM64_FRAME_D10_D11_PAIR = 0x00000200,
  UNWIND_ARM64_FRAME_D12_D13_PAIR = 0x00000400,
  UNWIND_ARM64_FRAME_D14_D15_PAIR = 0x00000800
};

} // end CU namespace
418 | | |
// FIXME: This should be in a separate file.
class DarwinAArch64AsmBackend : public AArch64AsmBackend {
  const MCRegisterInfo &MRI;

  /// \brief Encode compact unwind stack adjustment for frameless functions.
  /// See UNWIND_ARM64_FRAMELESS_STACK_SIZE_MASK in compact_unwind_encoding.h.
  /// The stack size always needs to be 16 byte aligned.
  uint32_t encodeStackAdjustment(uint32_t StackSize) const {
    return (StackSize / 16) << 12;
  }

public:
  DarwinAArch64AsmBackend(const Target &T, const Triple &TT,
                          const MCRegisterInfo &MRI)
      : AArch64AsmBackend(T, TT, /*IsLittleEndian*/ true), MRI(MRI) {}

  MCObjectWriter *createObjectWriter(raw_pwrite_stream &OS) const override {
    return createAArch64MachObjectWriter(OS, MachO::CPU_TYPE_ARM64,
                                         MachO::CPU_SUBTYPE_ARM64_ALL);
  }

  /// \brief Generate the compact unwind encoding from the CFI directives.
  /// Walks the CFI instructions once; any pattern the compact format cannot
  /// express falls back to UNWIND_ARM64_MODE_DWARF.
  uint32_t generateCompactUnwindEncoding(
      ArrayRef<MCCFIInstruction> Instrs) const override {
    // No CFI at all means a frameless leaf function.
    if (Instrs.empty())
      return CU::UNWIND_ARM64_MODE_FRAMELESS;

    bool HasFP = false;
    unsigned StackSize = 0;

    uint32_t CompactUnwindEncoding = 0;
    for (size_t i = 0, e = Instrs.size(); i != e; ++i) {
      const MCCFIInstruction &Inst = Instrs[i];

      switch (Inst.getOperation()) {
      default:
        // Cannot handle this directive: bail out.
        return CU::UNWIND_ARM64_MODE_DWARF;
      case MCCFIInstruction::OpDefCfa: {
        // Defines a frame pointer. Expect the canonical prologue shape:
        // def_cfa is immediately followed by the LR and FP offset directives,
        // which are consumed here as well (hence the ++i below).
        assert(getXRegFromWReg(MRI.getLLVMRegNum(Inst.getRegister(), true)) ==
                   AArch64::FP &&
               "Invalid frame pointer!");
        assert(i + 2 < e && "Insufficient CFI instructions to define a frame!");

        const MCCFIInstruction &LRPush = Instrs[++i];
        assert(LRPush.getOperation() == MCCFIInstruction::OpOffset &&
               "Link register not pushed!");
        const MCCFIInstruction &FPPush = Instrs[++i];
        assert(FPPush.getOperation() == MCCFIInstruction::OpOffset &&
               "Frame pointer not pushed!");

        unsigned LRReg = MRI.getLLVMRegNum(LRPush.getRegister(), true);
        unsigned FPReg = MRI.getLLVMRegNum(FPPush.getRegister(), true);

        LRReg = getXRegFromWReg(LRReg);
        FPReg = getXRegFromWReg(FPReg);

        assert(LRReg == AArch64::LR && FPReg == AArch64::FP &&
               "Pushing invalid registers for frame!");

        // Indicate that the function has a frame.
        CompactUnwindEncoding |= CU::UNWIND_ARM64_MODE_FRAME;
        HasFP = true;
        break;
      }
      case MCCFIInstruction::OpDefCfaOffset: {
        assert(StackSize == 0 && "We already have the CFA offset!");
        StackSize = std::abs(Inst.getOffset());
        break;
      }
      case MCCFIInstruction::OpOffset: {
        // Registers are saved in pairs. We expect there to be two consecutive
        // `.cfi_offset' instructions with the appropriate registers specified.
        unsigned Reg1 = MRI.getLLVMRegNum(Inst.getRegister(), true);
        if (i + 1 == e)
          return CU::UNWIND_ARM64_MODE_DWARF;

        const MCCFIInstruction &Inst2 = Instrs[++i];
        if (Inst2.getOperation() != MCCFIInstruction::OpOffset)
          return CU::UNWIND_ARM64_MODE_DWARF;
        unsigned Reg2 = MRI.getLLVMRegNum(Inst2.getRegister(), true);

        // N.B. The encodings must be in register number order, and the X
        // registers before the D registers. The mask checks below enforce
        // that ordering: a pair is only accepted if no higher-numbered pair
        // bit has been set yet.

        // X19/X20 pair = 0x00000001,
        // X21/X22 pair = 0x00000002,
        // X23/X24 pair = 0x00000004,
        // X25/X26 pair = 0x00000008,
        // X27/X28 pair = 0x00000010
        Reg1 = getXRegFromWReg(Reg1);
        Reg2 = getXRegFromWReg(Reg2);

        if (Reg1 == AArch64::X19 && Reg2 == AArch64::X20 &&
            (CompactUnwindEncoding & 0xF1E) == 0)
          CompactUnwindEncoding |= CU::UNWIND_ARM64_FRAME_X19_X20_PAIR;
        else if (Reg1 == AArch64::X21 && Reg2 == AArch64::X22 &&
                 (CompactUnwindEncoding & 0xF1C) == 0)
          CompactUnwindEncoding |= CU::UNWIND_ARM64_FRAME_X21_X22_PAIR;
        else if (Reg1 == AArch64::X23 && Reg2 == AArch64::X24 &&
                 (CompactUnwindEncoding & 0xF18) == 0)
          CompactUnwindEncoding |= CU::UNWIND_ARM64_FRAME_X23_X24_PAIR;
        else if (Reg1 == AArch64::X25 && Reg2 == AArch64::X26 &&
                 (CompactUnwindEncoding & 0xF10) == 0)
          CompactUnwindEncoding |= CU::UNWIND_ARM64_FRAME_X25_X26_PAIR;
        else if (Reg1 == AArch64::X27 && Reg2 == AArch64::X28 &&
                 (CompactUnwindEncoding & 0xF00) == 0)
          CompactUnwindEncoding |= CU::UNWIND_ARM64_FRAME_X27_X28_PAIR;
        else {
          Reg1 = getDRegFromBReg(Reg1);
          Reg2 = getDRegFromBReg(Reg2);

          // D8/D9 pair   = 0x00000100,
          // D10/D11 pair = 0x00000200,
          // D12/D13 pair = 0x00000400,
          // D14/D15 pair = 0x00000800
          if (Reg1 == AArch64::D8 && Reg2 == AArch64::D9 &&
              (CompactUnwindEncoding & 0xE00) == 0)
            CompactUnwindEncoding |= CU::UNWIND_ARM64_FRAME_D8_D9_PAIR;
          else if (Reg1 == AArch64::D10 && Reg2 == AArch64::D11 &&
                   (CompactUnwindEncoding & 0xC00) == 0)
            CompactUnwindEncoding |= CU::UNWIND_ARM64_FRAME_D10_D11_PAIR;
          else if (Reg1 == AArch64::D12 && Reg2 == AArch64::D13 &&
                   (CompactUnwindEncoding & 0x800) == 0)
            CompactUnwindEncoding |= CU::UNWIND_ARM64_FRAME_D12_D13_PAIR;
          else if (Reg1 == AArch64::D14 && Reg2 == AArch64::D15)
            CompactUnwindEncoding |= CU::UNWIND_ARM64_FRAME_D14_D15_PAIR;
          else
            // A pair was pushed which we cannot handle.
            return CU::UNWIND_ARM64_MODE_DWARF;
        }

        break;
      }
      }
    }

    if (!HasFP) {
      // With compact unwind info we can only represent stack adjustments of up
      // to 65520 bytes.
      if (StackSize > 65520)
        return CU::UNWIND_ARM64_MODE_DWARF;

      CompactUnwindEncoding |= CU::UNWIND_ARM64_MODE_FRAMELESS;
      CompactUnwindEncoding |= encodeStackAdjustment(StackSize);
    }

    return CompactUnwindEncoding;
  }
};
570 | | |
571 | | } // end anonymous namespace |
572 | | |
573 | | namespace { |
574 | | |
/// ELF flavor of the backend: records the OS ABI byte and whether the ILP32
/// variant of the ABI is in use, both forwarded to the ELF object writer.
class ELFAArch64AsmBackend : public AArch64AsmBackend {
public:
  uint8_t OSABI;
  bool IsILP32;

  ELFAArch64AsmBackend(const Target &T, const Triple &TT, uint8_t OSABI,
                       bool IsLittleEndian, bool IsILP32)
      : AArch64AsmBackend(T, TT, IsLittleEndian), OSABI(OSABI),
        IsILP32(IsILP32) {}

  MCObjectWriter *createObjectWriter(raw_pwrite_stream &OS) const override {
    return createAArch64ELFObjectWriter(OS, OSABI, IsLittleEndian, IsILP32);
  }
};
589 | | |
590 | | } |
591 | | |
592 | | namespace { |
/// COFF (Windows) flavor of the backend; always little endian.
class COFFAArch64AsmBackend : public AArch64AsmBackend {
public:
  COFFAArch64AsmBackend(const Target &T, const Triple &TheTriple)
      : AArch64AsmBackend(T, TheTriple, /*IsLittleEndian*/ true) {}

  MCObjectWriter *createObjectWriter(raw_pwrite_stream &OS) const override {
    return createAArch64WinCOFFObjectWriter(OS);
  }
};
602 | | } |
603 | | |
604 | | MCAsmBackend *llvm::createAArch64leAsmBackend(const Target &T, |
605 | | const MCRegisterInfo &MRI, |
606 | | const Triple &TheTriple, |
607 | | StringRef CPU, |
608 | 14.1k | const MCTargetOptions &Options) { |
609 | 14.1k | if (TheTriple.isOSBinFormatMachO()) |
610 | 13.2k | return new DarwinAArch64AsmBackend(T, TheTriple, MRI); |
611 | 930 | |
612 | 930 | if (930 TheTriple.isOSBinFormatCOFF()930 ) |
613 | 10 | return new COFFAArch64AsmBackend(T, TheTriple); |
614 | 920 | |
615 | 930 | assert(TheTriple.isOSBinFormatELF() && "Invalid target"); |
616 | 920 | |
617 | 920 | uint8_t OSABI = MCELFObjectTargetWriter::getOSABI(TheTriple.getOS()); |
618 | 920 | bool IsILP32 = Options.getABIName() == "ilp32"; |
619 | 920 | return new ELFAArch64AsmBackend(T, TheTriple, OSABI, /*IsLittleEndian=*/true, |
620 | 920 | IsILP32); |
621 | 920 | } |
622 | | |
623 | | MCAsmBackend *llvm::createAArch64beAsmBackend(const Target &T, |
624 | | const MCRegisterInfo &MRI, |
625 | | const Triple &TheTriple, |
626 | | StringRef CPU, |
627 | 26 | const MCTargetOptions &Options) { |
628 | 26 | assert(TheTriple.isOSBinFormatELF() && |
629 | 26 | "Big endian is only supported for ELF targets!"); |
630 | 26 | uint8_t OSABI = MCELFObjectTargetWriter::getOSABI(TheTriple.getOS()); |
631 | 26 | bool IsILP32 = Options.getABIName() == "ilp32"; |
632 | 26 | return new ELFAArch64AsmBackend(T, TheTriple, OSABI, /*IsLittleEndian=*/false, |
633 | 26 | IsILP32); |
634 | 26 | } |