Coverage Report

Created: 2018-01-17 21:32

/Users/buildslave/jenkins/workspace/clang-stage2-coverage-R/llvm/tools/lld/ELF/Arch/AArch64.cpp
//===- AArch64.cpp --------------------------------------------------------===//
//
//                             The LLVM Linker
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//

#include "Symbols.h"
#include "SyntheticSections.h"
#include "Target.h"
#include "Thunks.h"
#include "lld/Common/ErrorHandler.h"
#include "llvm/Object/ELF.h"
#include "llvm/Support/Endian.h"

using namespace llvm;
using namespace llvm::support::endian;
using namespace llvm::ELF;
using namespace lld;
using namespace lld::elf;

// Page(Expr) is the page address of the expression Expr, defined
// as (Expr & ~0xFFF). (This applies even if the machine page size
// supported by the platform has a different value.)
uint64_t elf::getAArch64Page(uint64_t Expr) {
  return Expr & ~static_cast<uint64_t>(0xFFF);
}
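
Note: to make the Page() arithmetic above concrete, here is a standalone sketch (independent of lld's headers; the pageOf helper is invented for the example) showing that masking off the low 12 bits yields the 4 KiB page address regardless of the platform's runtime page size.

#include <cassert>
#include <cstdint>

// Standalone restatement of the masking above, for illustration only.
static uint64_t pageOf(uint64_t Expr) {
  return Expr & ~static_cast<uint64_t>(0xFFF);
}

int main() {
  // 0x41F123 lies in the 4 KiB page starting at 0x41F000.
  assert(pageOf(0x41F123) == 0x41F000);
  // Page-aligned addresses are unchanged.
  assert(pageOf(0x10000) == 0x10000);
  return 0;
}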

namespace {
class AArch64 final : public TargetInfo {
public:
  AArch64();
  RelExpr getRelExpr(RelType Type, const Symbol &S,
                     const uint8_t *Loc) const override;
  bool isPicRel(RelType Type) const override;
  void writeGotPlt(uint8_t *Buf, const Symbol &S) const override;
  void writePltHeader(uint8_t *Buf) const override;
  void writePlt(uint8_t *Buf, uint64_t GotPltEntryAddr, uint64_t PltEntryAddr,
                int32_t Index, unsigned RelOff) const override;
  bool needsThunk(RelExpr Expr, RelType Type, const InputFile *File,
                  uint64_t BranchAddr, const Symbol &S) const override;
  bool inBranchRange(RelType Type, uint64_t Src, uint64_t Dst) const override;
  bool usesOnlyLowPageBits(RelType Type) const override;
  void relocateOne(uint8_t *Loc, RelType Type, uint64_t Val) const override;
  RelExpr adjustRelaxExpr(RelType Type, const uint8_t *Data,
                          RelExpr Expr) const override;
  void relaxTlsGdToLe(uint8_t *Loc, RelType Type, uint64_t Val) const override;
  void relaxTlsGdToIe(uint8_t *Loc, RelType Type, uint64_t Val) const override;
  void relaxTlsIeToLe(uint8_t *Loc, RelType Type, uint64_t Val) const override;
};
} // namespace

AArch64::AArch64() {
  CopyRel = R_AARCH64_COPY;
  RelativeRel = R_AARCH64_RELATIVE;
  IRelativeRel = R_AARCH64_IRELATIVE;
  GotRel = R_AARCH64_GLOB_DAT;
  PltRel = R_AARCH64_JUMP_SLOT;
  TlsDescRel = R_AARCH64_TLSDESC;
  TlsGotRel = R_AARCH64_TLS_TPREL64;
  GotEntrySize = 8;
  GotPltEntrySize = 8;
  PltEntrySize = 16;
  PltHeaderSize = 32;
  DefaultMaxPageSize = 65536;

  // It doesn't seem to be documented anywhere, but TLS on AArch64 uses variant
  // 1 of the TLS structures and the TCB size is 16.
  TcbSize = 16;
  NeedsThunks = true;

  // See comment in Arch/ARM.cpp for a more detailed explanation of
  // ThunkSectionSpacing. For AArch64, the only branches we are permitted to
  // thunk have a range of +/- 128 MiB.
  ThunkSectionSpacing = (128 * 1024 * 1024) - 0x30000;
}
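
Note: as a sanity check of the ThunkSectionSpacing arithmetic above (a standalone sketch): 128 MiB is 0x8000000 bytes, so the spacing leaves 0x30000 bytes of headroom below the +/- 128 MiB branch range enforced later in inBranchRange.

// Sanity-check sketch of the ThunkSectionSpacing arithmetic above.
static_assert(128 * 1024 * 1024 == 0x8000000,
              "128 MiB in bytes");
static_assert((128 * 1024 * 1024) - 0x30000 == 0x7FD0000,
              "spacing keeps 0x30000 bytes of headroom below the branch range");

int main() { return 0; }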

RelExpr AArch64::getRelExpr(RelType Type, const Symbol &S,
                            const uint8_t *Loc) const {
  switch (Type) {
  case R_AARCH64_TLSDESC_ADR_PAGE21:
    return R_TLSDESC_PAGE;
  case R_AARCH64_TLSDESC_LD64_LO12:
  case R_AARCH64_TLSDESC_ADD_LO12:
    return R_TLSDESC;
  case R_AARCH64_TLSDESC_CALL:
    return R_TLSDESC_CALL;
  case R_AARCH64_TLSLE_ADD_TPREL_HI12:
  case R_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
    return R_TLS;
  case R_AARCH64_CALL26:
  case R_AARCH64_CONDBR19:
  case R_AARCH64_JUMP26:
  case R_AARCH64_TSTBR14:
    return R_PLT_PC;
  case R_AARCH64_PREL16:
  case R_AARCH64_PREL32:
  case R_AARCH64_PREL64:
  case R_AARCH64_ADR_PREL_LO21:
  case R_AARCH64_LD_PREL_LO19:
    return R_PC;
  case R_AARCH64_ADR_PREL_PG_HI21:
    return R_PAGE_PC;
  case R_AARCH64_LD64_GOT_LO12_NC:
  case R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
    return R_GOT;
  case R_AARCH64_ADR_GOT_PAGE:
  case R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
    return R_GOT_PAGE_PC;
  case R_AARCH64_NONE:
    return R_NONE;
  default:
    return R_ABS;
  }
}

RelExpr AArch64::adjustRelaxExpr(RelType Type, const uint8_t *Data,
                                 RelExpr Expr) const {
  if (Expr == R_RELAX_TLS_GD_TO_IE) {
    if (Type == R_AARCH64_TLSDESC_ADR_PAGE21)
      return R_RELAX_TLS_GD_TO_IE_PAGE_PC;
    return R_RELAX_TLS_GD_TO_IE_ABS;
  }
  return Expr;
}

bool AArch64::usesOnlyLowPageBits(RelType Type) const {
  switch (Type) {
  default:
    return false;
  case R_AARCH64_ADD_ABS_LO12_NC:
  case R_AARCH64_LD64_GOT_LO12_NC:
  case R_AARCH64_LDST128_ABS_LO12_NC:
  case R_AARCH64_LDST16_ABS_LO12_NC:
  case R_AARCH64_LDST32_ABS_LO12_NC:
  case R_AARCH64_LDST64_ABS_LO12_NC:
  case R_AARCH64_LDST8_ABS_LO12_NC:
  case R_AARCH64_TLSDESC_ADD_LO12:
  case R_AARCH64_TLSDESC_LD64_LO12:
  case R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
    return true;
  }
}

bool AArch64::isPicRel(RelType Type) const {
  return Type == R_AARCH64_ABS32 || Type == R_AARCH64_ABS64;
}

void AArch64::writeGotPlt(uint8_t *Buf, const Symbol &) const {
  write64le(Buf, InX::Plt->getVA());
}

void AArch64::writePltHeader(uint8_t *Buf) const {
  const uint8_t PltData[] = {
      0xf0, 0x7b, 0xbf, 0xa9, // stp    x16, x30, [sp,#-16]!
      0x10, 0x00, 0x00, 0x90, // adrp   x16, Page(&(.plt.got[2]))
      0x11, 0x02, 0x40, 0xf9, // ldr    x17, [x16, Offset(&(.plt.got[2]))]
      0x10, 0x02, 0x00, 0x91, // add    x16, x16, Offset(&(.plt.got[2]))
      0x20, 0x02, 0x1f, 0xd6, // br     x17
      0x1f, 0x20, 0x03, 0xd5, // nop
      0x1f, 0x20, 0x03, 0xd5, // nop
      0x1f, 0x20, 0x03, 0xd5  // nop
  };
  memcpy(Buf, PltData, sizeof(PltData));

  uint64_t Got = InX::GotPlt->getVA();
  uint64_t Plt = InX::Plt->getVA();
  relocateOne(Buf + 4, R_AARCH64_ADR_PREL_PG_HI21,
              getAArch64Page(Got + 16) - getAArch64Page(Plt + 4));
  relocateOne(Buf + 8, R_AARCH64_LDST64_ABS_LO12_NC, Got + 16);
  relocateOne(Buf + 12, R_AARCH64_ADD_ABS_LO12_NC, Got + 16);
}

void AArch64::writePlt(uint8_t *Buf, uint64_t GotPltEntryAddr,
                       uint64_t PltEntryAddr, int32_t Index,
                       unsigned RelOff) const {
  const uint8_t Inst[] = {
      0x10, 0x00, 0x00, 0x90, // adrp x16, Page(&(.plt.got[n]))
      0x11, 0x02, 0x40, 0xf9, // ldr  x17, [x16, Offset(&(.plt.got[n]))]
      0x10, 0x02, 0x00, 0x91, // add  x16, x16, Offset(&(.plt.got[n]))
      0x20, 0x02, 0x1f, 0xd6  // br   x17
  };
  memcpy(Buf, Inst, sizeof(Inst));

  relocateOne(Buf, R_AARCH64_ADR_PREL_PG_HI21,
              getAArch64Page(GotPltEntryAddr) - getAArch64Page(PltEntryAddr));
  relocateOne(Buf + 4, R_AARCH64_LDST64_ABS_LO12_NC, GotPltEntryAddr);
  relocateOne(Buf + 8, R_AARCH64_ADD_ABS_LO12_NC, GotPltEntryAddr);
}
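
Note: the adrp/ldr/add sequence in each PLT entry rebuilds the address of the corresponding .got.plt slot from a page delta plus a low-12-bit offset. Below is a standalone sketch of that arithmetic using made-up sample addresses (pageOf mirrors getAArch64Page above; nothing here comes from a real link).

#include <cassert>
#include <cstdint>

static uint64_t pageOf(uint64_t V) { return V & ~static_cast<uint64_t>(0xFFF); }

int main() {
  // Hypothetical addresses for one PLT entry and its .got.plt slot.
  uint64_t PltEntryAddr = 0x210040;
  uint64_t GotPltEntryAddr = 0x230018;

  // What the R_AARCH64_ADR_PREL_PG_HI21 relocation on the adrp encodes.
  int64_t PageDelta = pageOf(GotPltEntryAddr) - pageOf(PltEntryAddr);
  // What the lo12 relocations on ldr/add encode: the offset within the page.
  uint64_t Lo12 = GotPltEntryAddr & 0xFFF;

  // Recombining them from the PLT entry's page reproduces the slot address.
  assert(pageOf(PltEntryAddr) + PageDelta + Lo12 == GotPltEntryAddr);
  return 0;
}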

bool AArch64::needsThunk(RelExpr Expr, RelType Type, const InputFile *File,
                         uint64_t BranchAddr, const Symbol &S) const {
  // ELF for the ARM 64-bit architecture, section Call and Jump relocations,
  // only permits range extension thunks for the R_AARCH64_CALL26 and
  // R_AARCH64_JUMP26 relocation types.
  if (Type != R_AARCH64_CALL26 && Type != R_AARCH64_JUMP26)
    return false;
  uint64_t Dst = (Expr == R_PLT_PC) ? S.getPltVA() : S.getVA();
  return !inBranchRange(Type, BranchAddr, Dst);
}

bool AArch64::inBranchRange(RelType Type, uint64_t Src, uint64_t Dst) const {
  if (Type != R_AARCH64_CALL26 && Type != R_AARCH64_JUMP26)
    return true;
  // The AArch64 call and unconditional branch instructions have a range of
  // +/- 128 MiB.
  uint64_t Range = 128 * 1024 * 1024;
  if (Dst > Src) {
    // Immediate of branch is signed.
    Range -= 4;
    return Dst - Src <= Range;
  }
  return Src - Dst <= Range;
}
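
Note: the +/- 128 MiB figure comes from the 26-bit signed, word-scaled immediate of B/BL, so reachable offsets are [-2^27, 2^27 - 4]; that is why the forward range is trimmed by 4 above. A standalone restatement with boundary checks (the inRange helper is invented for the example):

#include <cassert>
#include <cstdint>

// Standalone restatement of the range check above, for illustration only.
static bool inRange(uint64_t Src, uint64_t Dst) {
  uint64_t Range = 128 * 1024 * 1024; // 2^27 bytes
  if (Dst > Src)
    return Dst - Src <= Range - 4; // forward: signed imm26 * 4 tops out at 2^27 - 4
  return Src - Dst <= Range;       // backward: the full 2^27 bytes are reachable
}

int main() {
  uint64_t Src = 0x10000000;
  assert(inRange(Src, Src + 0x7FFFFFC));  // +2^27 - 4: last reachable forward target
  assert(!inRange(Src, Src + 0x8000000)); // +2^27: one word too far going forward
  assert(inRange(Src, Src - 0x8000000));  // -2^27: still reachable going backward
  assert(!inRange(Src, Src - 0x8000004)); // one word beyond the backward limit
  return 0;
}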

static void write32AArch64Addr(uint8_t *L, uint64_t Imm) {
  uint32_t ImmLo = (Imm & 0x3) << 29;
  uint32_t ImmHi = (Imm & 0x1FFFFC) << 3;
  uint64_t Mask = (0x3 << 29) | (0x1FFFFC << 3);
  write32le(L, (read32le(L) & ~Mask) | ImmLo | ImmHi);
}
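
Note: write32AArch64Addr scatters a 21-bit ADR/ADRP immediate into the instruction's two fields, immlo in bits 30:29 and immhi in bits 23:5. A standalone sketch (packAdrImm and unpackAdrImm are invented names) showing that the packing round-trips:

#include <cassert>
#include <cstdint>

// Standalone copy of the field packing above, for illustration only.
static uint32_t packAdrImm(uint32_t Insn, uint64_t Imm) {
  uint32_t ImmLo = (Imm & 0x3) << 29;     // value bits [1:0]  -> insn bits [30:29]
  uint32_t ImmHi = (Imm & 0x1FFFFC) << 3; // value bits [20:2] -> insn bits [23:5]
  uint64_t Mask = (0x3 << 29) | (0x1FFFFC << 3);
  return (Insn & ~Mask) | ImmLo | ImmHi;
}

// Reassemble the immediate from the two instruction fields.
static uint64_t unpackAdrImm(uint32_t Insn) {
  return ((Insn >> 29) & 0x3) | (((Insn >> 5) & 0x7FFFF) << 2);
}

int main() {
  uint32_t Adrp = 0x90000000; // adrp x0, 0
  uint64_t Imm = 0x12345;     // any 21-bit value fits
  assert(unpackAdrImm(packAdrImm(Adrp, Imm)) == Imm);
  return 0;
}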

// Return the bits [Start, End] of Val, shifted right by Start bits.
// For instance, getBits(0xF0, 4, 8) returns 0xF.
static uint64_t getBits(uint64_t Val, int Start, int End) {
  uint64_t Mask = ((uint64_t)1 << (End + 1 - Start)) - 1;
  return (Val >> Start) & Mask;
}

static void or32le(uint8_t *P, int32_t V) { write32le(P, read32le(P) | V); }

// Update the immediate field in an AArch64 ldr, str or add instruction.
static void or32AArch64Imm(uint8_t *L, uint64_t Imm) {
  or32le(L, (Imm & 0xFFF) << 10);
}
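
Note: the getBits/or32AArch64Imm pair strips the load/store scaling bits from the low 12 bits of an address and deposits the result in the 12-bit immediate field at instruction bits 21:10. A standalone check of the comment's example plus an R_AARCH64_LDST64_ABS_LO12_NC-style value (the numbers are made up):

#include <cassert>
#include <cstdint>

// Standalone copy of the helper above, for illustration only.
static uint64_t getBits(uint64_t Val, int Start, int End) {
  uint64_t Mask = ((uint64_t)1 << (End + 1 - Start)) - 1;
  return (Val >> Start) & Mask;
}

int main() {
  // The example from the comment: bits [4, 8] of 0xF0 are 0xF.
  assert(getBits(0xF0, 4, 8) == 0xF);

  // R_AARCH64_LDST64_ABS_LO12_NC drops the low 3 bits of an 8-byte-aligned
  // offset; the scaled value is what lands in the 12-bit immediate field.
  uint64_t Val = 0xA38; // 8-byte-aligned low 12 bits of some address
  assert(getBits(Val, 3, 11) == 0x147);
  // or32AArch64Imm would then shift this into instruction bits [21:10].
  assert(((getBits(Val, 3, 11) & 0xFFF) << 10) == (0x147u << 10));
  return 0;
}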

void AArch64::relocateOne(uint8_t *Loc, RelType Type, uint64_t Val) const {
  switch (Type) {
  case R_AARCH64_ABS16:
  case R_AARCH64_PREL16:
    checkIntUInt<16>(Loc, Val, Type);
    write16le(Loc, Val);
    break;
  case R_AARCH64_ABS32:
  case R_AARCH64_PREL32:
    checkIntUInt<32>(Loc, Val, Type);
    write32le(Loc, Val);
    break;
  case R_AARCH64_ABS64:
  case R_AARCH64_GLOB_DAT:
  case R_AARCH64_PREL64:
    write64le(Loc, Val);
    break;
  case R_AARCH64_ADD_ABS_LO12_NC:
    or32AArch64Imm(Loc, Val);
    break;
  case R_AARCH64_ADR_GOT_PAGE:
  case R_AARCH64_ADR_PREL_PG_HI21:
  case R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
  case R_AARCH64_TLSDESC_ADR_PAGE21:
    checkInt<33>(Loc, Val, Type);
    write32AArch64Addr(Loc, Val >> 12);
    break;
  case R_AARCH64_ADR_PREL_LO21:
    checkInt<21>(Loc, Val, Type);
    write32AArch64Addr(Loc, Val);
    break;
  case R_AARCH64_JUMP26:
    // Normally we would just write the bits of the immediate field, however
    // when patching instructions for the cpu errata fix -fix-cortex-a53-843419
    // we want to replace a non-branch instruction with a branch immediate
    // instruction. By writing all the bits of the instruction including the
    // opcode and the immediate (0 001 | 01 imm26) we can do this
    // transformation by placing a R_AARCH64_JUMP26 relocation at the offset of
    // the instruction we want to patch.
    write32le(Loc, 0x14000000);
    LLVM_FALLTHROUGH;
  case R_AARCH64_CALL26:
    checkInt<28>(Loc, Val, Type);
    or32le(Loc, (Val & 0x0FFFFFFC) >> 2);
    break;
  case R_AARCH64_CONDBR19:
  case R_AARCH64_LD_PREL_LO19:
    checkAlignment<4>(Loc, Val, Type);
    checkInt<21>(Loc, Val, Type);
    or32le(Loc, (Val & 0x1FFFFC) << 3);
    break;
  case R_AARCH64_LD64_GOT_LO12_NC:
  case R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
  case R_AARCH64_TLSDESC_LD64_LO12:
    checkAlignment<8>(Loc, Val, Type);
    or32le(Loc, (Val & 0xFF8) << 7);
    break;
  case R_AARCH64_LDST8_ABS_LO12_NC:
    or32AArch64Imm(Loc, getBits(Val, 0, 11));
    break;
  case R_AARCH64_LDST16_ABS_LO12_NC:
    checkAlignment<2>(Loc, Val, Type);
    or32AArch64Imm(Loc, getBits(Val, 1, 11));
    break;
  case R_AARCH64_LDST32_ABS_LO12_NC:
    checkAlignment<4>(Loc, Val, Type);
    or32AArch64Imm(Loc, getBits(Val, 2, 11));
    break;
  case R_AARCH64_LDST64_ABS_LO12_NC:
    checkAlignment<8>(Loc, Val, Type);
    or32AArch64Imm(Loc, getBits(Val, 3, 11));
    break;
  case R_AARCH64_LDST128_ABS_LO12_NC:
    checkAlignment<16>(Loc, Val, Type);
    or32AArch64Imm(Loc, getBits(Val, 4, 11));
    break;
  case R_AARCH64_MOVW_UABS_G0_NC:
    or32le(Loc, (Val & 0xFFFF) << 5);
    break;
  case R_AARCH64_MOVW_UABS_G1_NC:
    or32le(Loc, (Val & 0xFFFF0000) >> 11);
    break;
  case R_AARCH64_MOVW_UABS_G2_NC:
    or32le(Loc, (Val & 0xFFFF00000000) >> 27);
    break;
  case R_AARCH64_MOVW_UABS_G3:
    or32le(Loc, (Val & 0xFFFF000000000000) >> 43);
    break;
  case R_AARCH64_TSTBR14:
    checkInt<16>(Loc, Val, Type);
    or32le(Loc, (Val & 0xFFFC) << 3);
    break;
  case R_AARCH64_TLSLE_ADD_TPREL_HI12:
    checkInt<24>(Loc, Val, Type);
    or32AArch64Imm(Loc, Val >> 12);
    break;
  case R_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
  case R_AARCH64_TLSDESC_ADD_LO12:
    or32AArch64Imm(Loc, Val);
    break;
  default:
    error(getErrorLocation(Loc) + "unrecognized reloc " + Twine(Type));
  }
}
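
Note: in the R_AARCH64_MOVW_UABS_G* cases above, each relocation selects one 16-bit slice of Val and must land it in the imm16 field at instruction bits 20:5; that is where the shift amounts 5, 11, 27 and 43 come from. A standalone check with an arbitrary sample value (the imm16 helper is invented for the example):

#include <cassert>
#include <cstdint>

// Extract what each MOVW_UABS relocation deposits into the imm16 field
// (instruction bits [20:5]), then compare it with the 16-bit slice of Val.
static uint32_t imm16(uint32_t FieldBits) { return (FieldBits >> 5) & 0xFFFF; }

int main() {
  uint64_t Val = 0x1122334455667788; // arbitrary sample value
  assert(imm16((Val & 0xFFFF) << 5) == 0x7788);               // G0: bits [15:0]
  assert(imm16((Val & 0xFFFF0000) >> 11) == 0x5566);          // G1: bits [31:16]
  assert(imm16((Val & 0xFFFF00000000) >> 27) == 0x3344);      // G2: bits [47:32]
  assert(imm16((Val & 0xFFFF000000000000) >> 43) == 0x1122);  // G3: bits [63:48]
  return 0;
}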

void AArch64::relaxTlsGdToLe(uint8_t *Loc, RelType Type, uint64_t Val) const {
  // TLSDESC Global-Dynamic relocations are in the form:
  //   adrp    x0, :tlsdesc:v             [R_AARCH64_TLSDESC_ADR_PAGE21]
  //   ldr     x1, [x0, #:tlsdesc_lo12:v] [R_AARCH64_TLSDESC_LD64_LO12]
  //   add     x0, x0, :tlsdesc_lo12:v    [R_AARCH64_TLSDESC_ADD_LO12]
  //   .tlsdesccall                       [R_AARCH64_TLSDESC_CALL]
  //   blr     x1
  // And it can be optimized to:
  //   movz    x0, #0x0, lsl #16
  //   movk    x0, #0x10
  //   nop
  //   nop
  checkUInt<32>(Loc, Val, Type);

  switch (Type) {
  case R_AARCH64_TLSDESC_ADD_LO12:
  case R_AARCH64_TLSDESC_CALL:
    write32le(Loc, 0xd503201f); // nop
    return;
  case R_AARCH64_TLSDESC_ADR_PAGE21:
    write32le(Loc, 0xd2a00000 | (((Val >> 16) & 0xffff) << 5)); // movz
    return;
  case R_AARCH64_TLSDESC_LD64_LO12:
    write32le(Loc, 0xf2800000 | ((Val & 0xffff) << 5)); // movk
    return;
  default:
    llvm_unreachable("unsupported relocation for TLS GD to LE relaxation");
  }
}
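
Note: the relaxed movz/movk pair writes the 32-bit value in two halves, with movz placing bits 31:16 shifted into position by lsl #16 and movk filling bits 15:0. A standalone sketch decoding the two immediates back out of the encodings used above (the sample value is made up):

#include <cassert>
#include <cstdint>

int main() {
  uint64_t Val = 0x12345; // hypothetical 32-bit offset being materialized

  // What the relaxation above writes for the two instructions.
  uint32_t Movz = 0xd2a00000 | (((Val >> 16) & 0xffff) << 5); // movz x0, #hi, lsl #16
  uint32_t Movk = 0xf2800000 | ((Val & 0xffff) << 5);         // movk x0, #lo

  // Pull the imm16 fields (instruction bits [20:5]) back out and recombine.
  uint64_t Hi = (Movz >> 5) & 0xffff;
  uint64_t Lo = (Movk >> 5) & 0xffff;
  assert(((Hi << 16) | Lo) == Val);
  return 0;
}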

void AArch64::relaxTlsGdToIe(uint8_t *Loc, RelType Type, uint64_t Val) const {
  // TLSDESC Global-Dynamic relocations are in the form:
  //   adrp    x0, :tlsdesc:v             [R_AARCH64_TLSDESC_ADR_PAGE21]
  //   ldr     x1, [x0, #:tlsdesc_lo12:v] [R_AARCH64_TLSDESC_LD64_LO12]
  //   add     x0, x0, :tlsdesc_lo12:v    [R_AARCH64_TLSDESC_ADD_LO12]
  //   .tlsdesccall                       [R_AARCH64_TLSDESC_CALL]
  //   blr     x1
  // And it can be optimized to:
  //   adrp    x0, :gottprel:v
  //   ldr     x0, [x0, :gottprel_lo12:v]
  //   nop
  //   nop

  switch (Type) {
  case R_AARCH64_TLSDESC_ADD_LO12:
  case R_AARCH64_TLSDESC_CALL:
    write32le(Loc, 0xd503201f); // nop
    break;
  case R_AARCH64_TLSDESC_ADR_PAGE21:
    write32le(Loc, 0x90000000); // adrp
    relocateOne(Loc, R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21, Val);
    break;
  case R_AARCH64_TLSDESC_LD64_LO12:
    write32le(Loc, 0xf9400000); // ldr
    relocateOne(Loc, R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC, Val);
    break;
  default:
    llvm_unreachable("unsupported relocation for TLS GD to IE relaxation");
  }
}

void AArch64::relaxTlsIeToLe(uint8_t *Loc, RelType Type, uint64_t Val) const {
  checkUInt<32>(Loc, Val, Type);

  if (Type == R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21) {
    // Generate MOVZ.
    uint32_t RegNo = read32le(Loc) & 0x1f;
    write32le(Loc, (0xd2a00000 | RegNo) | (((Val >> 16) & 0xffff) << 5));
    return;
  }
  if (Type == R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC) {
    // Generate MOVK.
    uint32_t RegNo = read32le(Loc) & 0x1f;
    write32le(Loc, (0xf2800000 | RegNo) | ((Val & 0xffff) << 5));
    return;
  }
  llvm_unreachable("invalid relocation for TLS IE to LE relaxation");
}

TargetInfo *elf::getAArch64TargetInfo() {
  static AArch64 Target;
  return &Target;
}