Coverage Report

Created: 2017-09-19 22:28

/Users/buildslave/jenkins/sharedspace/clang-stage2-coverage-R@2/llvm/tools/lld/lib/ReaderWriter/MachO/ArchHandler_arm64.cpp
Line
Count
Source (jump to first uncovered line)
1
//===- lib/FileFormat/MachO/ArchHandler_arm64.cpp -------------------------===//
2
//
3
//                             The LLVM Linker
4
//
5
// This file is distributed under the University of Illinois Open Source
6
// License. See LICENSE.TXT for details.
7
//
8
//===----------------------------------------------------------------------===//
9
10
#include "ArchHandler.h"
11
#include "Atoms.h"
12
#include "MachONormalizedFileBinaryUtils.h"
13
#include "llvm/ADT/StringRef.h"
14
#include "llvm/ADT/StringSwitch.h"
15
#include "llvm/ADT/Triple.h"
16
#include "llvm/Support/Endian.h"
17
#include "llvm/Support/ErrorHandling.h"
18
#include "llvm/Support/Format.h"
19
20
using namespace llvm::MachO;
21
using namespace lld::mach_o::normalized;
22
23
namespace lld {
24
namespace mach_o {
25
26
using llvm::support::ulittle32_t;
27
using llvm::support::ulittle64_t;
28
29
using llvm::support::little32_t;
30
using llvm::support::little64_t;
31
32
class ArchHandler_arm64 : public ArchHandler {
33
public:
34
35
  ArchHandler_arm64() = default;
35
35
  ~ArchHandler_arm64() override = default;
36
37
18
  const Registry::KindStrings *kindStrings() override { return _sKindStrings; }
38
39
156
  Reference::KindArch kindArch() override {
40
156
    return Reference::KindArch::AArch64;
41
156
  }
42
43
  /// Used by GOTPass to locate GOT References
44
96
  bool isGOTAccess(const Reference &ref, bool &canBypassGOT) override {
45
96
    if (ref.kindNamespace() != Reference::KindNamespace::mach_o)
46
0
      return false;
47
96
    assert(ref.kindArch() == Reference::KindArch::AArch64);
48
96
    switch (ref.kindValue()) {
49
8
    case gotPage21:
50
8
    case gotOffset12:
51
8
      canBypassGOT = true;
52
8
      return true;
53
3
    case delta32ToGOT:
54
3
    case unwindCIEToPersonalityFunction:
55
3
    case imageOffsetGot:
56
3
      canBypassGOT = false;
57
3
      return true;
58
85
    default:
59
85
      return false;
60
0
    }
61
0
  }
62
63
  /// Used by GOTPass to update GOT References.
64
11
  void updateReferenceToGOT(const Reference *ref, bool targetNowGOT) override {
65
11
    // If GOT slot was instantiated, transform:
66
11
    //   gotPage21/gotOffset12 -> page21/offset12scale8
67
11
    // If GOT slot optimized away, transform:
68
11
    //   gotPage21/gotOffset12 -> page21/addOffset12
69
11
    assert(ref->kindNamespace() == Reference::KindNamespace::mach_o);
70
11
    assert(ref->kindArch() == Reference::KindArch::AArch64);
71
11
    switch (ref->kindValue()) {
72
4
    case gotPage21:
73
4
      const_cast<Reference *>(ref)->setKindValue(page21);
74
4
      break;
75
4
    case gotOffset12:
76
4
      const_cast<Reference *>(ref)->setKindValue(targetNowGOT ?
77
4
                                                 
offset12scale8 : addOffset12);
78
4
      break;
79
2
    case delta32ToGOT:
80
2
      const_cast<Reference *>(ref)->setKindValue(delta32);
81
2
      break;
82
1
    case imageOffsetGot:
83
1
      const_cast<Reference *>(ref)->setKindValue(imageOffset);
84
1
      break;
85
0
    default:
86
0
      llvm_unreachable("Not a GOT reference");
87
11
    }
88
11
  }
89
90
130
  const StubInfo &stubInfo() override { return _sStubInfo; }
91
92
  bool isCallSite(const Reference &) override;
93
0
  bool isNonCallBranch(const Reference &) override {
94
0
    return false;
95
0
  }
96
97
  bool isPointer(const Reference &) override;
98
  bool isPairedReloc(const normalized::Relocation &) override;
99
100
5
  bool needsCompactUnwind() override {
101
5
    return true;
102
5
  }
103
7
  Reference::KindValue imageOffsetKind() override {
104
7
    return imageOffset;
105
7
  }
106
1
  Reference::KindValue imageOffsetKindIndirect() override {
107
1
    return imageOffsetGot;
108
1
  }
109
110
4
  Reference::KindValue unwindRefToPersonalityFunctionKind() override {
111
4
    return unwindCIEToPersonalityFunction;
112
4
  }
113
114
8
  Reference::KindValue unwindRefToCIEKind() override {
115
8
    return negDelta32;
116
8
  }
117
118
14
  Reference::KindValue unwindRefToFunctionKind() override {
119
14
    return unwindFDEToFunction;
120
14
  }
121
122
0
  Reference::KindValue unwindRefToEhFrameKind() override {
123
0
    return unwindInfoToEhFrame;
124
0
  }
125
126
0
  Reference::KindValue pointerKind() override {
127
0
    return pointer64;
128
0
  }
129
130
7
  Reference::KindValue lazyImmediateLocationKind() override {
131
7
    return lazyImmediateLocation;
132
7
  }
133
134
6
  uint32_t dwarfCompactUnwindType() override {
135
6
    return 0x03000000;
136
6
  }
137
138
  llvm::Error getReferenceInfo(const normalized::Relocation &reloc,
139
                               const DefinedAtom *inAtom,
140
                               uint32_t offsetInAtom,
141
                               uint64_t fixupAddress, bool isBig,
142
                               FindAtomBySectionAndAddress atomFromAddress,
143
                               FindAtomBySymbolIndex atomFromSymbolIndex,
144
                               Reference::KindValue *kind,
145
                               const lld::Atom **target,
146
                               Reference::Addend *addend) override;
147
  llvm::Error
148
      getPairReferenceInfo(const normalized::Relocation &reloc1,
149
                           const normalized::Relocation &reloc2,
150
                           const DefinedAtom *inAtom,
151
                           uint32_t offsetInAtom,
152
                           uint64_t fixupAddress, bool isBig, bool scatterable,
153
                           FindAtomBySectionAndAddress atomFromAddress,
154
                           FindAtomBySymbolIndex atomFromSymbolIndex,
155
                           Reference::KindValue *kind,
156
                           const lld::Atom **target,
157
                           Reference::Addend *addend) override;
158
159
36
  bool needsLocalSymbolInRelocatableFile(const DefinedAtom *atom) override {
160
36
    return (atom->contentType() == DefinedAtom::typeCString);
161
36
  }
162
163
  void generateAtomContent(const DefinedAtom &atom, bool relocatable,
164
                           FindAddressForAtom findAddress,
165
                           FindAddressForAtom findSectionAddress,
166
                           uint64_t imageBaseAddress,
167
                    llvm::MutableArrayRef<uint8_t> atomContentBuffer) override;
168
169
  void appendSectionRelocations(const DefinedAtom &atom,
170
                                uint64_t atomSectionOffset,
171
                                const Reference &ref,
172
                                FindSymbolIndexForAtom symbolIndexForAtom,
173
                                FindSectionIndexForAtom sectionIndexForAtom,
174
                                FindAddressForAtom addressForAtom,
175
                                normalized::Relocations &relocs) override;
176
177
private:
178
  static const Registry::KindStrings _sKindStrings[];
179
  static const StubInfo _sStubInfo;
180
181
  enum Arm64Kind : Reference::KindValue {
182
    invalid,               /// for error condition
183
184
    // Kinds found in mach-o .o files:
185
    branch26,              /// ex: bl   _foo
186
    page21,                /// ex: adrp x1, _foo@PAGE
187
    offset12,              /// ex: ldrb w0, [x1, _foo@PAGEOFF]
188
    offset12scale2,        /// ex: ldrs w0, [x1, _foo@PAGEOFF]
189
    offset12scale4,        /// ex: ldr  w0, [x1, _foo@PAGEOFF]
190
    offset12scale8,        /// ex: ldr  x0, [x1, _foo@PAGEOFF]
191
    offset12scale16,       /// ex: ldr  q0, [x1, _foo@PAGEOFF]
192
    gotPage21,             /// ex: adrp x1, _foo@GOTPAGE
193
    gotOffset12,           /// ex: ldr  w0, [x1, _foo@GOTPAGEOFF]
194
    tlvPage21,             /// ex: adrp x1, _foo@TLVPAGE
195
    tlvOffset12,           /// ex: ldr  w0, [x1, _foo@TLVPAGEOFF]
196
197
    pointer64,             /// ex: .quad _foo
198
    delta64,               /// ex: .quad _foo - .
199
    delta32,               /// ex: .long _foo - .
200
    negDelta32,            /// ex: .long . - _foo
201
    pointer64ToGOT,        /// ex: .quad _foo@GOT
202
    delta32ToGOT,          /// ex: .long _foo@GOT - .
203
204
    // Kinds introduced by Passes:
205
    addOffset12,           /// Location contains LDR to change into ADD.
206
    lazyPointer,           /// Location contains a lazy pointer.
207
    lazyImmediateLocation, /// Location contains immediate value used in stub.
208
    imageOffset,           /// Location contains offset of atom in final image
209
    imageOffsetGot,        /// Location contains offset of GOT entry for atom in
210
                           /// final image (typically personality function).
211
    unwindCIEToPersonalityFunction,   /// Nearly delta32ToGOT, but cannot be
212
                           /// rematerialized in relocatable object
213
                           /// (yay for implicit contracts!).
214
    unwindFDEToFunction,   /// Nearly delta64, but cannot be rematerialized in
215
                           /// relocatable object (yay for implicit contracts!).
216
    unwindInfoToEhFrame,   /// Fix low 24 bits of compact unwind encoding to
217
                           /// refer to __eh_frame entry.
218
  };
219
220
  void applyFixupFinal(const Reference &ref, uint8_t *location,
221
                       uint64_t fixupAddress, uint64_t targetAddress,
222
                       uint64_t inAtomAddress, uint64_t imageBaseAddress,
223
                       FindAddressForAtom findSectionAddress);
224
225
  void applyFixupRelocatable(const Reference &ref, uint8_t *location,
226
                             uint64_t fixupAddress, uint64_t targetAddress,
227
                             uint64_t inAtomAddress, bool targetUnnamed);
228
229
  // Utility functions for inspecting/updating instructions.
230
  static uint32_t setDisplacementInBranch26(uint32_t instr, int32_t disp);
231
  static uint32_t setDisplacementInADRP(uint32_t instr, int64_t disp);
232
  static Arm64Kind offset12KindFromInstruction(uint32_t instr);
233
  static uint32_t setImm12(uint32_t instr, uint32_t offset);
234
};
235
236
const Registry::KindStrings ArchHandler_arm64::_sKindStrings[] = {
237
  LLD_KIND_STRING_ENTRY(invalid),
238
  LLD_KIND_STRING_ENTRY(branch26),
239
  LLD_KIND_STRING_ENTRY(page21),
240
  LLD_KIND_STRING_ENTRY(offset12),
241
  LLD_KIND_STRING_ENTRY(offset12scale2),
242
  LLD_KIND_STRING_ENTRY(offset12scale4),
243
  LLD_KIND_STRING_ENTRY(offset12scale8),
244
  LLD_KIND_STRING_ENTRY(offset12scale16),
245
  LLD_KIND_STRING_ENTRY(gotPage21),
246
  LLD_KIND_STRING_ENTRY(gotOffset12),
247
  LLD_KIND_STRING_ENTRY(tlvPage21),
248
  LLD_KIND_STRING_ENTRY(tlvOffset12),
249
  LLD_KIND_STRING_ENTRY(pointer64),
250
  LLD_KIND_STRING_ENTRY(delta64),
251
  LLD_KIND_STRING_ENTRY(delta32),
252
  LLD_KIND_STRING_ENTRY(negDelta32),
253
  LLD_KIND_STRING_ENTRY(pointer64ToGOT),
254
  LLD_KIND_STRING_ENTRY(delta32ToGOT),
255
256
  LLD_KIND_STRING_ENTRY(addOffset12),
257
  LLD_KIND_STRING_ENTRY(lazyPointer),
258
  LLD_KIND_STRING_ENTRY(lazyImmediateLocation),
259
  LLD_KIND_STRING_ENTRY(imageOffset),
260
  LLD_KIND_STRING_ENTRY(imageOffsetGot),
261
  LLD_KIND_STRING_ENTRY(unwindCIEToPersonalityFunction),
262
  LLD_KIND_STRING_ENTRY(unwindFDEToFunction),
263
  LLD_KIND_STRING_ENTRY(unwindInfoToEhFrame),
264
265
  LLD_KIND_STRING_END
266
};
267
268
const ArchHandler::StubInfo ArchHandler_arm64::_sStubInfo = {
269
  "dyld_stub_binder",
270
271
  // Lazy pointer references
272
  { Reference::KindArch::AArch64, pointer64, 0, 0 },
273
  { Reference::KindArch::AArch64, lazyPointer, 0, 0 },
274
275
  // GOT pointer to dyld_stub_binder
276
  { Reference::KindArch::AArch64, pointer64, 0, 0 },
277
278
  // arm64 code alignment 2^1
279
  1,
280
281
  // Stub size and code
282
  12,
283
  { 0x10, 0x00, 0x00, 0x90,   // ADRP  X16, lazy_pointer@page
284
    0x10, 0x02, 0x40, 0xF9,   // LDR   X16, [X16, lazy_pointer@pageoff]
285
    0x00, 0x02, 0x1F, 0xD6 }, // BR    X16
286
  { Reference::KindArch::AArch64, page21, 0, 0 },
287
  { true,                         offset12scale8, 4, 0 },
288
289
  // Stub Helper size and code
290
  12,
291
  { 0x50, 0x00, 0x00, 0x18,   //      LDR   W16, L0
292
    0x00, 0x00, 0x00, 0x14,   //      LDR   B  helperhelper
293
    0x00, 0x00, 0x00, 0x00 }, // L0: .long 0
294
  { Reference::KindArch::AArch64, lazyImmediateLocation, 8, 0 },
295
  { Reference::KindArch::AArch64, branch26, 4, 0 },
296
297
  // Stub helper image cache content type
298
  DefinedAtom::typeGOT,
299
300
  // Stub Helper-Common size and code
301
  24,
302
  // Stub helper alignment
303
  2,
304
  { 0x11, 0x00, 0x00, 0x90,   //  ADRP  X17, dyld_ImageLoaderCache@page
305
    0x31, 0x02, 0x00, 0x91,   //  ADD   X17, X17, dyld_ImageLoaderCache@pageoff
306
    0xF0, 0x47, 0xBF, 0xA9,   //  STP   X16/X17, [SP, #-16]!
307
    0x10, 0x00, 0x00, 0x90,   //  ADRP  X16, _fast_lazy_bind@page
308
    0x10, 0x02, 0x40, 0xF9,   //  LDR   X16, [X16,_fast_lazy_bind@pageoff]
309
    0x00, 0x02, 0x1F, 0xD6 }, //  BR    X16
310
  { Reference::KindArch::AArch64, page21,   0, 0 },
311
  { true,                         offset12, 4, 0 },
312
  { Reference::KindArch::AArch64, page21,   12, 0 },
313
  { true,                         offset12scale8, 16, 0 }
314
};
315
316
29
bool ArchHandler_arm64::isCallSite(const Reference &ref) {
317
29
  if (ref.kindNamespace() != Reference::KindNamespace::mach_o)
318
0
    return false;
319
29
  assert(ref.kindArch() == Reference::KindArch::AArch64);
320
29
  return (ref.kindValue() == branch26);
321
29
}
322
323
102
bool ArchHandler_arm64::isPointer(const Reference &ref) {
324
102
  if (ref.kindNamespace() != Reference::KindNamespace::mach_o)
325
0
    return false;
326
102
  assert(ref.kindArch() == Reference::KindArch::AArch64);
327
102
  Reference::KindValue kind = ref.kindValue();
328
102
  return (kind == pointer64);
329
102
}
330
331
109
bool ArchHandler_arm64::isPairedReloc(const Relocation &r) {
332
103
  return ((r.type == ARM64_RELOC_ADDEND) || (r.type == ARM64_RELOC_SUBTRACTOR));
333
109
}
334
335
uint32_t ArchHandler_arm64::setDisplacementInBranch26(uint32_t instr,
336
25
                                                      int32_t displacement) {
337
25
  assert((displacement <= 134217727) && (displacement > (-134217728)) &&
338
25
         "arm64 branch out of range");
339
25
  return (instr & 0xFC000000) | ((uint32_t)(displacement >> 2) & 0x03FFFFFF);
340
25
}
341
342
uint32_t ArchHandler_arm64::setDisplacementInADRP(uint32_t instruction,
343
31
                                                  int64_t displacement) {
344
31
  assert((displacement <= 0x100000000LL) && (displacement > (-0x100000000LL)) &&
345
31
         "arm64 ADRP out of range");
346
31
  assert(((instruction & 0x9F000000) == 0x90000000) &&
347
31
         "reloc not on ADRP instruction");
348
31
  uint32_t immhi = (displacement >> 9) & (0x00FFFFE0);
349
31
  uint32_t immlo = (displacement << 17) & (0x60000000);
350
31
  return (instruction & 0x9F00001F) | immlo | immhi;
351
31
}
352
353
ArchHandler_arm64::Arm64Kind
354
18
ArchHandler_arm64::offset12KindFromInstruction(uint32_t instruction) {
355
18
  if (instruction & 0x08000000) {
356
12
    switch ((instruction >> 30) & 0x3) {
357
4
    case 0:
358
4
      if ((instruction & 0x04800000) == 0x04800000)
359
2
        return offset12scale16;
360
2
      return offset12;
361
2
    case 1:
362
2
      return offset12scale2;
363
4
    case 2:
364
4
      return offset12scale4;
365
2
    case 3:
366
2
      return offset12scale8;
367
6
    }
368
6
  }
369
6
  return offset12;
370
6
}
371
372
39
uint32_t ArchHandler_arm64::setImm12(uint32_t instruction, uint32_t offset) {
373
39
  assert(((offset & 0xFFFFF000) == 0) && "imm12 offset out of range");
374
39
  uint32_t imm12 = offset << 10;
375
39
  return (instruction & 0xFFC003FF) | imm12;
376
39
}
377
378
llvm::Error ArchHandler_arm64::getReferenceInfo(
379
    const Relocation &reloc, const DefinedAtom *inAtom, uint32_t offsetInAtom,
380
    uint64_t fixupAddress, bool isBig,
381
    FindAtomBySectionAndAddress atomFromAddress,
382
    FindAtomBySymbolIndex atomFromSymbolIndex, Reference::KindValue *kind,
383
88
    const lld::Atom **target, Reference::Addend *addend) {
384
88
  const uint8_t *fixupContent = &inAtom->rawContent()[offsetInAtom];
385
88
  switch (relocPattern(reloc)) {
386
16
  case ARM64_RELOC_BRANCH26           | rPcRel | rExtern | rLength4:
387
16
    // ex: bl _foo
388
16
    *kind = branch26;
389
16
    if (auto ec = atomFromSymbolIndex(reloc.symbol, target))
390
0
      return ec;
391
16
    *addend = 0;
392
16
    return llvm::Error::success();
393
8
  case ARM64_RELOC_PAGE21             | rPcRel | rExtern | rLength4:
394
8
    // ex: adrp x1, _foo@PAGE
395
8
    *kind = page21;
396
8
    if (auto ec = atomFromSymbolIndex(reloc.symbol, target))
397
0
      return ec;
398
8
    *addend = 0;
399
8
    return llvm::Error::success();
400
16
  case ARM64_RELOC_PAGEOFF12                   | rExtern | rLength4:
401
16
    // ex: ldr x0, [x1, _foo@PAGEOFF]
402
16
    *kind = offset12KindFromInstruction(*(const little32_t *)fixupContent);
403
16
    if (auto ec = atomFromSymbolIndex(reloc.symbol, target))
404
0
      return ec;
405
16
    *addend = 0;
406
16
    return llvm::Error::success();
407
6
  case ARM64_RELOC_GOT_LOAD_PAGE21    | rPcRel | rExtern | rLength4:
408
6
    // ex: adrp x1, _foo@GOTPAGE
409
6
    *kind = gotPage21;
410
6
    if (auto ec = atomFromSymbolIndex(reloc.symbol, target))
411
0
      return ec;
412
6
    *addend = 0;
413
6
    return llvm::Error::success();
414
6
  case ARM64_RELOC_GOT_LOAD_PAGEOFF12          | rExtern | rLength4:
415
6
    // ex: ldr x0, [x1, _foo@GOTPAGEOFF]
416
6
    *kind = gotOffset12;
417
6
    if (auto ec = atomFromSymbolIndex(reloc.symbol, target))
418
0
      return ec;
419
6
    *addend = 0;
420
6
    return llvm::Error::success();
421
2
  case ARM64_RELOC_TLVP_LOAD_PAGE21   | rPcRel | rExtern | rLength4:
422
2
    // ex: adrp x1, _foo@TLVPAGE
423
2
    *kind = tlvPage21;
424
2
    if (auto ec = atomFromSymbolIndex(reloc.symbol, target))
425
0
      return ec;
426
2
    *addend = 0;
427
2
    return llvm::Error::success();
428
2
  case ARM64_RELOC_TLVP_LOAD_PAGEOFF12         | rExtern | rLength4:
429
2
    // ex: ldr x0, [x1, _foo@TLVPAGEOFF]
430
2
    *kind = tlvOffset12;
431
2
    if (auto ec = atomFromSymbolIndex(reloc.symbol, target))
432
0
      return ec;
433
2
    *addend = 0;
434
2
    return llvm::Error::success();
435
13
  case ARM64_RELOC_UNSIGNED                    | rExtern | rLength8:
436
13
    // ex: .quad _foo + N
437
13
    *kind = pointer64;
438
13
    if (auto ec = atomFromSymbolIndex(reloc.symbol, target))
439
0
      return ec;
440
13
    *addend = *(const little64_t *)fixupContent;
441
13
    return llvm::Error::success();
442
12
  case ARM64_RELOC_UNSIGNED                              | rLength8:
443
12
     // ex: .quad Lfoo + N
444
12
     *kind = pointer64;
445
12
     return atomFromAddress(reloc.symbol, *(const little64_t *)fixupContent,
446
12
                            target, addend);
447
2
  case ARM64_RELOC_POINTER_TO_GOT              | rExtern | rLength8:
448
2
    // ex: .quad _foo@GOT
449
2
    *kind = pointer64ToGOT;
450
2
    if (auto ec = atomFromSymbolIndex(reloc.symbol, target))
451
0
      return ec;
452
2
    *addend = 0;
453
2
    return llvm::Error::success();
454
5
  case ARM64_RELOC_POINTER_TO_GOT     | rPcRel | rExtern | rLength4:
455
5
    // ex: .long _foo@GOT - .
456
5
457
5
    // If we are in an .eh_frame section, then the kind of the relocation should
458
5
    // not be delta32ToGOT.  It may instead be unwindCIEToPersonalityFunction.
459
5
    if (inAtom->contentType() == DefinedAtom::typeCFI)
460
2
      *kind = unwindCIEToPersonalityFunction;
461
5
    else
462
3
      *kind = delta32ToGOT;
463
5
464
5
    if (auto ec = atomFromSymbolIndex(reloc.symbol, target))
465
0
      return ec;
466
5
    *addend = 0;
467
5
    return llvm::Error::success();
468
0
  default:
469
0
    return llvm::make_error<GenericError>("unsupported arm64 relocation type");
470
0
  }
471
0
}
472
473
llvm::Error ArchHandler_arm64::getPairReferenceInfo(
474
    const normalized::Relocation &reloc1, const normalized::Relocation &reloc2,
475
    const DefinedAtom *inAtom, uint32_t offsetInAtom, uint64_t fixupAddress,
476
    bool swap, bool scatterable, FindAtomBySectionAndAddress atomFromAddress,
477
    FindAtomBySymbolIndex atomFromSymbolIndex, Reference::KindValue *kind,
478
21
    const lld::Atom **target, Reference::Addend *addend) {
479
21
  const uint8_t *fixupContent = &inAtom->rawContent()[offsetInAtom];
480
21
  switch (relocPattern(reloc1) << 16 | relocPattern(reloc2)) {
481
2
  case ((ARM64_RELOC_ADDEND                                | rLength4) << 16 |
482
2
         ARM64_RELOC_BRANCH26           | rPcRel | rExtern | rLength4):
483
2
    // ex: bl _foo+8
484
2
    *kind = branch26;
485
2
    if (auto ec = atomFromSymbolIndex(reloc2.symbol, target))
486
0
      return ec;
487
2
    *addend = reloc1.symbol;
488
2
    return llvm::Error::success();
489
2
  case ((ARM64_RELOC_ADDEND                                | rLength4) << 16 |
490
2
         ARM64_RELOC_PAGE21             | rPcRel | rExtern | rLength4):
491
2
    // ex: adrp x1, _foo@PAGE
492
2
    *kind = page21;
493
2
    if (auto ec = atomFromSymbolIndex(reloc2.symbol, target))
494
0
      return ec;
495
2
    *addend = reloc1.symbol;
496
2
    return llvm::Error::success();
497
2
  case ((ARM64_RELOC_ADDEND                                | rLength4) << 16 |
498
2
         ARM64_RELOC_PAGEOFF12                   | rExtern | rLength4): {
499
2
    // ex: ldr w0, [x1, _foo@PAGEOFF]
500
2
    uint32_t cont32 = (int32_t)*(const little32_t *)fixupContent;
501
2
    *kind = offset12KindFromInstruction(cont32);
502
2
    if (auto ec = atomFromSymbolIndex(reloc2.symbol, target))
503
0
      return ec;
504
2
    *addend = reloc1.symbol;
505
2
    return llvm::Error::success();
506
2
  }
507
11
  case ((ARM64_RELOC_SUBTRACTOR                  | rExtern | rLength8) << 16 |
508
11
         ARM64_RELOC_UNSIGNED                    | rExtern | rLength8):
509
11
    // ex: .quad _foo - .
510
11
    if (auto ec = atomFromSymbolIndex(reloc2.symbol, target))
511
0
      return ec;
512
11
513
11
    // If we are in an .eh_frame section, then the kind of the relocation should
514
11
    // not be delta64.  It may instead be unwindFDEToFunction.
515
11
    
    if (inAtom->contentType() == DefinedAtom::typeCFI)
516
4
      *kind = unwindFDEToFunction;
517
11
    else
518
7
      *kind = delta64;
519
11
520
11
    // The offsets of the 2 relocations must match
521
11
    if (reloc1.offset != reloc2.offset)
522
1
      return llvm::make_error<GenericError>(
523
1
                                    "paired relocs must have the same offset");
524
10
    *addend = (int64_t)*(const little64_t *)fixupContent + offsetInAtom;
525
10
    return llvm::Error::success();
526
4
  case ((ARM64_RELOC_SUBTRACTOR                  | rExtern | rLength4) << 16 |
527
4
         ARM64_RELOC_UNSIGNED                    | rExtern | rLength4):
528
4
    // ex: .quad _foo - .
529
4
    *kind = delta32;
530
4
    if (auto ec = atomFromSymbolIndex(reloc2.symbol, target))
531
0
      return ec;
532
4
    *addend = (int32_t)*(const little32_t *)fixupContent + offsetInAtom;
533
4
    return llvm::Error::success();
534
0
  default:
535
0
    return llvm::make_error<GenericError>("unsupported arm64 relocation pair");
536
0
  }
537
0
}
538
539
void ArchHandler_arm64::generateAtomContent(
540
    const DefinedAtom &atom, bool relocatable, FindAddressForAtom findAddress,
541
    FindAddressForAtom findSectionAddress, uint64_t imageBaseAddress,
542
119
    llvm::MutableArrayRef<uint8_t> atomContentBuffer) {
543
119
  // Copy raw bytes.
544
119
  std::copy(atom.rawContent().begin(), atom.rawContent().end(),
545
119
            atomContentBuffer.begin());
546
119
  // Apply fix-ups.
547
#ifndef NDEBUG
548
  if (atom.begin() != atom.end()) {
549
    DEBUG_WITH_TYPE("atom-content", llvm::dbgs()
550
                    << "Applying fixups to atom:\n"
551
                    << "   address="
552
                    << llvm::format("    0x%09lX", &atom)
553
                    << ", file=#"
554
                    << atom.file().ordinal()
555
                    << ", atom=#"
556
                    << atom.ordinal()
557
                    << ", name="
558
                    << atom.name()
559
                    << ", type="
560
                    << atom.contentType()
561
                    << "\n");
562
  }
563
#endif
564
119
  for (const Reference *ref : atom) {
565
204
    uint32_t offset = ref->offsetInAtom();
566
204
    const Atom *target = ref->target();
567
204
    bool targetUnnamed = target->name().empty();
568
204
    uint64_t targetAddress = 0;
569
204
    if (isa<DefinedAtom>(target))
570
153
      targetAddress = findAddress(*target);
571
204
    uint64_t atomAddress = findAddress(atom);
572
204
    uint64_t fixupAddress = atomAddress + offset;
573
204
    if (relocatable) {
574
102
      applyFixupRelocatable(*ref, &atomContentBuffer[offset], fixupAddress,
575
102
                            targetAddress, atomAddress, targetUnnamed);
576
204
    } else {
577
102
      applyFixupFinal(*ref, &atomContentBuffer[offset], fixupAddress,
578
102
                      targetAddress, atomAddress, imageBaseAddress,
579
102
                      findSectionAddress);
580
102
    }
581
204
  }
582
119
}
583
584
void ArchHandler_arm64::applyFixupFinal(const Reference &ref, uint8_t *loc,
585
                                        uint64_t fixupAddress,
586
                                        uint64_t targetAddress,
587
                                        uint64_t inAtomAddress,
588
                                        uint64_t imageBaseAddress,
589
102
                                        FindAddressForAtom findSectionAddress) {
590
102
  if (ref.kindNamespace() != Reference::KindNamespace::mach_o)
591
0
    return;
592
102
  assert(ref.kindArch() == Reference::KindArch::AArch64);
593
102
  ulittle32_t *loc32 = reinterpret_cast<ulittle32_t *>(loc);
594
102
  ulittle64_t *loc64 = reinterpret_cast<ulittle64_t *>(loc);
595
102
  int32_t displacement;
596
102
  uint32_t instruction;
597
102
  uint32_t value32;
598
102
  uint32_t value64;
599
102
  switch (static_cast<Arm64Kind>(ref.kindValue())) {
600
17
  case branch26:
601
17
    displacement = (targetAddress - fixupAddress) + ref.addend();
602
17
    *loc32 = setDisplacementInBranch26(*loc32, displacement);
603
17
    return;
604
19
  case page21:
605
19
  case gotPage21:
606
19
  case tlvPage21:
607
19
    displacement =
608
19
        ((targetAddress + ref.addend()) & (-4096)) - (fixupAddress & (-4096));
609
19
    *loc32 = setDisplacementInADRP(*loc32, displacement);
610
19
    return;
611
5
  case offset12:
612
5
  case gotOffset12:
613
5
  case tlvOffset12:
614
5
    displacement = (targetAddress + ref.addend()) & 0x00000FFF;
615
5
    *loc32 = setImm12(*loc32, displacement);
616
5
    return;
617
0
  case offset12scale2:
618
0
    displacement = (targetAddress + ref.addend()) & 0x00000FFF;
619
0
    assert(((displacement & 0x1) == 0) &&
620
0
           "scaled imm12 not accessing 2-byte aligned");
621
0
    *loc32 = setImm12(*loc32, displacement >> 1);
622
0
    return;
623
0
  case offset12scale4:
624
0
    displacement = (targetAddress + ref.addend()) & 0x00000FFF;
625
0
    assert(((displacement & 0x3) == 0) &&
626
0
           "scaled imm12 not accessing 4-byte aligned");
627
0
    *loc32 = setImm12(*loc32, displacement >> 2);
628
0
    return;
629
14
  case offset12scale8:
630
14
    displacement = (targetAddress + ref.addend()) & 0x00000FFF;
631
14
    assert(((displacement & 0x7) == 0) &&
632
14
           "scaled imm12 not accessing 8-byte aligned");
633
14
    *loc32 = setImm12(*loc32, displacement >> 3);
634
14
    return;
635
0
  case offset12scale16:
636
0
    displacement = (targetAddress + ref.addend()) & 0x00000FFF;
637
0
    assert(((displacement & 0xF) == 0) &&
638
0
           "scaled imm12 not accessing 16-byte aligned");
639
0
    *loc32 = setImm12(*loc32, displacement >> 4);
640
0
    return;
641
0
  case addOffset12:
642
0
    instruction = *loc32;
643
0
    assert(((instruction & 0xFFC00000) == 0xF9400000) &&
644
0
           "GOT reloc is not an LDR instruction");
645
0
    displacement = (targetAddress + ref.addend()) & 0x00000FFF;
646
0
    value32 = 0x91000000 | (instruction & 0x000003FF);
647
0
    instruction = setImm12(value32, displacement);
648
0
    *loc32 = instruction;
649
0
    return;
650
16
  case pointer64:
651
16
  case pointer64ToGOT:
652
16
    *loc64 = targetAddress + ref.addend();
653
16
    return;
654
0
  case delta64:
655
0
  case unwindFDEToFunction:
656
0
    *loc64 = (targetAddress - fixupAddress) + ref.addend();
657
0
    return;
658
2
  case delta32:
659
2
  case delta32ToGOT:
660
2
  case unwindCIEToPersonalityFunction:
661
2
    *loc32 = (targetAddress - fixupAddress) + ref.addend();
662
2
    return;
663
0
  case negDelta32:
664
0
    *loc32 = fixupAddress - targetAddress + ref.addend();
665
0
    return;
666
7
  case lazyPointer:
667
7
    // Do nothing
668
7
    return;
669
14
  case lazyImmediateLocation:
670
14
    *loc32 = ref.addend();
671
14
    return;
672
8
  case imageOffset:
673
8
    *loc32 = (targetAddress - imageBaseAddress) + ref.addend();
674
8
    return;
675
0
  case imageOffsetGot:
676
0
    llvm_unreachable("imageOffsetGot should have been changed to imageOffset");
677
0
    break;
678
0
  case unwindInfoToEhFrame:
679
0
    value64 = targetAddress - findSectionAddress(*ref.target()) + ref.addend();
680
0
    assert(value64 < 0xffffffU && "offset in __eh_frame too large");
681
0
    *loc32 = (*loc32 & 0xff000000U) | value64;
682
0
    return;
683
0
  case invalid:
684
0
    // Fall into llvm_unreachable().
685
0
    break;
686
0
  }
687
0
  
  llvm_unreachable("invalid arm64 Reference Kind");
688
0
}
689
690
/// Applies a fixup when producing relocatable (-r) output.
///
/// Unlike final-image fixup application, most fields are NOT resolved to the
/// real target address here: instruction immediates are zeroed and data words
/// get only the addend (or a local delta), because the paired section
/// relocations emitted by appendSectionRelocations() will carry the actual
/// target for the final link.
///
/// \param ref            the reference (relocation) being applied.
/// \param loc            pointer into the output buffer at the fixup site.
/// \param fixupAddress   address of the fixup location in the output.
/// \param targetAddress  address of the reference's target atom.
/// \param inAtomAddress  address of the atom containing the fixup.
/// \param targetUnnamed  true if the target atom has no symbol name (so the
///                       relocation will be section-relative, not extern).
void ArchHandler_arm64::applyFixupRelocatable(const Reference &ref,
                                              uint8_t *loc,
                                              uint64_t fixupAddress,
                                              uint64_t targetAddress,
                                              uint64_t inAtomAddress,
                                              bool targetUnnamed) {
  if (ref.kindNamespace() != Reference::KindNamespace::mach_o)
    return;
  assert(ref.kindArch() == Reference::KindArch::AArch64);
  // Two little-endian views of the same fixup location; which one is used
  // depends on the width of the field being patched.
  ulittle32_t *loc32 = reinterpret_cast<ulittle32_t *>(loc);
  ulittle64_t *loc64 = reinterpret_cast<ulittle64_t *>(loc);
  switch (static_cast<Arm64Kind>(ref.kindValue())) {
  case branch26:
    // Zero the displacement; the BRANCH26 relocation supplies the target.
    *loc32 = setDisplacementInBranch26(*loc32, 0);
    return;
  case page21:
  case gotPage21:
  case tlvPage21:
    // Zero the ADRP page displacement; the PAGE21-style reloc supplies it.
    *loc32 = setDisplacementInADRP(*loc32, 0);
    return;
  case offset12:
  case offset12scale2:
  case offset12scale4:
  case offset12scale8:
  case offset12scale16:
  case gotOffset12:
  case tlvOffset12:
    // Zero the 12-bit immediate; the PAGEOFF12-style reloc supplies it.
    *loc32 = setImm12(*loc32, 0);
    return;
  case pointer64:
    if (targetUnnamed)
      // Unnamed target: the reloc is section-relative, so the slot must hold
      // the full target address plus addend.
      *loc64 = targetAddress + ref.addend();
    else
      // Named target: extern reloc carries the symbol, slot holds the addend.
      *loc64 = ref.addend();
    return;
  case delta64:
    *loc64 = ref.addend() + inAtomAddress - fixupAddress;
    return;
  case unwindFDEToFunction:
    // We don't emit unwindFDEToFunction in -r mode as they are implicitly
    // generated from the data in the __eh_frame section.  So here we need
    // to use the targetAddress so that we can generate the full relocation
    // when we parse again later.
    *loc64 = targetAddress - fixupAddress;
    return;
  case delta32:
    *loc32 = ref.addend() + inAtomAddress - fixupAddress;
    return;
  case negDelta32:
    // We don't emit negDelta32 in -r mode as they are implicitly
    // generated from the data in the __eh_frame section.  So here we need
    // to use the targetAddress so that we can generate the full relocation
    // when we parse again later.
    *loc32 = fixupAddress - targetAddress + ref.addend();
    return;
  case pointer64ToGOT:
    // POINTER_TO_GOT reloc carries everything; slot is zeroed.
    *loc64 = 0;
    return;
  case delta32ToGOT:
    *loc32 = inAtomAddress - fixupAddress;
    return;
  case unwindCIEToPersonalityFunction:
    // We don't emit unwindCIEToPersonalityFunction in -r mode as they are
    // implicitly generated from the data in the __eh_frame section.  So here we
    // need to use the targetAddress so that we can generate the full relocation
    // when we parse again later.
    *loc32 = targetAddress - fixupAddress;
    return;
  case addOffset12:
    llvm_unreachable("lazy reference kind implies GOT pass was run");
  case lazyPointer:
  case lazyImmediateLocation:
    llvm_unreachable("lazy reference kind implies Stubs pass was run");
  case imageOffset:
  case imageOffsetGot:
  case unwindInfoToEhFrame:
    llvm_unreachable("fixup implies __unwind_info");
    return;
  case invalid:
    // Fall into llvm_unreachable().
    break;
  }
  llvm_unreachable("unknown arm64 Reference Kind");
}
774
775
/// Emits the Mach-O section relocation(s) describing one Reference when
/// writing a relocatable (-r) object file.
///
/// Several kinds require relocation *pairs* whose order is mandated by the
/// arm64 Mach-O ABI: a non-zero addend is expressed as an ARM64_RELOC_ADDEND
/// record immediately preceding the reloc it modifies, and deltas are an
/// ARM64_RELOC_SUBTRACTOR immediately followed by ARM64_RELOC_UNSIGNED.
///
/// \param atom               the atom containing the fixup.
/// \param atomSectionOffset  offset of that atom within its section.
/// \param ref                the reference being converted to relocations.
/// \param symbolIndexForAtom callback mapping an atom to its symbol index.
/// \param sectionIndexForAtom callback mapping an atom to its section index
///                           (used for section-relative relocs).
/// \param addressForAtom     callback mapping an atom to its address (unused
///                           for most kinds here).
/// \param relocs             output list the relocations are appended to.
void ArchHandler_arm64::appendSectionRelocations(
    const DefinedAtom &atom, uint64_t atomSectionOffset, const Reference &ref,
    FindSymbolIndexForAtom symbolIndexForAtom,
    FindSectionIndexForAtom sectionIndexForAtom,
    FindAddressForAtom addressForAtom, normalized::Relocations &relocs) {
  if (ref.kindNamespace() != Reference::KindNamespace::mach_o)
    return;
  assert(ref.kindArch() == Reference::KindArch::AArch64);
  uint32_t sectionOffset = atomSectionOffset + ref.offsetInAtom();
  switch (static_cast<Arm64Kind>(ref.kindValue())) {
  case branch26:
    if (ref.addend()) {
      // ADDEND record must immediately precede the reloc it applies to.
      appendReloc(relocs, sectionOffset, ref.addend(), 0,
                  ARM64_RELOC_ADDEND | rLength4);
      appendReloc(relocs, sectionOffset, symbolIndexForAtom(*ref.target()), 0,
                  ARM64_RELOC_BRANCH26 | rPcRel | rExtern | rLength4);
     } else {
      appendReloc(relocs, sectionOffset, symbolIndexForAtom(*ref.target()), 0,
                  ARM64_RELOC_BRANCH26 | rPcRel | rExtern | rLength4);
    }
    return;
  case page21:
    if (ref.addend()) {
      // ADDEND record must immediately precede the reloc it applies to.
      appendReloc(relocs, sectionOffset, ref.addend(), 0,
                  ARM64_RELOC_ADDEND | rLength4);
      appendReloc(relocs, sectionOffset, symbolIndexForAtom(*ref.target()), 0,
                  ARM64_RELOC_PAGE21 | rPcRel | rExtern | rLength4);
     } else {
      appendReloc(relocs, sectionOffset, symbolIndexForAtom(*ref.target()), 0,
                  ARM64_RELOC_PAGE21 | rPcRel | rExtern | rLength4);
    }
    return;
  case offset12:
  case offset12scale2:
  case offset12scale4:
  case offset12scale8:
  case offset12scale16:
    if (ref.addend()) {
      // ADDEND record must immediately precede the reloc it applies to.
      appendReloc(relocs, sectionOffset, ref.addend(), 0,
                  ARM64_RELOC_ADDEND | rLength4);
      appendReloc(relocs, sectionOffset, symbolIndexForAtom(*ref.target()), 0,
                  ARM64_RELOC_PAGEOFF12  | rExtern | rLength4);
     } else {
      appendReloc(relocs, sectionOffset, symbolIndexForAtom(*ref.target()), 0,
                  ARM64_RELOC_PAGEOFF12 | rExtern | rLength4);
    }
    return;
  case gotPage21:
    // GOT/TLV relocs cannot carry an addend.
    assert(ref.addend() == 0);
    appendReloc(relocs, sectionOffset, symbolIndexForAtom(*ref.target()), 0,
                  ARM64_RELOC_GOT_LOAD_PAGE21 | rPcRel | rExtern | rLength4);
    return;
  case gotOffset12:
    assert(ref.addend() == 0);
    appendReloc(relocs, sectionOffset, symbolIndexForAtom(*ref.target()), 0,
                  ARM64_RELOC_GOT_LOAD_PAGEOFF12 | rExtern | rLength4);
    return;
  case tlvPage21:
    assert(ref.addend() == 0);
    appendReloc(relocs, sectionOffset, symbolIndexForAtom(*ref.target()), 0,
                  ARM64_RELOC_TLVP_LOAD_PAGE21 | rPcRel | rExtern | rLength4);
    return;
  case tlvOffset12:
    assert(ref.addend() == 0);
    appendReloc(relocs, sectionOffset, symbolIndexForAtom(*ref.target()), 0,
                  ARM64_RELOC_TLVP_LOAD_PAGEOFF12 | rExtern | rLength4);
    return;
  case pointer64:
    if (ref.target()->name().empty())
      // Unnamed target: emit a section-relative (non-extern) relocation.
      appendReloc(relocs, sectionOffset, sectionIndexForAtom(*ref.target()), 0,
                  ARM64_RELOC_UNSIGNED           | rLength8);
    else
      appendReloc(relocs, sectionOffset, symbolIndexForAtom(*ref.target()), 0,
                  ARM64_RELOC_UNSIGNED | rExtern | rLength8);
    return;
  case delta64:
    // Delta = target - atom: SUBTRACTOR(atom) must immediately precede
    // UNSIGNED(target).
    appendReloc(relocs, sectionOffset, symbolIndexForAtom(atom), 0,
                ARM64_RELOC_SUBTRACTOR | rExtern | rLength8);
    appendReloc(relocs, sectionOffset, symbolIndexForAtom(*ref.target()), 0,
                ARM64_RELOC_UNSIGNED  | rExtern | rLength8);
    return;
  case delta32:
    // 32-bit variant of the SUBTRACTOR/UNSIGNED pair above.
    appendReloc(relocs, sectionOffset, symbolIndexForAtom(atom), 0,
                ARM64_RELOC_SUBTRACTOR | rExtern | rLength4 );
    appendReloc(relocs, sectionOffset, symbolIndexForAtom(*ref.target()), 0,
                ARM64_RELOC_UNSIGNED   | rExtern | rLength4 );
    return;
  case pointer64ToGOT:
    assert(ref.addend() == 0);
    appendReloc(relocs, sectionOffset, symbolIndexForAtom(*ref.target()), 0,
                  ARM64_RELOC_POINTER_TO_GOT | rExtern | rLength8);
    return;
  case delta32ToGOT:
    assert(ref.addend() == 0);
    appendReloc(relocs, sectionOffset, symbolIndexForAtom(*ref.target()), 0,
                  ARM64_RELOC_POINTER_TO_GOT | rPcRel | rExtern | rLength4);
    return;
  case addOffset12:
    llvm_unreachable("lazy reference kind implies GOT pass was run");
  case lazyPointer:
  case lazyImmediateLocation:
    llvm_unreachable("lazy reference kind implies Stubs pass was run");
  case imageOffset:
  case imageOffsetGot:
    llvm_unreachable("deltas from mach_header can only be in final images");
  case unwindCIEToPersonalityFunction:
  case unwindFDEToFunction:
  case unwindInfoToEhFrame:
  case negDelta32:
    // Do nothing.  These are regenerated from __eh_frame/__unwind_info data
    // when the object is parsed again, so no relocation record is needed.
    return;
  case invalid:
    // Fall into llvm_unreachable().
    break;
  }
  llvm_unreachable("unknown arm64 Reference Kind");
}
892
893
35
std::unique_ptr<mach_o::ArchHandler> ArchHandler::create_arm64() {
894
35
  return std::unique_ptr<mach_o::ArchHandler>(new ArchHandler_arm64());
895
35
}
896
897
} // namespace mach_o
898
} // namespace lld