Coverage Report

Created: 2018-01-17 21:32

/Users/buildslave/jenkins/workspace/clang-stage2-coverage-R/llvm/tools/lld/lib/ReaderWriter/MachO/ArchHandler_arm64.cpp
Line
Count
Source (jump to first uncovered line)
1
//===- lib/FileFormat/MachO/ArchHandler_arm64.cpp -------------------------===//
2
//
3
//                             The LLVM Linker
4
//
5
// This file is distributed under the University of Illinois Open Source
6
// License. See LICENSE.TXT for details.
7
//
8
//===----------------------------------------------------------------------===//
9
10
#include "ArchHandler.h"
11
#include "Atoms.h"
12
#include "MachONormalizedFileBinaryUtils.h"
13
#include "llvm/ADT/StringRef.h"
14
#include "llvm/ADT/StringSwitch.h"
15
#include "llvm/ADT/Triple.h"
16
#include "llvm/Support/Endian.h"
17
#include "llvm/Support/ErrorHandling.h"
18
#include "llvm/Support/Format.h"
19
20
using namespace llvm::MachO;
21
using namespace lld::mach_o::normalized;
22
23
namespace lld {
24
namespace mach_o {
25
26
using llvm::support::ulittle32_t;
27
using llvm::support::ulittle64_t;
28
29
using llvm::support::little32_t;
30
using llvm::support::little64_t;
31
32
class ArchHandler_arm64 : public ArchHandler {
33
public:
34
12
  ArchHandler_arm64() = default;
35
12
  ~ArchHandler_arm64() override = default;
36
37
6
  const Registry::KindStrings *kindStrings() override { return _sKindStrings; }
38
39
45
  Reference::KindArch kindArch() override {
40
45
    return Reference::KindArch::AArch64;
41
45
  }
42
43
  /// Used by GOTPass to locate GOT References
44
0
  bool isGOTAccess(const Reference &ref, bool &canBypassGOT) override {
45
0
    if (ref.kindNamespace() != Reference::KindNamespace::mach_o)
46
0
      return false;
47
0
    assert(ref.kindArch() == Reference::KindArch::AArch64);
48
0
    switch (ref.kindValue()) {
49
0
    case gotPage21:
50
0
    case gotOffset12:
51
0
      canBypassGOT = true;
52
0
      return true;
53
0
    case delta32ToGOT:
54
0
    case unwindCIEToPersonalityFunction:
55
0
    case imageOffsetGot:
56
0
      canBypassGOT = false;
57
0
      return true;
58
0
    default:
59
0
      return false;
60
0
    }
61
0
  }
62
63
  /// Used by GOTPass to update GOT References.
64
0
  void updateReferenceToGOT(const Reference *ref, bool targetNowGOT) override {
65
0
    // If GOT slot was instantiated, transform:
66
0
    //   gotPage21/gotOffset12 -> page21/offset12scale8
67
0
    // If GOT slot optimized away, transform:
68
0
    //   gotPage21/gotOffset12 -> page21/addOffset12
69
0
    assert(ref->kindNamespace() == Reference::KindNamespace::mach_o);
70
0
    assert(ref->kindArch() == Reference::KindArch::AArch64);
71
0
    switch (ref->kindValue()) {
72
0
    case gotPage21:
73
0
      const_cast<Reference *>(ref)->setKindValue(page21);
74
0
      break;
75
0
    case gotOffset12:
76
0
      const_cast<Reference *>(ref)->setKindValue(targetNowGOT ?
77
0
                                                 offset12scale8 : addOffset12);
78
0
      break;
79
0
    case delta32ToGOT:
80
0
      const_cast<Reference *>(ref)->setKindValue(delta32);
81
0
      break;
82
0
    case imageOffsetGot:
83
0
      const_cast<Reference *>(ref)->setKindValue(imageOffset);
84
0
      break;
85
0
    default:
86
0
      llvm_unreachable("Not a GOT reference");
87
0
    }
88
0
  }
89
90
0
  const StubInfo &stubInfo() override { return _sStubInfo; }
91
92
  bool isCallSite(const Reference &) override;
93
0
  bool isNonCallBranch(const Reference &) override {
94
0
    return false;
95
0
  }
96
97
  bool isPointer(const Reference &) override;
98
  bool isPairedReloc(const normalized::Relocation &) override;
99
100
0
  bool needsCompactUnwind() override {
101
0
    return true;
102
0
  }
103
0
  Reference::KindValue imageOffsetKind() override {
104
0
    return imageOffset;
105
0
  }
106
0
  Reference::KindValue imageOffsetKindIndirect() override {
107
0
    return imageOffsetGot;
108
0
  }
109
110
2
  Reference::KindValue unwindRefToPersonalityFunctionKind() override {
111
2
    return unwindCIEToPersonalityFunction;
112
2
  }
113
114
4
  Reference::KindValue unwindRefToCIEKind() override {
115
4
    return negDelta32;
116
4
  }
117
118
8
  Reference::KindValue unwindRefToFunctionKind() override {
119
8
    return unwindFDEToFunction;
120
8
  }
121
122
0
  Reference::KindValue unwindRefToEhFrameKind() override {
123
0
    return unwindInfoToEhFrame;
124
0
  }
125
126
0
  Reference::KindValue pointerKind() override {
127
0
    return pointer64;
128
0
  }
129
130
0
  Reference::KindValue lazyImmediateLocationKind() override {
131
0
    return lazyImmediateLocation;
132
0
  }
133
134
0
  uint32_t dwarfCompactUnwindType() override {
135
0
    return 0x03000000;
136
0
  }
137
138
  llvm::Error getReferenceInfo(const normalized::Relocation &reloc,
139
                               const DefinedAtom *inAtom,
140
                               uint32_t offsetInAtom,
141
                               uint64_t fixupAddress, bool isBig,
142
                               FindAtomBySectionAndAddress atomFromAddress,
143
                               FindAtomBySymbolIndex atomFromSymbolIndex,
144
                               Reference::KindValue *kind,
145
                               const lld::Atom **target,
146
                               Reference::Addend *addend) override;
147
  llvm::Error
148
      getPairReferenceInfo(const normalized::Relocation &reloc1,
149
                           const normalized::Relocation &reloc2,
150
                           const DefinedAtom *inAtom,
151
                           uint32_t offsetInAtom,
152
                           uint64_t fixupAddress, bool isBig, bool scatterable,
153
                           FindAtomBySectionAndAddress atomFromAddress,
154
                           FindAtomBySymbolIndex atomFromSymbolIndex,
155
                           Reference::KindValue *kind,
156
                           const lld::Atom **target,
157
                           Reference::Addend *addend) override;
158
159
19
  bool needsLocalSymbolInRelocatableFile(const DefinedAtom *atom) override {
160
19
    return (atom->contentType() == DefinedAtom::typeCString);
161
19
  }
162
163
  void generateAtomContent(const DefinedAtom &atom, bool relocatable,
164
                           FindAddressForAtom findAddress,
165
                           FindAddressForAtom findSectionAddress,
166
                           uint64_t imageBaseAddress,
167
                    llvm::MutableArrayRef<uint8_t> atomContentBuffer) override;
168
169
  void appendSectionRelocations(const DefinedAtom &atom,
170
                                uint64_t atomSectionOffset,
171
                                const Reference &ref,
172
                                FindSymbolIndexForAtom symbolIndexForAtom,
173
                                FindSectionIndexForAtom sectionIndexForAtom,
174
                                FindAddressForAtom addressForAtom,
175
                                normalized::Relocations &relocs) override;
176
177
private:
178
  static const Registry::KindStrings _sKindStrings[];
179
  static const StubInfo _sStubInfo;
180
181
  enum Arm64Kind : Reference::KindValue {
182
    invalid,               /// for error condition
183
184
    // Kinds found in mach-o .o files:
185
    branch26,              /// ex: bl   _foo
186
    page21,                /// ex: adrp x1, _foo@PAGE
187
    offset12,              /// ex: ldrb w0, [x1, _foo@PAGEOFF]
188
    offset12scale2,        /// ex: ldrs w0, [x1, _foo@PAGEOFF]
189
    offset12scale4,        /// ex: ldr  w0, [x1, _foo@PAGEOFF]
190
    offset12scale8,        /// ex: ldr  x0, [x1, _foo@PAGEOFF]
191
    offset12scale16,       /// ex: ldr  q0, [x1, _foo@PAGEOFF]
192
    gotPage21,             /// ex: adrp x1, _foo@GOTPAGE
193
    gotOffset12,           /// ex: ldr  w0, [x1, _foo@GOTPAGEOFF]
194
    tlvPage21,             /// ex: adrp x1, _foo@TLVPAGE
195
    tlvOffset12,           /// ex: ldr  w0, [x1, _foo@TLVPAGEOFF]
196
197
    pointer64,             /// ex: .quad _foo
198
    delta64,               /// ex: .quad _foo - .
199
    delta32,               /// ex: .long _foo - .
200
    negDelta32,            /// ex: .long . - _foo
201
    pointer64ToGOT,        /// ex: .quad _foo@GOT
202
    delta32ToGOT,          /// ex: .long _foo@GOT - .
203
204
    // Kinds introduced by Passes:
205
    addOffset12,           /// Location contains LDR to change into ADD.
206
    lazyPointer,           /// Location contains a lazy pointer.
207
    lazyImmediateLocation, /// Location contains immediate value used in stub.
208
    imageOffset,           /// Location contains offset of atom in final image
209
    imageOffsetGot,        /// Location contains offset of GOT entry for atom in
210
                           /// final image (typically personality function).
211
    unwindCIEToPersonalityFunction,   /// Nearly delta32ToGOT, but cannot be
212
                           /// rematerialized in relocatable object
213
                           /// (yay for implicit contracts!).
214
    unwindFDEToFunction,   /// Nearly delta64, but cannot be rematerialized in
215
                           /// relocatable object (yay for implicit contracts!).
216
    unwindInfoToEhFrame,   /// Fix low 24 bits of compact unwind encoding to
217
                           /// refer to __eh_frame entry.
218
  };
219
220
  void applyFixupFinal(const Reference &ref, uint8_t *location,
221
                       uint64_t fixupAddress, uint64_t targetAddress,
222
                       uint64_t inAtomAddress, uint64_t imageBaseAddress,
223
                       FindAddressForAtom findSectionAddress);
224
225
  void applyFixupRelocatable(const Reference &ref, uint8_t *location,
226
                             uint64_t fixupAddress, uint64_t targetAddress,
227
                             uint64_t inAtomAddress, bool targetUnnamed);
228
229
  // Utility functions for inspecting/updating instructions.
230
  static uint32_t setDisplacementInBranch26(uint32_t instr, int32_t disp);
231
  static uint32_t setDisplacementInADRP(uint32_t instr, int64_t disp);
232
  static Arm64Kind offset12KindFromInstruction(uint32_t instr);
233
  static uint32_t setImm12(uint32_t instr, uint32_t offset);
234
};
235
236
const Registry::KindStrings ArchHandler_arm64::_sKindStrings[] = {
237
  LLD_KIND_STRING_ENTRY(invalid),
238
  LLD_KIND_STRING_ENTRY(branch26),
239
  LLD_KIND_STRING_ENTRY(page21),
240
  LLD_KIND_STRING_ENTRY(offset12),
241
  LLD_KIND_STRING_ENTRY(offset12scale2),
242
  LLD_KIND_STRING_ENTRY(offset12scale4),
243
  LLD_KIND_STRING_ENTRY(offset12scale8),
244
  LLD_KIND_STRING_ENTRY(offset12scale16),
245
  LLD_KIND_STRING_ENTRY(gotPage21),
246
  LLD_KIND_STRING_ENTRY(gotOffset12),
247
  LLD_KIND_STRING_ENTRY(tlvPage21),
248
  LLD_KIND_STRING_ENTRY(tlvOffset12),
249
  LLD_KIND_STRING_ENTRY(pointer64),
250
  LLD_KIND_STRING_ENTRY(delta64),
251
  LLD_KIND_STRING_ENTRY(delta32),
252
  LLD_KIND_STRING_ENTRY(negDelta32),
253
  LLD_KIND_STRING_ENTRY(pointer64ToGOT),
254
  LLD_KIND_STRING_ENTRY(delta32ToGOT),
255
256
  LLD_KIND_STRING_ENTRY(addOffset12),
257
  LLD_KIND_STRING_ENTRY(lazyPointer),
258
  LLD_KIND_STRING_ENTRY(lazyImmediateLocation),
259
  LLD_KIND_STRING_ENTRY(imageOffset),
260
  LLD_KIND_STRING_ENTRY(imageOffsetGot),
261
  LLD_KIND_STRING_ENTRY(unwindCIEToPersonalityFunction),
262
  LLD_KIND_STRING_ENTRY(unwindFDEToFunction),
263
  LLD_KIND_STRING_ENTRY(unwindInfoToEhFrame),
264
265
  LLD_KIND_STRING_END
266
};
267
268
const ArchHandler::StubInfo ArchHandler_arm64::_sStubInfo = {
269
  "dyld_stub_binder",
270
271
  // Lazy pointer references
272
  { Reference::KindArch::AArch64, pointer64, 0, 0 },
273
  { Reference::KindArch::AArch64, lazyPointer, 0, 0 },
274
275
  // GOT pointer to dyld_stub_binder
276
  { Reference::KindArch::AArch64, pointer64, 0, 0 },
277
278
  // arm64 code alignment 2^1
279
  1,
280
281
  // Stub size and code
282
  12,
283
  { 0x10, 0x00, 0x00, 0x90,   // ADRP  X16, lazy_pointer@page
284
    0x10, 0x02, 0x40, 0xF9,   // LDR   X16, [X16, lazy_pointer@pageoff]
285
    0x00, 0x02, 0x1F, 0xD6 }, // BR    X16
286
  { Reference::KindArch::AArch64, page21, 0, 0 },
287
  { true,                         offset12scale8, 4, 0 },
288
289
  // Stub Helper size and code
290
  12,
291
  { 0x50, 0x00, 0x00, 0x18,   //      LDR   W16, L0
292
    0x00, 0x00, 0x00, 0x14,   //      B     helperhelper
293
    0x00, 0x00, 0x00, 0x00 }, // L0: .long 0
294
  { Reference::KindArch::AArch64, lazyImmediateLocation, 8, 0 },
295
  { Reference::KindArch::AArch64, branch26, 4, 0 },
296
297
  // Stub helper image cache content type
298
  DefinedAtom::typeGOT,
299
300
  // Stub Helper-Common size and code
301
  24,
302
  // Stub helper alignment
303
  2,
304
  { 0x11, 0x00, 0x00, 0x90,   //  ADRP  X17, dyld_ImageLoaderCache@page
305
    0x31, 0x02, 0x00, 0x91,   //  ADD   X17, X17, dyld_ImageLoaderCache@pageoff
306
    0xF0, 0x47, 0xBF, 0xA9,   //  STP   X16/X17, [SP, #-16]!
307
    0x10, 0x00, 0x00, 0x90,   //  ADRP  X16, _fast_lazy_bind@page
308
    0x10, 0x02, 0x40, 0xF9,   //  LDR   X16, [X16,_fast_lazy_bind@pageoff]
309
    0x00, 0x02, 0x1F, 0xD6 }, //  BR    X16
310
  { Reference::KindArch::AArch64, page21,   0, 0 },
311
  { true,                         offset12, 4, 0 },
312
  { Reference::KindArch::AArch64, page21,   12, 0 },
313
  { true,                         offset12scale8, 16, 0 }
314
};
315
316
0
bool ArchHandler_arm64::isCallSite(const Reference &ref) {
317
0
  if (ref.kindNamespace() != Reference::KindNamespace::mach_o)
318
0
    return false;
319
0
  assert(ref.kindArch() == Reference::KindArch::AArch64);
320
0
  return (ref.kindValue() == branch26);
321
0
}
322
323
0
bool ArchHandler_arm64::isPointer(const Reference &ref) {
324
0
  if (ref.kindNamespace() != Reference::KindNamespace::mach_o)
325
0
    return false;
326
0
  assert(ref.kindArch() == Reference::KindArch::AArch64);
327
0
  Reference::KindValue kind = ref.kindValue();
328
0
  return (kind == pointer64);
329
0
}
330
331
30
bool ArchHandler_arm64::isPairedReloc(const Relocation &r) {
332
30
  return ((r.type == ARM64_RELOC_ADDEND) || (r.type == ARM64_RELOC_SUBTRACTOR));
333
30
}
334
335
uint32_t ArchHandler_arm64::setDisplacementInBranch26(uint32_t instr,
336
2
                                                      int32_t displacement) {
337
2
  assert((displacement <= 134217727) && (displacement > (-134217728)) &&
338
2
         "arm64 branch out of range");
339
2
  return (instr & 0xFC000000) | ((uint32_t)(displacement >> 2) & 0x03FFFFFF);
340
2
}
341
342
uint32_t ArchHandler_arm64::setDisplacementInADRP(uint32_t instruction,
343
2
                                                  int64_t displacement) {
344
2
  assert((displacement <= 0x100000000LL) && (displacement > (-0x100000000LL)) &&
345
2
         "arm64 ADRP out of range");
346
2
  assert(((instruction & 0x9F000000) == 0x90000000) &&
347
2
         "reloc not on ADRP instruction");
348
2
  uint32_t immhi = (displacement >> 9) & (0x00FFFFE0);
349
2
  uint32_t immlo = (displacement << 17) & (0x60000000);
350
2
  return (instruction & 0x9F00001F) | immlo | immhi;
351
2
}
352
353
ArchHandler_arm64::Arm64Kind
354
2
ArchHandler_arm64::offset12KindFromInstruction(uint32_t instruction) {
355
2
  if (instruction & 0x08000000) {
356
0
    switch ((instruction >> 30) & 0x3) {
357
0
    case 0:
358
0
      if ((instruction & 0x04800000) == 0x04800000)
359
0
        return offset12scale16;
360
0
      return offset12;
361
0
    case 1:
362
0
      return offset12scale2;
363
0
    case 2:
364
0
      return offset12scale4;
365
0
    case 3:
366
0
      return offset12scale8;
367
2
    }
368
2
  }
369
2
  return offset12;
370
2
}
371
372
2
uint32_t ArchHandler_arm64::setImm12(uint32_t instruction, uint32_t offset) {
373
2
  assert(((offset & 0xFFFFF000) == 0) && "imm12 offset out of range");
374
2
  uint32_t imm12 = offset << 10;
375
2
  return (instruction & 0xFFC003FF) | imm12;
376
2
}
377
378
llvm::Error ArchHandler_arm64::getReferenceInfo(
379
    const Relocation &reloc, const DefinedAtom *inAtom, uint32_t offsetInAtom,
380
    uint64_t fixupAddress, bool isBig,
381
    FindAtomBySectionAndAddress atomFromAddress,
382
    FindAtomBySymbolIndex atomFromSymbolIndex, Reference::KindValue *kind,
383
20
    const lld::Atom **target, Reference::Addend *addend) {
384
20
  const uint8_t *fixupContent = &inAtom->rawContent()[offsetInAtom];
385
20
  switch (relocPattern(reloc)) {
386
20
  case ARM64_RELOC_BRANCH26           | rPcRel | rExtern | rLength4:
387
2
    // ex: bl _foo
388
2
    *kind = branch26;
389
2
    if (auto ec = atomFromSymbolIndex(reloc.symbol, target))
390
0
      return ec;
391
2
    *addend = 0;
392
2
    return llvm::Error::success();
393
2
  case ARM64_RELOC_PAGE21             | rPcRel | rExtern | rLength4:
394
2
    // ex: adrp x1, _foo@PAGE
395
2
    *kind = page21;
396
2
    if (auto ec = atomFromSymbolIndex(reloc.symbol, target))
397
0
      return ec;
398
2
    *addend = 0;
399
2
    return llvm::Error::success();
400
2
  case ARM64_RELOC_PAGEOFF12                   | rExtern | rLength4:
401
2
    // ex: ldr x0, [x1, _foo@PAGEOFF]
402
2
    *kind = offset12KindFromInstruction(*(const little32_t *)fixupContent);
403
2
    if (auto ec = atomFromSymbolIndex(reloc.symbol, target))
404
0
      return ec;
405
2
    *addend = 0;
406
2
    return llvm::Error::success();
407
2
  case ARM64_RELOC_GOT_LOAD_PAGE21    | rPcRel | rExtern | rLength4:
408
0
    // ex: adrp x1, _foo@GOTPAGE
409
0
    *kind = gotPage21;
410
0
    if (auto ec = atomFromSymbolIndex(reloc.symbol, target))
411
0
      return ec;
412
0
    *addend = 0;
413
0
    return llvm::Error::success();
414
0
  case ARM64_RELOC_GOT_LOAD_PAGEOFF12          | rExtern | rLength4:
415
0
    // ex: ldr x0, [x1, _foo@GOTPAGEOFF]
416
0
    *kind = gotOffset12;
417
0
    if (auto ec = atomFromSymbolIndex(reloc.symbol, target))
418
0
      return ec;
419
0
    *addend = 0;
420
0
    return llvm::Error::success();
421
0
  case ARM64_RELOC_TLVP_LOAD_PAGE21   | rPcRel | rExtern | rLength4:
422
0
    // ex: adrp x1, _foo@TLVPAGE
423
0
    *kind = tlvPage21;
424
0
    if (auto ec = atomFromSymbolIndex(reloc.symbol, target))
425
0
      return ec;
426
0
    *addend = 0;
427
0
    return llvm::Error::success();
428
0
  case ARM64_RELOC_TLVP_LOAD_PAGEOFF12         | rExtern | rLength4:
429
0
    // ex: ldr x0, [x1, _foo@TLVPAGEOFF]
430
0
    *kind = tlvOffset12;
431
0
    if (auto ec = atomFromSymbolIndex(reloc.symbol, target))
432
0
      return ec;
433
0
    *addend = 0;
434
0
    return llvm::Error::success();
435
4
  case ARM64_RELOC_UNSIGNED                    | rExtern | rLength8:
436
4
    // ex: .quad _foo + N
437
4
    *kind = pointer64;
438
4
    if (auto ec = atomFromSymbolIndex(reloc.symbol, target))
439
0
      return ec;
440
4
    *addend = *(const little64_t *)fixupContent;
441
4
    return llvm::Error::success();
442
6
  case ARM64_RELOC_UNSIGNED                              | rLength8:
443
6
     // ex: .quad Lfoo + N
444
6
     *kind = pointer64;
445
6
     return atomFromAddress(reloc.symbol, *(const little64_t *)fixupContent,
446
6
                            target, addend);
447
4
  case ARM64_RELOC_POINTER_TO_GOT              | rExtern | rLength8:
448
1
    // ex: .quad _foo@GOT
449
1
    *kind = pointer64ToGOT;
450
1
    if (auto ec = atomFromSymbolIndex(reloc.symbol, target))
451
0
      return ec;
452
1
    *addend = 0;
453
1
    return llvm::Error::success();
454
3
  case ARM64_RELOC_POINTER_TO_GOT     | rPcRel | rExtern | rLength4:
455
3
    // ex: .long _foo@GOT - .
456
3
457
3
    // If we are in an .eh_frame section, then the kind of the relocation should
458
3
    // not be delta32ToGOT.  It may instead be unwindCIEToPersonalityFunction.
459
3
    if (inAtom->contentType() == DefinedAtom::typeCFI)
460
2
      *kind = unwindCIEToPersonalityFunction;
461
1
    else
462
1
      *kind = delta32ToGOT;
463
3
464
3
    if (auto ec = atomFromSymbolIndex(reloc.symbol, target))
465
0
      return ec;
466
3
    *addend = 0;
467
3
    return llvm::Error::success();
468
3
  default:
469
0
    return llvm::make_error<GenericError>("unsupported arm64 relocation type");
470
0
  }
471
0
}
472
473
llvm::Error ArchHandler_arm64::getPairReferenceInfo(
474
    const normalized::Relocation &reloc1, const normalized::Relocation &reloc2,
475
    const DefinedAtom *inAtom, uint32_t offsetInAtom, uint64_t fixupAddress,
476
    bool swap, bool scatterable, FindAtomBySectionAndAddress atomFromAddress,
477
    FindAtomBySymbolIndex atomFromSymbolIndex, Reference::KindValue *kind,
478
10
    const lld::Atom **target, Reference::Addend *addend) {
479
10
  const uint8_t *fixupContent = &inAtom->rawContent()[offsetInAtom];
480
10
  switch (relocPattern(reloc1) << 16 | relocPattern(reloc2)) {
481
10
  case ((ARM64_RELOC_ADDEND                                | rLength4) << 16 |
482
0
         ARM64_RELOC_BRANCH26           | rPcRel | rExtern | rLength4):
483
0
    // ex: bl _foo+8
484
0
    *kind = branch26;
485
0
    if (auto ec = atomFromSymbolIndex(reloc2.symbol, target))
486
0
      return ec;
487
0
    *addend = reloc1.symbol;
488
0
    return llvm::Error::success();
489
0
  case ((ARM64_RELOC_ADDEND                                | rLength4) << 16 |
490
0
         ARM64_RELOC_PAGE21             | rPcRel | rExtern | rLength4):
491
0
    // ex: adrp x1, _foo@PAGE
492
0
    *kind = page21;
493
0
    if (auto ec = atomFromSymbolIndex(reloc2.symbol, target))
494
0
      return ec;
495
0
    *addend = reloc1.symbol;
496
0
    return llvm::Error::success();
497
0
  case ((ARM64_RELOC_ADDEND                                | rLength4) << 16 |
498
0
         ARM64_RELOC_PAGEOFF12                   | rExtern | rLength4): {
499
0
    // ex: ldr w0, [x1, _foo@PAGEOFF]
500
0
    uint32_t cont32 = (int32_t)*(const little32_t *)fixupContent;
501
0
    *kind = offset12KindFromInstruction(cont32);
502
0
    if (auto ec = atomFromSymbolIndex(reloc2.symbol, target))
503
0
      return ec;
504
0
    *addend = reloc1.symbol;
505
0
    return llvm::Error::success();
506
0
  }
507
8
  case ((ARM64_RELOC_SUBTRACTOR                  | rExtern | rLength8) << 16 |
508
8
         ARM64_RELOC_UNSIGNED                    | rExtern | rLength8):
509
8
    // ex: .quad _foo - .
510
8
    if (auto ec = atomFromSymbolIndex(reloc2.symbol, target))
511
0
      return ec;
512
8
513
8
    // If we are in an .eh_frame section, then the kind of the relocation should
514
8
    // not be delta64.  It may instead be unwindFDEToFunction.
515
8
    if (inAtom->contentType() == DefinedAtom::typeCFI)
516
4
      *kind = unwindFDEToFunction;
517
4
    else
518
4
      *kind = delta64;
519
8
520
8
    // The offsets of the 2 relocations must match
521
8
    if (reloc1.offset != reloc2.offset)
522
1
      return llvm::make_error<GenericError>(
523
1
                                    "paired relocs must have the same offset");
524
7
    *addend = (int64_t)*(const little64_t *)fixupContent + offsetInAtom;
525
7
    return llvm::Error::success();
526
7
  case ((ARM64_RELOC_SUBTRACTOR                  | rExtern | rLength4) << 16 |
527
2
         ARM64_RELOC_UNSIGNED                    | rExtern | rLength4):
528
2
    // ex: .long _foo - .
529
2
    *kind = delta32;
530
2
    if (auto ec = atomFromSymbolIndex(reloc2.symbol, target))
531
0
      return ec;
532
2
    *addend = (int32_t)*(const little32_t *)fixupContent + offsetInAtom;
533
2
    return llvm::Error::success();
534
2
  default:
535
0
    return llvm::make_error<GenericError>("unsupported arm64 relocation pair");
536
0
  }
537
0
}
538
539
void ArchHandler_arm64::generateAtomContent(
540
    const DefinedAtom &atom, bool relocatable, FindAddressForAtom findAddress,
541
    FindAddressForAtom findSectionAddress, uint64_t imageBaseAddress,
542
33
    llvm::MutableArrayRef<uint8_t> atomContentBuffer) {
543
33
  // Copy raw bytes.
544
33
  std::copy(atom.rawContent().begin(), atom.rawContent().end(),
545
33
            atomContentBuffer.begin());
546
33
  // Apply fix-ups.
547
#ifndef NDEBUG
548
  if (atom.begin() != atom.end()) {
549
    DEBUG_WITH_TYPE("atom-content", llvm::dbgs()
550
                    << "Applying fixups to atom:\n"
551
                    << "   address="
552
                    << llvm::format("    0x%09lX", &atom)
553
                    << ", file=#"
554
                    << atom.file().ordinal()
555
                    << ", atom=#"
556
                    << atom.ordinal()
557
                    << ", name="
558
                    << atom.name()
559
                    << ", type="
560
                    << atom.contentType()
561
                    << "\n");
562
  }
563
#endif
564
39
  for (const Reference *ref : atom) {
565
39
    uint32_t offset = ref->offsetInAtom();
566
39
    const Atom *target = ref->target();
567
39
    bool targetUnnamed = target->name().empty();
568
39
    uint64_t targetAddress = 0;
569
39
    if (isa<DefinedAtom>(target))
570
27
      targetAddress = findAddress(*target);
571
39
    uint64_t atomAddress = findAddress(atom);
572
39
    uint64_t fixupAddress = atomAddress + offset;
573
39
    if (relocatable) {
574
39
      applyFixupRelocatable(*ref, &atomContentBuffer[offset], fixupAddress,
575
39
                            targetAddress, atomAddress, targetUnnamed);
576
39
    } else {
577
0
      applyFixupFinal(*ref, &atomContentBuffer[offset], fixupAddress,
578
0
                      targetAddress, atomAddress, imageBaseAddress,
579
0
                      findSectionAddress);
580
0
    }
581
39
  }
582
33
}
583
584
void ArchHandler_arm64::applyFixupFinal(const Reference &ref, uint8_t *loc,
585
                                        uint64_t fixupAddress,
586
                                        uint64_t targetAddress,
587
                                        uint64_t inAtomAddress,
588
                                        uint64_t imageBaseAddress,
589
0
                                        FindAddressForAtom findSectionAddress) {
590
0
  if (ref.kindNamespace() != Reference::KindNamespace::mach_o)
591
0
    return;
592
0
  assert(ref.kindArch() == Reference::KindArch::AArch64);
593
0
  ulittle32_t *loc32 = reinterpret_cast<ulittle32_t *>(loc);
594
0
  ulittle64_t *loc64 = reinterpret_cast<ulittle64_t *>(loc);
595
0
  int32_t displacement;
596
0
  uint32_t instruction;
597
0
  uint32_t value32;
598
0
  uint32_t value64;
599
0
  switch (static_cast<Arm64Kind>(ref.kindValue())) {
600
0
  case branch26:
601
0
    displacement = (targetAddress - fixupAddress) + ref.addend();
602
0
    *loc32 = setDisplacementInBranch26(*loc32, displacement);
603
0
    return;
604
0
  case page21:
605
0
  case gotPage21:
606
0
  case tlvPage21:
607
0
    displacement =
608
0
        ((targetAddress + ref.addend()) & (-4096)) - (fixupAddress & (-4096));
609
0
    *loc32 = setDisplacementInADRP(*loc32, displacement);
610
0
    return;
611
0
  case offset12:
612
0
  case gotOffset12:
613
0
  case tlvOffset12:
614
0
    displacement = (targetAddress + ref.addend()) & 0x00000FFF;
615
0
    *loc32 = setImm12(*loc32, displacement);
616
0
    return;
617
0
  case offset12scale2:
618
0
    displacement = (targetAddress + ref.addend()) & 0x00000FFF;
619
0
    assert(((displacement & 0x1) == 0) &&
620
0
           "scaled imm12 not accessing 2-byte aligneds");
621
0
    *loc32 = setImm12(*loc32, displacement >> 1);
622
0
    return;
623
0
  case offset12scale4:
624
0
    displacement = (targetAddress + ref.addend()) & 0x00000FFF;
625
0
    assert(((displacement & 0x3) == 0) &&
626
0
           "scaled imm12 not accessing 4-byte aligned");
627
0
    *loc32 = setImm12(*loc32, displacement >> 2);
628
0
    return;
629
0
  case offset12scale8:
630
0
    displacement = (targetAddress + ref.addend()) & 0x00000FFF;
631
0
    assert(((displacement & 0x7) == 0) &&
632
0
           "scaled imm12 not accessing 8-byte aligned");
633
0
    *loc32 = setImm12(*loc32, displacement >> 3);
634
0
    return;
635
0
  case offset12scale16:
636
0
    displacement = (targetAddress + ref.addend()) & 0x00000FFF;
637
0
    assert(((displacement & 0xF) == 0) &&
638
0
           "scaled imm12 not accessing 16-byte aligned");
639
0
    *loc32 = setImm12(*loc32, displacement >> 4);
640
0
    return;
641
0
  case addOffset12:
642
0
    instruction = *loc32;
643
0
    assert(((instruction & 0xFFC00000) == 0xF9400000) &&
644
0
           "GOT reloc is not an LDR instruction");
645
0
    displacement = (targetAddress + ref.addend()) & 0x00000FFF;
646
0
    value32 = 0x91000000 | (instruction & 0x000003FF);
647
0
    instruction = setImm12(value32, displacement);
648
0
    *loc32 = instruction;
649
0
    return;
650
0
  case pointer64:
651
0
  case pointer64ToGOT:
652
0
    *loc64 = targetAddress + ref.addend();
653
0
    return;
654
0
  case delta64:
655
0
  case unwindFDEToFunction:
656
0
    *loc64 = (targetAddress - fixupAddress) + ref.addend();
657
0
    return;
658
0
  case delta32:
659
0
  case delta32ToGOT:
660
0
  case unwindCIEToPersonalityFunction:
661
0
    *loc32 = (targetAddress - fixupAddress) + ref.addend();
662
0
    return;
663
0
  case negDelta32:
664
0
    *loc32 = fixupAddress - targetAddress + ref.addend();
665
0
    return;
666
0
  case lazyPointer:
667
0
    // Do nothing
668
0
    return;
669
0
  case lazyImmediateLocation:
670
0
    *loc32 = ref.addend();
671
0
    return;
672
0
  case imageOffset:
673
0
    *loc32 = (targetAddress - imageBaseAddress) + ref.addend();
674
0
    return;
675
0
  case imageOffsetGot:
676
0
    llvm_unreachable("imageOffsetGot should have been changed to imageOffset");
677
0
    break;
678
0
  case unwindInfoToEhFrame:
679
0
    value64 = targetAddress - findSectionAddress(*ref.target()) + ref.addend();
680
0
    assert(value64 < 0xffffffU && "offset in __eh_frame too large");
681
0
    *loc32 = (*loc32 & 0xff000000U) | value64;
682
0
    return;
683
0
  case invalid:
684
0
    // Fall into llvm_unreachable().
685
0
    break;
686
0
  }
687
0
  llvm_unreachable("invalid arm64 Reference Kind");
688
0
}
689
690
void ArchHandler_arm64::applyFixupRelocatable(const Reference &ref,
691
                                              uint8_t *loc,
692
                                              uint64_t fixupAddress,
693
                                              uint64_t targetAddress,
694
                                              uint64_t inAtomAddress,
695
39
                                              bool targetUnnamed) {
696
39
  if (ref.kindNamespace() != Reference::KindNamespace::mach_o)
697
0
    return;
698
39
  assert(ref.kindArch() == Reference::KindArch::AArch64);
699
39
  ulittle32_t *loc32 = reinterpret_cast<ulittle32_t *>(loc);
700
39
  ulittle64_t *loc64 = reinterpret_cast<ulittle64_t *>(loc);
701
39
  switch (static_cast<Arm64Kind>(ref.kindValue())) {
702
39
  case branch26:
703
2
    *loc32 = setDisplacementInBranch26(*loc32, 0);
704
2
    return;
705
39
  case page21:
706
2
  case gotPage21:
707
2
  case tlvPage21:
708
2
    *loc32 = setDisplacementInADRP(*loc32, 0);
709
2
    return;
710
2
  case offset12:
711
2
  case offset12scale2:
712
2
  case offset12scale4:
713
2
  case offset12scale8:
714
2
  case offset12scale16:
715
2
  case gotOffset12:
716
2
  case tlvOffset12:
717
2
    *loc32 = setImm12(*loc32, 0);
718
2
    return;
719
10
  case pointer64:
720
10
    if (targetUnnamed)
721
1
      *loc64 = targetAddress + ref.addend();
722
9
    else
723
9
      *loc64 = ref.addend();
724
10
    return;
725
3
  case delta64:
726
3
    *loc64 = ref.addend() + inAtomAddress - fixupAddress;
727
3
    return;
728
8
  case unwindFDEToFunction:
729
8
    // We don't emit unwindFDEToFunction in -r mode as they are implicitly
730
8
    // generated from the data in the __eh_frame section.  So here we need
731
8
    // to use the targetAddress so that we can generate the full relocation
732
8
    // when we parse again later.
733
8
    *loc64 = targetAddress - fixupAddress;
734
8
    return;
735
2
  case delta32:
736
2
    *loc32 = ref.addend() + inAtomAddress - fixupAddress;
737
2
    return;
738
4
  case negDelta32:
739
4
    // We don't emit negDelta32 in -r mode as they are implicitly
740
4
    // generated from the data in the __eh_frame section.  So here we need
741
4
    // to use the targetAddress so that we can generate the full relocation
742
4
    // when we parse again later.
743
4
    *loc32 = fixupAddress - targetAddress + ref.addend();
744
4
    return;
745
2
  case pointer64ToGOT:
746
1
    *loc64 = 0;
747
1
    return;
748
2
  case delta32ToGOT:
749
1
    *loc32 = inAtomAddress - fixupAddress;
750
1
    return;
751
4
  case unwindCIEToPersonalityFunction:
752
4
    // We don't emit unwindCIEToPersonalityFunction in -r mode as they are
753
4
    // implicitly generated from the data in the __eh_frame section.  So here we
754
4
    // need to use the targetAddress so that we can generate the full relocation
755
4
    // when we parse again later.
756
4
    *loc32 = targetAddress - fixupAddress;
757
4
    return;
758
2
  case addOffset12:
759
0
    llvm_unreachable("lazy reference kind implies GOT pass was run");
760
2
  case lazyPointer:
761
0
  case lazyImmediateLocation:
762
0
    llvm_unreachable("lazy reference kind implies Stubs pass was run");
763
0
  case imageOffset:
764
0
  case imageOffsetGot:
765
0
  case unwindInfoToEhFrame:
766
0
    llvm_unreachable("fixup implies __unwind_info");
767
0
    return;
768
0
  case invalid:
769
0
    // Fall into llvm_unreachable().
770
0
    break;
771
0
  }
772
0
  llvm_unreachable("unknown arm64 Reference Kind");
773
0
}
774
775
void ArchHandler_arm64::appendSectionRelocations(
776
    const DefinedAtom &atom, uint64_t atomSectionOffset, const Reference &ref,
777
    FindSymbolIndexForAtom symbolIndexForAtom,
778
    FindSectionIndexForAtom sectionIndexForAtom,
779
23
    FindAddressForAtom addressForAtom, normalized::Relocations &relocs) {
780
23
  if (ref.kindNamespace() != Reference::KindNamespace::mach_o)
781
0
    return;
782
23
  assert(ref.kindArch() == Reference::KindArch::AArch64);
783
23
  uint32_t sectionOffset = atomSectionOffset + ref.offsetInAtom();
784
23
  switch (static_cast<Arm64Kind>(ref.kindValue())) {
785
23
  case branch26:
786
2
    if (ref.addend()) {
787
0
      appendReloc(relocs, sectionOffset, ref.addend(), 0,
788
0
                  ARM64_RELOC_ADDEND | rLength4);
789
0
      appendReloc(relocs, sectionOffset, symbolIndexForAtom(*ref.target()), 0,
790
0
                  ARM64_RELOC_BRANCH26 | rPcRel | rExtern | rLength4);
791
2
     } else {
792
2
      appendReloc(relocs, sectionOffset, symbolIndexForAtom(*ref.target()), 0,
793
2
                  ARM64_RELOC_BRANCH26 | rPcRel | rExtern | rLength4);
794
2
    }
795
2
    return;
796
23
  case page21:
797
2
    if (ref.addend()) {
798
0
      appendReloc(relocs, sectionOffset, ref.addend(), 0,
799
0
                  ARM64_RELOC_ADDEND | rLength4);
800
0
      appendReloc(relocs, sectionOffset, symbolIndexForAtom(*ref.target()), 0,
801
0
                  ARM64_RELOC_PAGE21 | rPcRel | rExtern | rLength4);
802
2
     } else {
803
2
      appendReloc(relocs, sectionOffset, symbolIndexForAtom(*ref.target()), 0,
804
2
                  ARM64_RELOC_PAGE21 | rPcRel | rExtern | rLength4);
805
2
    }
806
2
    return;
807
23
  case offset12:
808
2
  case offset12scale2:
809
2
  case offset12scale4:
810
2
  case offset12scale8:
811
2
  case offset12scale16:
812
2
    if (ref.addend()) {
813
0
      appendReloc(relocs, sectionOffset, ref.addend(), 0,
814
0
                  ARM64_RELOC_ADDEND | rLength4);
815
0
      appendReloc(relocs, sectionOffset, symbolIndexForAtom(*ref.target()), 0,
816
0
                  ARM64_RELOC_PAGEOFF12  | rExtern | rLength4);
817
2
     } else {
818
2
      appendReloc(relocs, sectionOffset, symbolIndexForAtom(*ref.target()), 0,
819
2
                  ARM64_RELOC_PAGEOFF12 | rExtern | rLength4);
820
2
    }
821
2
    return;
822
2
  case gotPage21:
823
0
    assert(ref.addend() == 0);
824
0
    appendReloc(relocs, sectionOffset, symbolIndexForAtom(*ref.target()), 0,
825
0
                  ARM64_RELOC_GOT_LOAD_PAGE21 | rPcRel | rExtern | rLength4);
826
0
    return;
827
2
  case gotOffset12:
828
0
    assert(ref.addend() == 0);
829
0
    appendReloc(relocs, sectionOffset, symbolIndexForAtom(*ref.target()), 0,
830
0
                  ARM64_RELOC_GOT_LOAD_PAGEOFF12 | rExtern | rLength4);
831
0
    return;
832
2
  case tlvPage21:
833
0
    assert(ref.addend() == 0);
834
0
    appendReloc(relocs, sectionOffset, symbolIndexForAtom(*ref.target()), 0,
835
0
                  ARM64_RELOC_TLVP_LOAD_PAGE21 | rPcRel | rExtern | rLength4);
836
0
    return;
837
2
  case tlvOffset12:
838
0
    assert(ref.addend() == 0);
839
0
    appendReloc(relocs, sectionOffset, symbolIndexForAtom(*ref.target()), 0,
840
0
                  ARM64_RELOC_TLVP_LOAD_PAGEOFF12 | rExtern | rLength4);
841
0
    return;
842
10
  case pointer64:
843
10
    if (ref.target()->name().empty())
844
1
      appendReloc(relocs, sectionOffset, sectionIndexForAtom(*ref.target()), 0,
845
1
                  ARM64_RELOC_UNSIGNED           | rLength8);
846
9
    else
847
9
      appendReloc(relocs, sectionOffset, symbolIndexForAtom(*ref.target()), 0,
848
9
                  ARM64_RELOC_UNSIGNED | rExtern | rLength8);
849
10
    return;
850
3
  case delta64:
851
3
    appendReloc(relocs, sectionOffset, symbolIndexForAtom(atom), 0,
852
3
                ARM64_RELOC_SUBTRACTOR | rExtern | rLength8);
853
3
    appendReloc(relocs, sectionOffset, symbolIndexForAtom(*ref.target()), 0,
854
3
                ARM64_RELOC_UNSIGNED  | rExtern | rLength8);
855
3
    return;
856
2
  case delta32:
857
2
    appendReloc(relocs, sectionOffset, symbolIndexForAtom(atom), 0,
858
2
                ARM64_RELOC_SUBTRACTOR | rExtern | rLength4 );
859
2
    appendReloc(relocs, sectionOffset, symbolIndexForAtom(*ref.target()), 0,
860
2
                ARM64_RELOC_UNSIGNED   | rExtern | rLength4 );
861
2
    return;
862
2
  case pointer64ToGOT:
863
1
    assert(ref.addend() == 0);
864
1
    appendReloc(relocs, sectionOffset, symbolIndexForAtom(*ref.target()), 0,
865
1
                  ARM64_RELOC_POINTER_TO_GOT | rExtern | rLength8);
866
1
    return;
867
2
  case delta32ToGOT:
868
1
    assert(ref.addend() == 0);
869
1
    appendReloc(relocs, sectionOffset, symbolIndexForAtom(*ref.target()), 0,
870
1
                  ARM64_RELOC_POINTER_TO_GOT | rPcRel | rExtern | rLength4);
871
1
    return;
872
2
  case addOffset12:
873
0
    llvm_unreachable("lazy reference kind implies GOT pass was run");
874
2
  case lazyPointer:
875
0
  case lazyImmediateLocation:
876
0
    llvm_unreachable("lazy reference kind implies Stubs pass was run");
877
0
  case imageOffset:
878
0
  case imageOffsetGot:
879
0
    llvm_unreachable("deltas from mach_header can only be in final images");
880
0
  case unwindCIEToPersonalityFunction:
881
0
  case unwindFDEToFunction:
882
0
  case unwindInfoToEhFrame:
883
0
  case negDelta32:
884
0
    // Do nothing.
885
0
    return;
886
0
  case invalid:
887
0
    // Fall into llvm_unreachable().
888
0
    break;
889
0
  }
890
0
  llvm_unreachable("unknown arm64 Reference Kind");
891
0
}
892
893
12
std::unique_ptr<mach_o::ArchHandler> ArchHandler::create_arm64() {
894
12
  return std::unique_ptr<mach_o::ArchHandler>(new ArchHandler_arm64());
895
12
}
896
897
} // namespace mach_o
898
} // namespace lld