Coverage Report

Created: 2019-05-19 14:56

/Users/buildslave/jenkins/workspace/clang-stage2-coverage-R/llvm/tools/lld/lib/ReaderWriter/MachO/ArchHandler_arm64.cpp
Line
Count
Source (jump to first uncovered line)
1
//===- lib/FileFormat/MachO/ArchHandler_arm64.cpp -------------------------===//
2
//
3
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4
// See https://llvm.org/LICENSE.txt for license information.
5
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6
//
7
//===----------------------------------------------------------------------===//
8
9
#include "ArchHandler.h"
10
#include "Atoms.h"
11
#include "MachONormalizedFileBinaryUtils.h"
12
#include "llvm/ADT/StringRef.h"
13
#include "llvm/ADT/StringSwitch.h"
14
#include "llvm/ADT/Triple.h"
15
#include "llvm/Support/Endian.h"
16
#include "llvm/Support/ErrorHandling.h"
17
#include "llvm/Support/Format.h"
18
19
using namespace llvm::MachO;
20
using namespace lld::mach_o::normalized;
21
22
namespace lld {
23
namespace mach_o {
24
25
using llvm::support::ulittle32_t;
26
using llvm::support::ulittle64_t;
27
28
using llvm::support::little32_t;
29
using llvm::support::little64_t;
30
31
class ArchHandler_arm64 : public ArchHandler {
32
public:
33
35
  ArchHandler_arm64() = default;
34
35
  ~ArchHandler_arm64() override = default;
35
36
18
  const Registry::KindStrings *kindStrings() override { return _sKindStrings; }
37
38
156
  Reference::KindArch kindArch() override {
39
156
    return Reference::KindArch::AArch64;
40
156
  }
41
42
  /// Used by GOTPass to locate GOT References
43
96
  bool isGOTAccess(const Reference &ref, bool &canBypassGOT) override {
44
96
    if (ref.kindNamespace() != Reference::KindNamespace::mach_o)
45
0
      return false;
46
96
    assert(ref.kindArch() == Reference::KindArch::AArch64);
47
96
    switch (ref.kindValue()) {
48
96
    case gotPage21:
49
8
    case gotOffset12:
50
8
      canBypassGOT = true;
51
8
      return true;
52
8
    case delta32ToGOT:
53
3
    case unwindCIEToPersonalityFunction:
54
3
    case imageOffsetGot:
55
3
      canBypassGOT = false;
56
3
      return true;
57
85
    default:
58
85
      return false;
59
96
    }
60
96
  }
61
62
  /// Used by GOTPass to update GOT References.
63
11
  void updateReferenceToGOT(const Reference *ref, bool targetNowGOT) override {
64
11
    // If GOT slot was instanciated, transform:
65
11
    //   gotPage21/gotOffset12 -> page21/offset12scale8
66
11
    // If GOT slot optimized away, transform:
67
11
    //   gotPage21/gotOffset12 -> page21/addOffset12
68
11
    assert(ref->kindNamespace() == Reference::KindNamespace::mach_o);
69
11
    assert(ref->kindArch() == Reference::KindArch::AArch64);
70
11
    switch (ref->kindValue()) {
71
11
    case gotPage21:
72
4
      const_cast<Reference *>(ref)->setKindValue(page21);
73
4
      break;
74
11
    case gotOffset12:
75
4
      const_cast<Reference *>(ref)->setKindValue(targetNowGOT ?
76
4
                                                 offset12scale8 : 
addOffset120
);
77
4
      break;
78
11
    case delta32ToGOT:
79
2
      const_cast<Reference *>(ref)->setKindValue(delta32);
80
2
      break;
81
11
    case imageOffsetGot:
82
1
      const_cast<Reference *>(ref)->setKindValue(imageOffset);
83
1
      break;
84
11
    default:
85
0
      llvm_unreachable("Not a GOT reference");
86
11
    }
87
11
  }
88
89
130
  const StubInfo &stubInfo() override { return _sStubInfo; }
90
91
  bool isCallSite(const Reference &) override;
92
0
  bool isNonCallBranch(const Reference &) override {
93
0
    return false;
94
0
  }
95
96
  bool isPointer(const Reference &) override;
97
  bool isPairedReloc(const normalized::Relocation &) override;
98
99
5
  bool needsCompactUnwind() override {
100
5
    return true;
101
5
  }
102
7
  Reference::KindValue imageOffsetKind() override {
103
7
    return imageOffset;
104
7
  }
105
1
  Reference::KindValue imageOffsetKindIndirect() override {
106
1
    return imageOffsetGot;
107
1
  }
108
109
4
  Reference::KindValue unwindRefToPersonalityFunctionKind() override {
110
4
    return unwindCIEToPersonalityFunction;
111
4
  }
112
113
8
  Reference::KindValue unwindRefToCIEKind() override {
114
8
    return negDelta32;
115
8
  }
116
117
14
  Reference::KindValue unwindRefToFunctionKind() override {
118
14
    return unwindFDEToFunction;
119
14
  }
120
121
0
  Reference::KindValue unwindRefToEhFrameKind() override {
122
0
    return unwindInfoToEhFrame;
123
0
  }
124
125
0
  Reference::KindValue pointerKind() override {
126
0
    return pointer64;
127
0
  }
128
129
7
  Reference::KindValue lazyImmediateLocationKind() override {
130
7
    return lazyImmediateLocation;
131
7
  }
132
133
6
  uint32_t dwarfCompactUnwindType() override {
134
6
    return 0x03000000;
135
6
  }
136
137
  llvm::Error getReferenceInfo(const normalized::Relocation &reloc,
138
                               const DefinedAtom *inAtom,
139
                               uint32_t offsetInAtom,
140
                               uint64_t fixupAddress, bool isBig,
141
                               FindAtomBySectionAndAddress atomFromAddress,
142
                               FindAtomBySymbolIndex atomFromSymbolIndex,
143
                               Reference::KindValue *kind,
144
                               const lld::Atom **target,
145
                               Reference::Addend *addend) override;
146
  llvm::Error
147
      getPairReferenceInfo(const normalized::Relocation &reloc1,
148
                           const normalized::Relocation &reloc2,
149
                           const DefinedAtom *inAtom,
150
                           uint32_t offsetInAtom,
151
                           uint64_t fixupAddress, bool isBig, bool scatterable,
152
                           FindAtomBySectionAndAddress atomFromAddress,
153
                           FindAtomBySymbolIndex atomFromSymbolIndex,
154
                           Reference::KindValue *kind,
155
                           const lld::Atom **target,
156
                           Reference::Addend *addend) override;
157
158
36
  bool needsLocalSymbolInRelocatableFile(const DefinedAtom *atom) override {
159
36
    return (atom->contentType() == DefinedAtom::typeCString);
160
36
  }
161
162
  void generateAtomContent(const DefinedAtom &atom, bool relocatable,
163
                           FindAddressForAtom findAddress,
164
                           FindAddressForAtom findSectionAddress,
165
                           uint64_t imageBaseAddress,
166
                    llvm::MutableArrayRef<uint8_t> atomContentBuffer) override;
167
168
  void appendSectionRelocations(const DefinedAtom &atom,
169
                                uint64_t atomSectionOffset,
170
                                const Reference &ref,
171
                                FindSymbolIndexForAtom symbolIndexForAtom,
172
                                FindSectionIndexForAtom sectionIndexForAtom,
173
                                FindAddressForAtom addressForAtom,
174
                                normalized::Relocations &relocs) override;
175
176
private:
177
  static const Registry::KindStrings _sKindStrings[];
178
  static const StubInfo _sStubInfo;
179
180
  enum Arm64Kind : Reference::KindValue {
181
    invalid,               /// for error condition
182
183
    // Kinds found in mach-o .o files:
184
    branch26,              /// ex: bl   _foo
185
    page21,                /// ex: adrp x1, _foo@PAGE
186
    offset12,              /// ex: ldrb w0, [x1, _foo@PAGEOFF]
187
    offset12scale2,        /// ex: ldrs w0, [x1, _foo@PAGEOFF]
188
    offset12scale4,        /// ex: ldr  w0, [x1, _foo@PAGEOFF]
189
    offset12scale8,        /// ex: ldr  x0, [x1, _foo@PAGEOFF]
190
    offset12scale16,       /// ex: ldr  q0, [x1, _foo@PAGEOFF]
191
    gotPage21,             /// ex: adrp x1, _foo@GOTPAGE
192
    gotOffset12,           /// ex: ldr  w0, [x1, _foo@GOTPAGEOFF]
193
    tlvPage21,             /// ex: adrp x1, _foo@TLVPAGE
194
    tlvOffset12,           /// ex: ldr  w0, [x1, _foo@TLVPAGEOFF]
195
196
    pointer64,             /// ex: .quad _foo
197
    delta64,               /// ex: .quad _foo - .
198
    delta32,               /// ex: .long _foo - .
199
    negDelta32,            /// ex: .long . - _foo
200
    pointer64ToGOT,        /// ex: .quad _foo@GOT
201
    delta32ToGOT,          /// ex: .long _foo@GOT - .
202
203
    // Kinds introduced by Passes:
204
    addOffset12,           /// Location contains LDR to change into ADD.
205
    lazyPointer,           /// Location contains a lazy pointer.
206
    lazyImmediateLocation, /// Location contains immediate value used in stub.
207
    imageOffset,           /// Location contains offset of atom in final image
208
    imageOffsetGot,        /// Location contains offset of GOT entry for atom in
209
                           /// final image (typically personality function).
210
    unwindCIEToPersonalityFunction,   /// Nearly delta32ToGOT, but cannot be
211
                           /// rematerialized in relocatable object
212
                           /// (yay for implicit contracts!).
213
    unwindFDEToFunction,   /// Nearly delta64, but cannot be rematerialized in
214
                           /// relocatable object (yay for implicit contracts!).
215
    unwindInfoToEhFrame,   /// Fix low 24 bits of compact unwind encoding to
216
                           /// refer to __eh_frame entry.
217
  };
218
219
  void applyFixupFinal(const Reference &ref, uint8_t *location,
220
                       uint64_t fixupAddress, uint64_t targetAddress,
221
                       uint64_t inAtomAddress, uint64_t imageBaseAddress,
222
                       FindAddressForAtom findSectionAddress);
223
224
  void applyFixupRelocatable(const Reference &ref, uint8_t *location,
225
                             uint64_t fixupAddress, uint64_t targetAddress,
226
                             uint64_t inAtomAddress, bool targetUnnamed);
227
228
  // Utility functions for inspecting/updating instructions.
229
  static uint32_t setDisplacementInBranch26(uint32_t instr, int32_t disp);
230
  static uint32_t setDisplacementInADRP(uint32_t instr, int64_t disp);
231
  static Arm64Kind offset12KindFromInstruction(uint32_t instr);
232
  static uint32_t setImm12(uint32_t instr, uint32_t offset);
233
};
234
235
const Registry::KindStrings ArchHandler_arm64::_sKindStrings[] = {
236
  LLD_KIND_STRING_ENTRY(invalid),
237
  LLD_KIND_STRING_ENTRY(branch26),
238
  LLD_KIND_STRING_ENTRY(page21),
239
  LLD_KIND_STRING_ENTRY(offset12),
240
  LLD_KIND_STRING_ENTRY(offset12scale2),
241
  LLD_KIND_STRING_ENTRY(offset12scale4),
242
  LLD_KIND_STRING_ENTRY(offset12scale8),
243
  LLD_KIND_STRING_ENTRY(offset12scale16),
244
  LLD_KIND_STRING_ENTRY(gotPage21),
245
  LLD_KIND_STRING_ENTRY(gotOffset12),
246
  LLD_KIND_STRING_ENTRY(tlvPage21),
247
  LLD_KIND_STRING_ENTRY(tlvOffset12),
248
  LLD_KIND_STRING_ENTRY(pointer64),
249
  LLD_KIND_STRING_ENTRY(delta64),
250
  LLD_KIND_STRING_ENTRY(delta32),
251
  LLD_KIND_STRING_ENTRY(negDelta32),
252
  LLD_KIND_STRING_ENTRY(pointer64ToGOT),
253
  LLD_KIND_STRING_ENTRY(delta32ToGOT),
254
255
  LLD_KIND_STRING_ENTRY(addOffset12),
256
  LLD_KIND_STRING_ENTRY(lazyPointer),
257
  LLD_KIND_STRING_ENTRY(lazyImmediateLocation),
258
  LLD_KIND_STRING_ENTRY(imageOffset),
259
  LLD_KIND_STRING_ENTRY(imageOffsetGot),
260
  LLD_KIND_STRING_ENTRY(unwindCIEToPersonalityFunction),
261
  LLD_KIND_STRING_ENTRY(unwindFDEToFunction),
262
  LLD_KIND_STRING_ENTRY(unwindInfoToEhFrame),
263
264
  LLD_KIND_STRING_END
265
};
266
267
const ArchHandler::StubInfo ArchHandler_arm64::_sStubInfo = {
268
  "dyld_stub_binder",
269
270
  // Lazy pointer references
271
  { Reference::KindArch::AArch64, pointer64, 0, 0 },
272
  { Reference::KindArch::AArch64, lazyPointer, 0, 0 },
273
274
  // GOT pointer to dyld_stub_binder
275
  { Reference::KindArch::AArch64, pointer64, 0, 0 },
276
277
  // arm64 code alignment 2^1
278
  1,
279
280
  // Stub size and code
281
  12,
282
  { 0x10, 0x00, 0x00, 0x90,   // ADRP  X16, lazy_pointer@page
283
    0x10, 0x02, 0x40, 0xF9,   // LDR   X16, [X16, lazy_pointer@pageoff]
284
    0x00, 0x02, 0x1F, 0xD6 }, // BR    X16
285
  { Reference::KindArch::AArch64, page21, 0, 0 },
286
  { true,                         offset12scale8, 4, 0 },
287
288
  // Stub Helper size and code
289
  12,
290
  { 0x50, 0x00, 0x00, 0x18,   //      LDR   W16, L0
291
    0x00, 0x00, 0x00, 0x14,   //      LDR   B  helperhelper
292
    0x00, 0x00, 0x00, 0x00 }, // L0: .long 0
293
  { Reference::KindArch::AArch64, lazyImmediateLocation, 8, 0 },
294
  { Reference::KindArch::AArch64, branch26, 4, 0 },
295
296
  // Stub helper image cache content type
297
  DefinedAtom::typeGOT,
298
299
  // Stub Helper-Common size and code
300
  24,
301
  // Stub helper alignment
302
  2,
303
  { 0x11, 0x00, 0x00, 0x90,   //  ADRP  X17, dyld_ImageLoaderCache@page
304
    0x31, 0x02, 0x00, 0x91,   //  ADD   X17, X17, dyld_ImageLoaderCache@pageoff
305
    0xF0, 0x47, 0xBF, 0xA9,   //  STP   X16/X17, [SP, #-16]!
306
    0x10, 0x00, 0x00, 0x90,   //  ADRP  X16, _fast_lazy_bind@page
307
    0x10, 0x02, 0x40, 0xF9,   //  LDR   X16, [X16,_fast_lazy_bind@pageoff]
308
    0x00, 0x02, 0x1F, 0xD6 }, //  BR    X16
309
  { Reference::KindArch::AArch64, page21,   0, 0 },
310
  { true,                         offset12, 4, 0 },
311
  { Reference::KindArch::AArch64, page21,   12, 0 },
312
  { true,                         offset12scale8, 16, 0 }
313
};
314
315
29
bool ArchHandler_arm64::isCallSite(const Reference &ref) {
316
29
  if (ref.kindNamespace() != Reference::KindNamespace::mach_o)
317
0
    return false;
318
29
  assert(ref.kindArch() == Reference::KindArch::AArch64);
319
29
  return (ref.kindValue() == branch26);
320
29
}
321
322
102
bool ArchHandler_arm64::isPointer(const Reference &ref) {
323
102
  if (ref.kindNamespace() != Reference::KindNamespace::mach_o)
324
0
    return false;
325
102
  assert(ref.kindArch() == Reference::KindArch::AArch64);
326
102
  Reference::KindValue kind = ref.kindValue();
327
102
  return (kind == pointer64);
328
102
}
329
330
109
bool ArchHandler_arm64::isPairedReloc(const Relocation &r) {
331
109
  return ((r.type == ARM64_RELOC_ADDEND) || 
(r.type == ARM64_RELOC_SUBTRACTOR)103
);
332
109
}
333
334
uint32_t ArchHandler_arm64::setDisplacementInBranch26(uint32_t instr,
335
25
                                                      int32_t displacement) {
336
25
  assert((displacement <= 134217727) && (displacement > (-134217728)) &&
337
25
         "arm64 branch out of range");
338
25
  return (instr & 0xFC000000) | ((uint32_t)(displacement >> 2) & 0x03FFFFFF);
339
25
}
340
341
uint32_t ArchHandler_arm64::setDisplacementInADRP(uint32_t instruction,
342
31
                                                  int64_t displacement) {
343
31
  assert((displacement <= 0x100000000LL) && (displacement > (-0x100000000LL)) &&
344
31
         "arm64 ADRP out of range");
345
31
  assert(((instruction & 0x9F000000) == 0x90000000) &&
346
31
         "reloc not on ADRP instruction");
347
31
  uint32_t immhi = (displacement >> 9) & (0x00FFFFE0);
348
31
  uint32_t immlo = (displacement << 17) & (0x60000000);
349
31
  return (instruction & 0x9F00001F) | immlo | immhi;
350
31
}
351
352
ArchHandler_arm64::Arm64Kind
353
18
ArchHandler_arm64::offset12KindFromInstruction(uint32_t instruction) {
354
18
  if (instruction & 0x08000000) {
355
12
    switch ((instruction >> 30) & 0x3) {
356
12
    case 0:
357
4
      if ((instruction & 0x04800000) == 0x04800000)
358
2
        return offset12scale16;
359
2
      return offset12;
360
2
    case 1:
361
2
      return offset12scale2;
362
4
    case 2:
363
4
      return offset12scale4;
364
2
    case 3:
365
2
      return offset12scale8;
366
6
    }
367
6
  }
368
6
  return offset12;
369
6
}
370
371
39
uint32_t ArchHandler_arm64::setImm12(uint32_t instruction, uint32_t offset) {
372
39
  assert(((offset & 0xFFFFF000) == 0) && "imm12 offset out of range");
373
39
  uint32_t imm12 = offset << 10;
374
39
  return (instruction & 0xFFC003FF) | imm12;
375
39
}
376
377
llvm::Error ArchHandler_arm64::getReferenceInfo(
378
    const Relocation &reloc, const DefinedAtom *inAtom, uint32_t offsetInAtom,
379
    uint64_t fixupAddress, bool isBig,
380
    FindAtomBySectionAndAddress atomFromAddress,
381
    FindAtomBySymbolIndex atomFromSymbolIndex, Reference::KindValue *kind,
382
88
    const lld::Atom **target, Reference::Addend *addend) {
383
88
  const uint8_t *fixupContent = &inAtom->rawContent()[offsetInAtom];
384
88
  switch (relocPattern(reloc)) {
385
88
  case ARM64_RELOC_BRANCH26           | rPcRel | rExtern | rLength4:
386
16
    // ex: bl _foo
387
16
    *kind = branch26;
388
16
    if (auto ec = atomFromSymbolIndex(reloc.symbol, target))
389
0
      return ec;
390
16
    *addend = 0;
391
16
    return llvm::Error::success();
392
16
  case ARM64_RELOC_PAGE21             | rPcRel | rExtern | rLength4:
393
8
    // ex: adrp x1, _foo@PAGE
394
8
    *kind = page21;
395
8
    if (auto ec = atomFromSymbolIndex(reloc.symbol, target))
396
0
      return ec;
397
8
    *addend = 0;
398
8
    return llvm::Error::success();
399
16
  case ARM64_RELOC_PAGEOFF12                   | rExtern | rLength4:
400
16
    // ex: ldr x0, [x1, _foo@PAGEOFF]
401
16
    *kind = offset12KindFromInstruction(*(const little32_t *)fixupContent);
402
16
    if (auto ec = atomFromSymbolIndex(reloc.symbol, target))
403
0
      return ec;
404
16
    *addend = 0;
405
16
    return llvm::Error::success();
406
16
  case ARM64_RELOC_GOT_LOAD_PAGE21    | rPcRel | rExtern | rLength4:
407
6
    // ex: adrp x1, _foo@GOTPAGE
408
6
    *kind = gotPage21;
409
6
    if (auto ec = atomFromSymbolIndex(reloc.symbol, target))
410
0
      return ec;
411
6
    *addend = 0;
412
6
    return llvm::Error::success();
413
6
  case ARM64_RELOC_GOT_LOAD_PAGEOFF12          | rExtern | rLength4:
414
6
    // ex: ldr x0, [x1, _foo@GOTPAGEOFF]
415
6
    *kind = gotOffset12;
416
6
    if (auto ec = atomFromSymbolIndex(reloc.symbol, target))
417
0
      return ec;
418
6
    *addend = 0;
419
6
    return llvm::Error::success();
420
6
  case ARM64_RELOC_TLVP_LOAD_PAGE21   | rPcRel | rExtern | rLength4:
421
2
    // ex: adrp x1, _foo@TLVPAGE
422
2
    *kind = tlvPage21;
423
2
    if (auto ec = atomFromSymbolIndex(reloc.symbol, target))
424
0
      return ec;
425
2
    *addend = 0;
426
2
    return llvm::Error::success();
427
2
  case ARM64_RELOC_TLVP_LOAD_PAGEOFF12         | rExtern | rLength4:
428
2
    // ex: ldr x0, [x1, _foo@TLVPAGEOFF]
429
2
    *kind = tlvOffset12;
430
2
    if (auto ec = atomFromSymbolIndex(reloc.symbol, target))
431
0
      return ec;
432
2
    *addend = 0;
433
2
    return llvm::Error::success();
434
13
  case ARM64_RELOC_UNSIGNED                    | rExtern | rLength8:
435
13
    // ex: .quad _foo + N
436
13
    *kind = pointer64;
437
13
    if (auto ec = atomFromSymbolIndex(reloc.symbol, target))
438
0
      return ec;
439
13
    *addend = *(const little64_t *)fixupContent;
440
13
    return llvm::Error::success();
441
13
  case ARM64_RELOC_UNSIGNED                              | rLength8:
442
12
     // ex: .quad Lfoo + N
443
12
     *kind = pointer64;
444
12
     return atomFromAddress(reloc.symbol, *(const little64_t *)fixupContent,
445
12
                            target, addend);
446
13
  case ARM64_RELOC_POINTER_TO_GOT              | rExtern | rLength8:
447
2
    // ex: .quad _foo@GOT
448
2
    *kind = pointer64ToGOT;
449
2
    if (auto ec = atomFromSymbolIndex(reloc.symbol, target))
450
0
      return ec;
451
2
    *addend = 0;
452
2
    return llvm::Error::success();
453
5
  case ARM64_RELOC_POINTER_TO_GOT     | rPcRel | rExtern | rLength4:
454
5
    // ex: .long _foo@GOT - .
455
5
456
5
    // If we are in an .eh_frame section, then the kind of the relocation should
457
5
    // not be delta32ToGOT.  It may instead be unwindCIEToPersonalityFunction.
458
5
    if (inAtom->contentType() == DefinedAtom::typeCFI)
459
2
      *kind = unwindCIEToPersonalityFunction;
460
3
    else
461
3
      *kind = delta32ToGOT;
462
5
463
5
    if (auto ec = atomFromSymbolIndex(reloc.symbol, target))
464
0
      return ec;
465
5
    *addend = 0;
466
5
    return llvm::Error::success();
467
5
  default:
468
0
    return llvm::make_error<GenericError>("unsupported arm64 relocation type");
469
88
  }
470
88
}
471
472
llvm::Error ArchHandler_arm64::getPairReferenceInfo(
473
    const normalized::Relocation &reloc1, const normalized::Relocation &reloc2,
474
    const DefinedAtom *inAtom, uint32_t offsetInAtom, uint64_t fixupAddress,
475
    bool swap, bool scatterable, FindAtomBySectionAndAddress atomFromAddress,
476
    FindAtomBySymbolIndex atomFromSymbolIndex, Reference::KindValue *kind,
477
21
    const lld::Atom **target, Reference::Addend *addend) {
478
21
  const uint8_t *fixupContent = &inAtom->rawContent()[offsetInAtom];
479
21
  switch (relocPattern(reloc1) << 16 | relocPattern(reloc2)) {
480
21
  case ((ARM64_RELOC_ADDEND                                | rLength4) << 16 |
481
2
         ARM64_RELOC_BRANCH26           | rPcRel | rExtern | rLength4):
482
2
    // ex: bl _foo+8
483
2
    *kind = branch26;
484
2
    if (auto ec = atomFromSymbolIndex(reloc2.symbol, target))
485
0
      return ec;
486
2
    *addend = reloc1.symbol;
487
2
    return llvm::Error::success();
488
2
  case ((ARM64_RELOC_ADDEND                                | rLength4) << 16 |
489
2
         ARM64_RELOC_PAGE21             | rPcRel | rExtern | rLength4):
490
2
    // ex: adrp x1, _foo@PAGE
491
2
    *kind = page21;
492
2
    if (auto ec = atomFromSymbolIndex(reloc2.symbol, target))
493
0
      return ec;
494
2
    *addend = reloc1.symbol;
495
2
    return llvm::Error::success();
496
2
  case ((ARM64_RELOC_ADDEND                                | rLength4) << 16 |
497
2
         ARM64_RELOC_PAGEOFF12                   | rExtern | rLength4): {
498
2
    // ex: ldr w0, [x1, _foo@PAGEOFF]
499
2
    uint32_t cont32 = (int32_t)*(const little32_t *)fixupContent;
500
2
    *kind = offset12KindFromInstruction(cont32);
501
2
    if (auto ec = atomFromSymbolIndex(reloc2.symbol, target))
502
0
      return ec;
503
2
    *addend = reloc1.symbol;
504
2
    return llvm::Error::success();
505
2
  }
506
11
  case ((ARM64_RELOC_SUBTRACTOR                  | rExtern | rLength8) << 16 |
507
11
         ARM64_RELOC_UNSIGNED                    | rExtern | rLength8):
508
11
    // ex: .quad _foo - .
509
11
    if (auto ec = atomFromSymbolIndex(reloc2.symbol, target))
510
0
      return ec;
511
11
512
11
    // If we are in an .eh_frame section, then the kind of the relocation should
513
11
    // not be delta64.  It may instead be unwindFDEToFunction.
514
11
    if (inAtom->contentType() == DefinedAtom::typeCFI)
515
4
      *kind = unwindFDEToFunction;
516
7
    else
517
7
      *kind = delta64;
518
11
519
11
    // The offsets of the 2 relocations must match
520
11
    if (reloc1.offset != reloc2.offset)
521
1
      return llvm::make_error<GenericError>(
522
1
                                    "paired relocs must have the same offset");
523
10
    *addend = (int64_t)*(const little64_t *)fixupContent + offsetInAtom;
524
10
    return llvm::Error::success();
525
10
  case ((ARM64_RELOC_SUBTRACTOR                  | rExtern | rLength4) << 16 |
526
4
         ARM64_RELOC_UNSIGNED                    | rExtern | rLength4):
527
4
    // ex: .quad _foo - .
528
4
    *kind = delta32;
529
4
    if (auto ec = atomFromSymbolIndex(reloc2.symbol, target))
530
0
      return ec;
531
4
    *addend = (int32_t)*(const little32_t *)fixupContent + offsetInAtom;
532
4
    return llvm::Error::success();
533
4
  default:
534
0
    return llvm::make_error<GenericError>("unsupported arm64 relocation pair");
535
21
  }
536
21
}
537
538
void ArchHandler_arm64::generateAtomContent(
539
    const DefinedAtom &atom, bool relocatable, FindAddressForAtom findAddress,
540
    FindAddressForAtom findSectionAddress, uint64_t imageBaseAddress,
541
119
    llvm::MutableArrayRef<uint8_t> atomContentBuffer) {
542
119
  // Copy raw bytes.
543
119
  std::copy(atom.rawContent().begin(), atom.rawContent().end(),
544
119
            atomContentBuffer.begin());
545
119
  // Apply fix-ups.
546
#ifndef NDEBUG
547
  if (atom.begin() != atom.end()) {
548
    DEBUG_WITH_TYPE("atom-content", llvm::dbgs()
549
                    << "Applying fixups to atom:\n"
550
                    << "   address="
551
                    << llvm::format("    0x%09lX", &atom)
552
                    << ", file=#"
553
                    << atom.file().ordinal()
554
                    << ", atom=#"
555
                    << atom.ordinal()
556
                    << ", name="
557
                    << atom.name()
558
                    << ", type="
559
                    << atom.contentType()
560
                    << "\n");
561
  }
562
#endif
563
204
  for (const Reference *ref : atom) {
564
204
    uint32_t offset = ref->offsetInAtom();
565
204
    const Atom *target = ref->target();
566
204
    bool targetUnnamed = target->name().empty();
567
204
    uint64_t targetAddress = 0;
568
204
    if (isa<DefinedAtom>(target))
569
153
      targetAddress = findAddress(*target);
570
204
    uint64_t atomAddress = findAddress(atom);
571
204
    uint64_t fixupAddress = atomAddress + offset;
572
204
    if (relocatable) {
573
102
      applyFixupRelocatable(*ref, &atomContentBuffer[offset], fixupAddress,
574
102
                            targetAddress, atomAddress, targetUnnamed);
575
102
    } else {
576
102
      applyFixupFinal(*ref, &atomContentBuffer[offset], fixupAddress,
577
102
                      targetAddress, atomAddress, imageBaseAddress,
578
102
                      findSectionAddress);
579
102
    }
580
204
  }
581
119
}
582
583
void ArchHandler_arm64::applyFixupFinal(const Reference &ref, uint8_t *loc,
584
                                        uint64_t fixupAddress,
585
                                        uint64_t targetAddress,
586
                                        uint64_t inAtomAddress,
587
                                        uint64_t imageBaseAddress,
588
102
                                        FindAddressForAtom findSectionAddress) {
589
102
  if (ref.kindNamespace() != Reference::KindNamespace::mach_o)
590
0
    return;
591
102
  assert(ref.kindArch() == Reference::KindArch::AArch64);
592
102
  ulittle32_t *loc32 = reinterpret_cast<ulittle32_t *>(loc);
593
102
  ulittle64_t *loc64 = reinterpret_cast<ulittle64_t *>(loc);
594
102
  int32_t displacement;
595
102
  uint32_t instruction;
596
102
  uint32_t value32;
597
102
  uint32_t value64;
598
102
  switch (static_cast<Arm64Kind>(ref.kindValue())) {
599
102
  case branch26:
600
17
    displacement = (targetAddress - fixupAddress) + ref.addend();
601
17
    *loc32 = setDisplacementInBranch26(*loc32, displacement);
602
17
    return;
603
102
  case page21:
604
19
  case gotPage21:
605
19
  case tlvPage21:
606
19
    displacement =
607
19
        ((targetAddress + ref.addend()) & (-4096)) - (fixupAddress & (-4096));
608
19
    *loc32 = setDisplacementInADRP(*loc32, displacement);
609
19
    return;
610
19
  case offset12:
611
5
  case gotOffset12:
612
5
  case tlvOffset12:
613
5
    displacement = (targetAddress + ref.addend()) & 0x00000FFF;
614
5
    *loc32 = setImm12(*loc32, displacement);
615
5
    return;
616
5
  case offset12scale2:
617
0
    displacement = (targetAddress + ref.addend()) & 0x00000FFF;
618
0
    assert(((displacement & 0x1) == 0) &&
619
0
           "scaled imm12 not accessing 2-byte aligneds");
620
0
    *loc32 = setImm12(*loc32, displacement >> 1);
621
0
    return;
622
5
  case offset12scale4:
623
0
    displacement = (targetAddress + ref.addend()) & 0x00000FFF;
624
0
    assert(((displacement & 0x3) == 0) &&
625
0
           "scaled imm12 not accessing 4-byte aligned");
626
0
    *loc32 = setImm12(*loc32, displacement >> 2);
627
0
    return;
628
14
  case offset12scale8:
629
14
    displacement = (targetAddress + ref.addend()) & 0x00000FFF;
630
14
    assert(((displacement & 0x7) == 0) &&
631
14
           "scaled imm12 not accessing 8-byte aligned");
632
14
    *loc32 = setImm12(*loc32, displacement >> 3);
633
14
    return;
634
5
  case offset12scale16:
635
0
    displacement = (targetAddress + ref.addend()) & 0x00000FFF;
636
0
    assert(((displacement & 0xF) == 0) &&
637
0
           "scaled imm12 not accessing 16-byte aligned");
638
0
    *loc32 = setImm12(*loc32, displacement >> 4);
639
0
    return;
640
5
  case addOffset12:
641
0
    instruction = *loc32;
642
0
    assert(((instruction & 0xFFC00000) == 0xF9400000) &&
643
0
           "GOT reloc is not an LDR instruction");
644
0
    displacement = (targetAddress + ref.addend()) & 0x00000FFF;
645
0
    value32 = 0x91000000 | (instruction & 0x000003FF);
646
0
    instruction = setImm12(value32, displacement);
647
0
    *loc32 = instruction;
648
0
    return;
649
16
  case pointer64:
650
16
  case pointer64ToGOT:
651
16
    *loc64 = targetAddress + ref.addend();
652
16
    return;
653
16
  case delta64:
654
0
  case unwindFDEToFunction:
655
0
    *loc64 = (targetAddress - fixupAddress) + ref.addend();
656
0
    return;
657
2
  case delta32:
658
2
  case delta32ToGOT:
659
2
  case unwindCIEToPersonalityFunction:
660
2
    *loc32 = (targetAddress - fixupAddress) + ref.addend();
661
2
    return;
662
2
  case negDelta32:
663
0
    *loc32 = fixupAddress - targetAddress + ref.addend();
664
0
    return;
665
7
  case lazyPointer:
666
7
    // Do nothing
667
7
    return;
668
14
  case lazyImmediateLocation:
669
14
    *loc32 = ref.addend();
670
14
    return;
671
8
  case imageOffset:
672
8
    *loc32 = (targetAddress - imageBaseAddress) + ref.addend();
673
8
    return;
674
2
  case imageOffsetGot:
675
0
    llvm_unreachable("imageOffsetGot should have been changed to imageOffset");
676
2
    
break0
;
677
2
  case unwindInfoToEhFrame:
678
0
    value64 = targetAddress - findSectionAddress(*ref.target()) + ref.addend();
679
0
    assert(value64 < 0xffffffU && "offset in __eh_frame too large");
680
0
    *loc32 = (*loc32 & 0xff000000U) | value64;
681
0
    return;
682
2
  case invalid:
683
0
    // Fall into llvm_unreachable().
684
0
    break;
685
0
  }
686
0
  llvm_unreachable("invalid arm64 Reference Kind");
687
0
}
688
689
/// Write the instruction/data bytes for one fixup when emitting a
/// relocatable object (-r mode).  Unlike the final-link path, most
/// target-dependent values are zeroed here and expressed instead as
/// relocations (see appendSectionRelocations); only atom-internal deltas
/// and implicit __eh_frame references carry real values.
///
/// \param ref            the reference (kind, addend, target) being applied.
/// \param loc            pointer to the fixup location in the atom's bytes.
/// \param fixupAddress   address of the fixup location.
/// \param targetAddress  address of the reference's target atom.
/// \param inAtomAddress  address of the atom containing the fixup.
/// \param targetUnnamed  true if the target has no symbol name, so it must
///                       be encoded section-relative with its full address.
void ArchHandler_arm64::applyFixupRelocatable(const Reference &ref,
                                              uint8_t *loc,
                                              uint64_t fixupAddress,
                                              uint64_t targetAddress,
                                              uint64_t inAtomAddress,
                                              bool targetUnnamed) {
  if (ref.kindNamespace() != Reference::KindNamespace::mach_o)
    return;
  assert(ref.kindArch() == Reference::KindArch::AArch64);
  // The same location is viewed as 32-bit or 64-bit little-endian
  // depending on the reference kind.
  ulittle32_t *loc32 = reinterpret_cast<ulittle32_t *>(loc);
  ulittle64_t *loc64 = reinterpret_cast<ulittle64_t *>(loc);
  switch (static_cast<Arm64Kind>(ref.kindValue())) {
  case branch26:
    // Displacement is zeroed; the linker relocating this object fills it in.
    *loc32 = setDisplacementInBranch26(*loc32, 0);
    return;
  case page21:
  case gotPage21:
  case tlvPage21:
    // ADRP page displacement zeroed; expressed as a relocation.
    *loc32 = setDisplacementInADRP(*loc32, 0);
    return;
  case offset12:
  case offset12scale2:
  case offset12scale4:
  case offset12scale8:
  case offset12scale16:
  case gotOffset12:
  case tlvOffset12:
    // Page-offset imm12 zeroed; expressed as a relocation.
    *loc32 = setImm12(*loc32, 0);
    return;
  case pointer64:
    if (targetUnnamed)
      // Unnamed targets are encoded section-relative, so store the
      // full target address (plus addend) in the section contents.
      *loc64 = targetAddress + ref.addend();
    else
      // Named targets use an extern relocation; only the addend is stored.
      *loc64 = ref.addend();
    return;
  case delta64:
    *loc64 = ref.addend() + inAtomAddress - fixupAddress;
    return;
  case unwindFDEToFunction:
    // We don't emit unwindFDEToFunction in -r mode as they are implicitly
    // generated from the data in the __eh_frame section.  So here we need
    // to use the targetAddress so that we can generate the full relocation
    // when we parse again later.
    *loc64 = targetAddress - fixupAddress;
    return;
  case delta32:
    *loc32 = ref.addend() + inAtomAddress - fixupAddress;
    return;
  case negDelta32:
    // We don't emit negDelta32 in -r mode as they are implicitly
    // generated from the data in the __eh_frame section.  So here we need
    // to use the targetAddress so that we can generate the full relocation
    // when we parse again later.
    *loc32 = fixupAddress - targetAddress + ref.addend();
    return;
  case pointer64ToGOT:
    // GOT slot address is not known until final link; zero the field.
    *loc64 = 0;
    return;
  case delta32ToGOT:
    *loc32 = inAtomAddress - fixupAddress;
    return;
  case unwindCIEToPersonalityFunction:
    // We don't emit unwindCIEToPersonalityFunction in -r mode as they are
    // implicitly generated from the data in the __eh_frame section.  So here we
    // need to use the targetAddress so that we can generate the full relocation
    // when we parse again later.
    *loc32 = targetAddress - fixupAddress;
    return;
  case addOffset12:
    llvm_unreachable("lazy reference kind implies GOT pass was run");
  case lazyPointer:
  case lazyImmediateLocation:
    llvm_unreachable("lazy reference kind implies Stubs pass was run");
  case imageOffset:
  case imageOffsetGot:
  case unwindInfoToEhFrame:
    llvm_unreachable("fixup implies __unwind_info");
    return;
  case invalid:
    // Fall into llvm_unreachable().
    break;
  }
  llvm_unreachable("unknown arm64 Reference Kind");
}
773
774
void ArchHandler_arm64::appendSectionRelocations(
775
    const DefinedAtom &atom, uint64_t atomSectionOffset, const Reference &ref,
776
    FindSymbolIndexForAtom symbolIndexForAtom,
777
    FindSectionIndexForAtom sectionIndexForAtom,
778
74
    FindAddressForAtom addressForAtom, normalized::Relocations &relocs) {
779
74
  if (ref.kindNamespace() != Reference::KindNamespace::mach_o)
780
0
    return;
781
74
  assert(ref.kindArch() == Reference::KindArch::AArch64);
782
74
  uint32_t sectionOffset = atomSectionOffset + ref.offsetInAtom();
783
74
  switch (static_cast<Arm64Kind>(ref.kindValue())) {
784
74
  case branch26:
785
8
    if (ref.addend()) {
786
2
      appendReloc(relocs, sectionOffset, ref.addend(), 0,
787
2
                  ARM64_RELOC_ADDEND | rLength4);
788
2
      appendReloc(relocs, sectionOffset, symbolIndexForAtom(*ref.target()), 0,
789
2
                  ARM64_RELOC_BRANCH26 | rPcRel | rExtern | rLength4);
790
6
     } else {
791
6
      appendReloc(relocs, sectionOffset, symbolIndexForAtom(*ref.target()), 0,
792
6
                  ARM64_RELOC_BRANCH26 | rPcRel | rExtern | rLength4);
793
6
    }
794
8
    return;
795
74
  case page21:
796
8
    if (ref.addend()) {
797
2
      appendReloc(relocs, sectionOffset, ref.addend(), 0,
798
2
                  ARM64_RELOC_ADDEND | rLength4);
799
2
      appendReloc(relocs, sectionOffset, symbolIndexForAtom(*ref.target()), 0,
800
2
                  ARM64_RELOC_PAGE21 | rPcRel | rExtern | rLength4);
801
6
     } else {
802
6
      appendReloc(relocs, sectionOffset, symbolIndexForAtom(*ref.target()), 0,
803
6
                  ARM64_RELOC_PAGE21 | rPcRel | rExtern | rLength4);
804
6
    }
805
8
    return;
806
74
  case offset12:
807
16
  case offset12scale2:
808
16
  case offset12scale4:
809
16
  case offset12scale8:
810
16
  case offset12scale16:
811
16
    if (ref.addend()) {
812
2
      appendReloc(relocs, sectionOffset, ref.addend(), 0,
813
2
                  ARM64_RELOC_ADDEND | rLength4);
814
2
      appendReloc(relocs, sectionOffset, symbolIndexForAtom(*ref.target()), 0,
815
2
                  ARM64_RELOC_PAGEOFF12  | rExtern | rLength4);
816
14
     } else {
817
14
      appendReloc(relocs, sectionOffset, symbolIndexForAtom(*ref.target()), 0,
818
14
                  ARM64_RELOC_PAGEOFF12 | rExtern | rLength4);
819
14
    }
820
16
    return;
821
16
  case gotPage21:
822
2
    assert(ref.addend() == 0);
823
2
    appendReloc(relocs, sectionOffset, symbolIndexForAtom(*ref.target()), 0,
824
2
                  ARM64_RELOC_GOT_LOAD_PAGE21 | rPcRel | rExtern | rLength4);
825
2
    return;
826
16
  case gotOffset12:
827
2
    assert(ref.addend() == 0);
828
2
    appendReloc(relocs, sectionOffset, symbolIndexForAtom(*ref.target()), 0,
829
2
                  ARM64_RELOC_GOT_LOAD_PAGEOFF12 | rExtern | rLength4);
830
2
    return;
831
16
  case tlvPage21:
832
2
    assert(ref.addend() == 0);
833
2
    appendReloc(relocs, sectionOffset, symbolIndexForAtom(*ref.target()), 0,
834
2
                  ARM64_RELOC_TLVP_LOAD_PAGE21 | rPcRel | rExtern | rLength4);
835
2
    return;
836
16
  case tlvOffset12:
837
2
    assert(ref.addend() == 0);
838
2
    appendReloc(relocs, sectionOffset, symbolIndexForAtom(*ref.target()), 0,
839
2
                  ARM64_RELOC_TLVP_LOAD_PAGEOFF12 | rExtern | rLength4);
840
2
    return;
841
20
  case pointer64:
842
20
    if (ref.target()->name().empty())
843
2
      appendReloc(relocs, sectionOffset, sectionIndexForAtom(*ref.target()), 0,
844
2
                  ARM64_RELOC_UNSIGNED           | rLength8);
845
18
    else
846
18
      appendReloc(relocs, sectionOffset, symbolIndexForAtom(*ref.target()), 0,
847
18
                  ARM64_RELOC_UNSIGNED | rExtern | rLength8);
848
20
    return;
849
16
  case delta64:
850
6
    appendReloc(relocs, sectionOffset, symbolIndexForAtom(atom), 0,
851
6
                ARM64_RELOC_SUBTRACTOR | rExtern | rLength8);
852
6
    appendReloc(relocs, sectionOffset, symbolIndexForAtom(*ref.target()), 0,
853
6
                ARM64_RELOC_UNSIGNED  | rExtern | rLength8);
854
6
    return;
855
16
  case delta32:
856
4
    appendReloc(relocs, sectionOffset, symbolIndexForAtom(atom), 0,
857
4
                ARM64_RELOC_SUBTRACTOR | rExtern | rLength4 );
858
4
    appendReloc(relocs, sectionOffset, symbolIndexForAtom(*ref.target()), 0,
859
4
                ARM64_RELOC_UNSIGNED   | rExtern | rLength4 );
860
4
    return;
861
16
  case pointer64ToGOT:
862
2
    assert(ref.addend() == 0);
863
2
    appendReloc(relocs, sectionOffset, symbolIndexForAtom(*ref.target()), 0,
864
2
                  ARM64_RELOC_POINTER_TO_GOT | rExtern | rLength8);
865
2
    return;
866
16
  case delta32ToGOT:
867
2
    assert(ref.addend() == 0);
868
2
    appendReloc(relocs, sectionOffset, symbolIndexForAtom(*ref.target()), 0,
869
2
                  ARM64_RELOC_POINTER_TO_GOT | rPcRel | rExtern | rLength4);
870
2
    return;
871
16
  case addOffset12:
872
0
    llvm_unreachable("lazy reference kind implies GOT pass was run");
873
16
  case lazyPointer:
874
0
  case lazyImmediateLocation:
875
0
    llvm_unreachable("lazy reference kind implies Stubs pass was run");
876
0
  case imageOffset:
877
0
  case imageOffsetGot:
878
0
    llvm_unreachable("deltas from mach_header can only be in final images");
879
0
  case unwindCIEToPersonalityFunction:
880
0
  case unwindFDEToFunction:
881
0
  case unwindInfoToEhFrame:
882
0
  case negDelta32:
883
0
    // Do nothing.
884
0
    return;
885
0
  case invalid:
886
0
    // Fall into llvm_unreachable().
887
0
    break;
888
0
  }
889
0
  llvm_unreachable("unknown arm64 Reference Kind");
890
0
}
891
892
35
std::unique_ptr<mach_o::ArchHandler> ArchHandler::create_arm64() {
893
35
  return std::unique_ptr<mach_o::ArchHandler>(new ArchHandler_arm64());
894
35
}
895
896
} // namespace mach_o
897
} // namespace lld