Coverage Report

Created: 2017-10-03 07:32

/Users/buildslave/jenkins/sharedspace/clang-stage2-coverage-R@2/llvm/tools/lld/lib/ReaderWriter/MachO/ArchHandler_x86_64.cpp

//===- lib/ReaderWriter/MachO/ArchHandler_x86_64.cpp ----------------------===//
//
//                             The LLVM Linker
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//

#include "ArchHandler.h"
#include "Atoms.h"
#include "MachONormalizedFileBinaryUtils.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/Triple.h"
#include "llvm/Support/Endian.h"
#include "llvm/Support/ErrorHandling.h"

using namespace llvm::MachO;
using namespace lld::mach_o::normalized;

namespace lld {
namespace mach_o {

using llvm::support::ulittle32_t;
using llvm::support::ulittle64_t;

using llvm::support::little32_t;
using llvm::support::little64_t;

class ArchHandler_x86_64 : public ArchHandler {
public:
  ArchHandler_x86_64() = default;
  ~ArchHandler_x86_64() override = default;

  const Registry::KindStrings *kindStrings() override { return _sKindStrings; }

  Reference::KindArch kindArch() override {
    return Reference::KindArch::x86_64;
  }

  /// Used by GOTPass to locate GOT References
  bool isGOTAccess(const Reference &ref, bool &canBypassGOT) override {
    if (ref.kindNamespace() != Reference::KindNamespace::mach_o)
      return false;
    assert(ref.kindArch() == Reference::KindArch::x86_64);
    switch (ref.kindValue()) {
    case ripRel32GotLoad:
      canBypassGOT = true;
      return true;
    case ripRel32Got:
      canBypassGOT = false;
      return true;
    case imageOffsetGot:
      canBypassGOT = false;
      return true;
    default:
      return false;
    }
  }

  bool isTLVAccess(const Reference &ref) const override {
    assert(ref.kindNamespace() == Reference::KindNamespace::mach_o);
    assert(ref.kindArch() == Reference::KindArch::x86_64);
    return ref.kindValue() == ripRel32Tlv;
  }

  void updateReferenceToTLV(const Reference *ref) override {
    assert(ref->kindNamespace() == Reference::KindNamespace::mach_o);
    assert(ref->kindArch() == Reference::KindArch::x86_64);
    assert(ref->kindValue() == ripRel32Tlv);
    const_cast<Reference*>(ref)->setKindValue(ripRel32);
  }

  /// Used by GOTPass to update GOT References
  void updateReferenceToGOT(const Reference *ref, bool targetNowGOT) override {
    assert(ref->kindNamespace() == Reference::KindNamespace::mach_o);
    assert(ref->kindArch() == Reference::KindArch::x86_64);

    switch (ref->kindValue()) {
    case ripRel32Got:
      assert(targetNowGOT && "target must be GOT");
    case ripRel32GotLoad:
      const_cast<Reference *>(ref)
        ->setKindValue(targetNowGOT ? ripRel32 : ripRel32GotLoadNowLea);
      break;
    case imageOffsetGot:
      const_cast<Reference *>(ref)->setKindValue(imageOffset);
      break;
    default:
      llvm_unreachable("unknown GOT reference kind");
    }
  }

  bool needsCompactUnwind() override {
    return true;
  }

  Reference::KindValue imageOffsetKind() override {
    return imageOffset;
  }

  Reference::KindValue imageOffsetKindIndirect() override {
    return imageOffsetGot;
  }

  Reference::KindValue unwindRefToPersonalityFunctionKind() override {
    return ripRel32Got;
  }

  Reference::KindValue unwindRefToCIEKind() override {
    return negDelta32;
  }

  Reference::KindValue unwindRefToFunctionKind() override {
    return unwindFDEToFunction;
  }

  Reference::KindValue lazyImmediateLocationKind() override {
    return lazyImmediateLocation;
  }

  Reference::KindValue unwindRefToEhFrameKind() override {
    return unwindInfoToEhFrame;
  }

  Reference::KindValue pointerKind() override {
    return pointer64;
  }

  uint32_t dwarfCompactUnwindType() override {
    return 0x04000000U;
  }

  const StubInfo &stubInfo() override { return _sStubInfo; }

  bool isNonCallBranch(const Reference &) override {
    return false;
  }

  bool isCallSite(const Reference &) override;
  bool isPointer(const Reference &) override;
  bool isPairedReloc(const normalized::Relocation &) override;

  llvm::Error getReferenceInfo(const normalized::Relocation &reloc,
                               const DefinedAtom *inAtom,
                               uint32_t offsetInAtom,
                               uint64_t fixupAddress, bool swap,
                               FindAtomBySectionAndAddress atomFromAddress,
                               FindAtomBySymbolIndex atomFromSymbolIndex,
                               Reference::KindValue *kind,
                               const lld::Atom **target,
                               Reference::Addend *addend) override;
  llvm::Error
      getPairReferenceInfo(const normalized::Relocation &reloc1,
                           const normalized::Relocation &reloc2,
                           const DefinedAtom *inAtom,
                           uint32_t offsetInAtom,
                           uint64_t fixupAddress, bool swap, bool scatterable,
                           FindAtomBySectionAndAddress atomFromAddress,
                           FindAtomBySymbolIndex atomFromSymbolIndex,
                           Reference::KindValue *kind,
                           const lld::Atom **target,
                           Reference::Addend *addend) override;

  bool needsLocalSymbolInRelocatableFile(const DefinedAtom *atom) override {
    return (atom->contentType() == DefinedAtom::typeCString);
  }

  void generateAtomContent(const DefinedAtom &atom, bool relocatable,
                           FindAddressForAtom findAddress,
                           FindAddressForAtom findSectionAddress,
                           uint64_t imageBase,
                    llvm::MutableArrayRef<uint8_t> atomContentBuffer) override;

  void appendSectionRelocations(const DefinedAtom &atom,
                                uint64_t atomSectionOffset,
                                const Reference &ref,
                                FindSymbolIndexForAtom symbolIndexForAtom,
                                FindSectionIndexForAtom sectionIndexForAtom,
                                FindAddressForAtom addressForAtom,
                                normalized::Relocations &relocs) override;

private:
  static const Registry::KindStrings _sKindStrings[];
  static const StubInfo              _sStubInfo;

  enum X86_64Kind: Reference::KindValue {
    invalid,               /// for error condition

    // Kinds found in mach-o .o files:
    branch32,              /// ex: call _foo
    ripRel32,              /// ex: movq _foo(%rip), %rax
    ripRel32Minus1,        /// ex: movb $0x12, _foo(%rip)
    ripRel32Minus2,        /// ex: movw $0x1234, _foo(%rip)
    ripRel32Minus4,        /// ex: movl $0x12345678, _foo(%rip)
    ripRel32Anon,          /// ex: movq L1(%rip), %rax
    ripRel32Minus1Anon,    /// ex: movb $0x12, L1(%rip)
    ripRel32Minus2Anon,    /// ex: movw $0x1234, L1(%rip)
    ripRel32Minus4Anon,    /// ex: movl $0x12345678, L1(%rip)
    ripRel32GotLoad,       /// ex: movq  _foo@GOTPCREL(%rip), %rax
    ripRel32Got,           /// ex: pushq _foo@GOTPCREL(%rip)
    ripRel32Tlv,           /// ex: movq  _foo@TLVP(%rip), %rdi
    pointer64,             /// ex: .quad _foo
    pointer64Anon,         /// ex: .quad L1
    delta64,               /// ex: .quad _foo - .
    delta32,               /// ex: .long _foo - .
    delta64Anon,           /// ex: .quad L1 - .
    delta32Anon,           /// ex: .long L1 - .
    negDelta64,            /// ex: .quad . - _foo
    negDelta32,            /// ex: .long . - _foo

    // Kinds introduced by Passes:
    ripRel32GotLoadNowLea, /// Target of GOT load is in linkage unit so
                           /// "movq  _foo@GOTPCREL(%rip), %rax" can be changed
                           /// to "leaq _foo(%rip), %rax".
    lazyPointer,           /// Location contains a lazy pointer.
    lazyImmediateLocation, /// Location contains immediate value used in stub.

    imageOffset,           /// Location contains offset of atom in final image
    imageOffsetGot,        /// Location contains offset of GOT entry for atom in
                           /// final image (typically personality function).
    unwindFDEToFunction,   /// Nearly delta64, but cannot be rematerialized in
                           /// relocatable object (yay for implicit contracts!).
    unwindInfoToEhFrame,   /// Fix low 24 bits of compact unwind encoding to
                           /// refer to __eh_frame entry.
    tlvInitSectionOffset   /// Location contains offset of the TLV init-value
                           /// atom within the __thread_data section.
  };
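
  // Each of the ".o file" kinds above corresponds to one Mach-O relocation
  // pattern: kindFromReloc() decodes that pattern, and (for -r output)
  // appendSectionRelocations() re-encodes it.  For example, "call _foo"
  // arrives as X86_64_RELOC_BRANCH | rPcRel | rExtern | rLength4 and becomes
  // branch32.  The pass-introduced kinds have no object-file encoding.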

  Reference::KindValue kindFromReloc(const normalized::Relocation &reloc);

  void applyFixupFinal(const Reference &ref, uint8_t *location,
                       uint64_t fixupAddress, uint64_t targetAddress,
                       uint64_t inAtomAddress, uint64_t imageBaseAddress,
                       FindAddressForAtom findSectionAddress);

  void applyFixupRelocatable(const Reference &ref, uint8_t *location,
                             uint64_t fixupAddress,
                             uint64_t targetAddress,
                             uint64_t inAtomAddress);
};

const Registry::KindStrings ArchHandler_x86_64::_sKindStrings[] = {
  LLD_KIND_STRING_ENTRY(invalid), LLD_KIND_STRING_ENTRY(branch32),
  LLD_KIND_STRING_ENTRY(ripRel32), LLD_KIND_STRING_ENTRY(ripRel32Minus1),
  LLD_KIND_STRING_ENTRY(ripRel32Minus2), LLD_KIND_STRING_ENTRY(ripRel32Minus4),
  LLD_KIND_STRING_ENTRY(ripRel32Anon),
  LLD_KIND_STRING_ENTRY(ripRel32Minus1Anon),
  LLD_KIND_STRING_ENTRY(ripRel32Minus2Anon),
  LLD_KIND_STRING_ENTRY(ripRel32Minus4Anon),
  LLD_KIND_STRING_ENTRY(ripRel32GotLoad),
  LLD_KIND_STRING_ENTRY(ripRel32GotLoadNowLea),
  LLD_KIND_STRING_ENTRY(ripRel32Got), LLD_KIND_STRING_ENTRY(ripRel32Tlv),
  LLD_KIND_STRING_ENTRY(lazyPointer),
  LLD_KIND_STRING_ENTRY(lazyImmediateLocation),
  LLD_KIND_STRING_ENTRY(pointer64), LLD_KIND_STRING_ENTRY(pointer64Anon),
  LLD_KIND_STRING_ENTRY(delta32), LLD_KIND_STRING_ENTRY(delta64),
  LLD_KIND_STRING_ENTRY(delta32Anon), LLD_KIND_STRING_ENTRY(delta64Anon),
  LLD_KIND_STRING_ENTRY(negDelta64),
  LLD_KIND_STRING_ENTRY(negDelta32),
  LLD_KIND_STRING_ENTRY(imageOffset), LLD_KIND_STRING_ENTRY(imageOffsetGot),
  LLD_KIND_STRING_ENTRY(unwindFDEToFunction),
  LLD_KIND_STRING_ENTRY(unwindInfoToEhFrame),
  LLD_KIND_STRING_ENTRY(tlvInitSectionOffset),
  LLD_KIND_STRING_END
};

const ArchHandler::StubInfo ArchHandler_x86_64::_sStubInfo = {
  "dyld_stub_binder",

  // Lazy pointer references
  { Reference::KindArch::x86_64, pointer64, 0, 0 },
  { Reference::KindArch::x86_64, lazyPointer, 0, 0 },

  // GOT pointer to dyld_stub_binder
  { Reference::KindArch::x86_64, pointer64, 0, 0 },

  // x86_64 code alignment 2^1
  1,

  // Stub size and code
  6,
  { 0xff, 0x25, 0x00, 0x00, 0x00, 0x00 },       // jmp *lazyPointer
  { Reference::KindArch::x86_64, ripRel32, 2, 0 },
  { false, 0, 0, 0 },

  // Stub Helper size and code
  10,
  { 0x68, 0x00, 0x00, 0x00, 0x00,               // pushq $lazy-info-offset
    0xE9, 0x00, 0x00, 0x00, 0x00 },             // jmp helperhelper
  { Reference::KindArch::x86_64, lazyImmediateLocation, 1, 0 },
  { Reference::KindArch::x86_64, branch32, 6, 0 },

  // Stub helper image cache content type
  DefinedAtom::typeNonLazyPointer,

  // Stub Helper-Common size and code
  16,
  // Stub helper alignment
  2,
  { 0x4C, 0x8D, 0x1D, 0x00, 0x00, 0x00, 0x00,   // leaq cache(%rip),%r11
    0x41, 0x53,                                 // push %r11
    0xFF, 0x25, 0x00, 0x00, 0x00, 0x00,         // jmp *binder(%rip)
    0x90 },                                     // nop
  { Reference::KindArch::x86_64, ripRel32, 3, 0 },
  { false, 0, 0, 0 },
  { Reference::KindArch::x86_64, ripRel32, 11, 0 },
  { false, 0, 0, 0 }

};
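
// A rough sketch of how the stub pieces above cooperate at runtime (this is
// the standard dyld lazy-binding scheme): a call site jumps to its 6-byte
// stub, which does "jmp *lazyPointer".  The lazy pointer initially targets
// the per-symbol stub helper, which pushes that symbol's lazy-info offset
// and branches to the common helper; the common helper loads the image
// cache pointer into %r11 and jumps through the GOT slot bound to
// dyld_stub_binder.  dyld resolves the symbol and rewrites the lazy
// pointer, so subsequent calls go straight to the target.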

bool ArchHandler_x86_64::isCallSite(const Reference &ref) {
  if (ref.kindNamespace() != Reference::KindNamespace::mach_o)
    return false;
  assert(ref.kindArch() == Reference::KindArch::x86_64);
  return (ref.kindValue() == branch32);
}

bool ArchHandler_x86_64::isPointer(const Reference &ref) {
  if (ref.kindNamespace() != Reference::KindNamespace::mach_o)
    return false;
  assert(ref.kindArch() == Reference::KindArch::x86_64);
  Reference::KindValue kind = ref.kindValue();
  return (kind == pointer64 || kind == pointer64Anon);
}

bool ArchHandler_x86_64::isPairedReloc(const Relocation &reloc) {
  return (reloc.type == X86_64_RELOC_SUBTRACTOR);
}

Reference::KindValue
ArchHandler_x86_64::kindFromReloc(const Relocation &reloc) {
  switch(relocPattern(reloc)) {
  case X86_64_RELOC_BRANCH   | rPcRel | rExtern | rLength4:
    return branch32;
  case X86_64_RELOC_SIGNED   | rPcRel | rExtern | rLength4:
    return ripRel32;
  case X86_64_RELOC_SIGNED   | rPcRel |           rLength4:
    return ripRel32Anon;
  case X86_64_RELOC_SIGNED_1 | rPcRel | rExtern | rLength4:
    return ripRel32Minus1;
  case X86_64_RELOC_SIGNED_1 | rPcRel |           rLength4:
    return ripRel32Minus1Anon;
  case X86_64_RELOC_SIGNED_2 | rPcRel | rExtern | rLength4:
    return ripRel32Minus2;
  case X86_64_RELOC_SIGNED_2 | rPcRel |           rLength4:
    return ripRel32Minus2Anon;
  case X86_64_RELOC_SIGNED_4 | rPcRel | rExtern | rLength4:
    return ripRel32Minus4;
  case X86_64_RELOC_SIGNED_4 | rPcRel |           rLength4:
    return ripRel32Minus4Anon;
  case X86_64_RELOC_GOT_LOAD | rPcRel | rExtern | rLength4:
    return ripRel32GotLoad;
  case X86_64_RELOC_GOT      | rPcRel | rExtern | rLength4:
    return ripRel32Got;
  case X86_64_RELOC_TLV      | rPcRel | rExtern | rLength4:
    return ripRel32Tlv;
  case X86_64_RELOC_UNSIGNED          | rExtern | rLength8:
    return pointer64;
  case X86_64_RELOC_UNSIGNED                    | rLength8:
    return pointer64Anon;
  default:
    return invalid;
  }
}
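
// Note on the SIGNED_1/2/4 variants: x86_64 rip-relative displacements are
// measured from the end of the instruction, which may extend 1, 2, or 4
// immediate bytes past the 4-byte displacement field (e.g.
// "movb $0x12, _foo(%rip)" carries one trailing immediate byte).  That is
// why getReferenceInfo() below uses fixupAddress + 5/6/8 for the anonymous
// forms and folds the matching +1/+2/+4 into the addend for extern ones.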

llvm::Error
ArchHandler_x86_64::getReferenceInfo(const Relocation &reloc,
                                    const DefinedAtom *inAtom,
                                    uint32_t offsetInAtom,
                                    uint64_t fixupAddress, bool swap,
                                    FindAtomBySectionAndAddress atomFromAddress,
                                    FindAtomBySymbolIndex atomFromSymbolIndex,
                                    Reference::KindValue *kind,
                                    const lld::Atom **target,
                                    Reference::Addend *addend) {
  *kind = kindFromReloc(reloc);
  if (*kind == invalid)
    return llvm::make_error<GenericError>("unknown type");
  const uint8_t *fixupContent = &inAtom->rawContent()[offsetInAtom];
  uint64_t targetAddress;
  switch (*kind) {
  case branch32:
  case ripRel32:
    if (auto ec = atomFromSymbolIndex(reloc.symbol, target))
      return ec;
    *addend = *(const little32_t *)fixupContent;
    return llvm::Error::success();
  case ripRel32Minus1:
    if (auto ec = atomFromSymbolIndex(reloc.symbol, target))
      return ec;
    *addend = (int32_t)*(const little32_t *)fixupContent + 1;
    return llvm::Error::success();
  case ripRel32Minus2:
    if (auto ec = atomFromSymbolIndex(reloc.symbol, target))
      return ec;
    *addend = (int32_t)*(const little32_t *)fixupContent + 2;
    return llvm::Error::success();
  case ripRel32Minus4:
    if (auto ec = atomFromSymbolIndex(reloc.symbol, target))
      return ec;
    *addend = (int32_t)*(const little32_t *)fixupContent + 4;
    return llvm::Error::success();
  case ripRel32Anon:
    targetAddress = fixupAddress + 4 + *(const little32_t *)fixupContent;
    return atomFromAddress(reloc.symbol, targetAddress, target, addend);
  case ripRel32Minus1Anon:
    targetAddress = fixupAddress + 5 + *(const little32_t *)fixupContent;
    return atomFromAddress(reloc.symbol, targetAddress, target, addend);
  case ripRel32Minus2Anon:
    targetAddress = fixupAddress + 6 + *(const little32_t *)fixupContent;
    return atomFromAddress(reloc.symbol, targetAddress, target, addend);
  case ripRel32Minus4Anon:
    targetAddress = fixupAddress + 8 + *(const little32_t *)fixupContent;
    return atomFromAddress(reloc.symbol, targetAddress, target, addend);
  case ripRel32GotLoad:
  case ripRel32Got:
  case ripRel32Tlv:
    if (auto ec = atomFromSymbolIndex(reloc.symbol, target))
      return ec;
    *addend = *(const little32_t *)fixupContent;
    return llvm::Error::success();
  case tlvInitSectionOffset:
  case pointer64:
    if (auto ec = atomFromSymbolIndex(reloc.symbol, target))
      return ec;
    // If this is the 3rd pointer of a tlv-thunk (i.e. the pointer to the TLV's
    // initial value) we need to handle it specially.
    if (inAtom->contentType() == DefinedAtom::typeThunkTLV &&
        offsetInAtom == 16) {
      *kind = tlvInitSectionOffset;
      assert(*addend == 0 && "TLV-init has non-zero addend?");
    } else
      *addend = *(const little64_t *)fixupContent;
    return llvm::Error::success();
  case pointer64Anon:
    targetAddress = *(const little64_t *)fixupContent;
    return atomFromAddress(reloc.symbol, targetAddress, target, addend);
  default:
    llvm_unreachable("bad reloc kind");
  }
}
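
// SUBTRACTOR pairs encode a difference "B - A": reloc1 names the symbol
// being subtracted and reloc2 the symbol being added.  Which side is the
// enclosing atom decides the kind below: if the atom is the subtracted
// side, the reference is a forward delta (delta32/delta64, or
// unwindFDEToFunction for CFI atoms); if the atom is the added side, it is
// a negDelta; anything else is rejected as an invalid pointer diff.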

llvm::Error
ArchHandler_x86_64::getPairReferenceInfo(const normalized::Relocation &reloc1,
                                   const normalized::Relocation &reloc2,
                                   const DefinedAtom *inAtom,
                                   uint32_t offsetInAtom,
                                   uint64_t fixupAddress, bool swap,
                                   bool scatterable,
                                   FindAtomBySectionAndAddress atomFromAddress,
                                   FindAtomBySymbolIndex atomFromSymbolIndex,
                                   Reference::KindValue *kind,
                                   const lld::Atom **target,
                                   Reference::Addend *addend) {
  const uint8_t *fixupContent = &inAtom->rawContent()[offsetInAtom];
  uint64_t targetAddress;
  const lld::Atom *fromTarget;
  if (auto ec = atomFromSymbolIndex(reloc1.symbol, &fromTarget))
    return ec;

  switch(relocPattern(reloc1) << 16 | relocPattern(reloc2)) {
  case ((X86_64_RELOC_SUBTRACTOR | rExtern | rLength8) << 16 |
        X86_64_RELOC_UNSIGNED    | rExtern | rLength8): {
    if (auto ec = atomFromSymbolIndex(reloc2.symbol, target))
      return ec;
    uint64_t encodedAddend = (int64_t)*(const little64_t *)fixupContent;
    if (inAtom == fromTarget) {
      if (inAtom->contentType() == DefinedAtom::typeCFI)
        *kind = unwindFDEToFunction;
      else
        *kind = delta64;
      *addend = encodedAddend + offsetInAtom;
    } else if (inAtom == *target) {
      *kind = negDelta64;
      *addend = encodedAddend - offsetInAtom;
      *target = fromTarget;
    } else
      return llvm::make_error<GenericError>("Invalid pointer diff");
    return llvm::Error::success();
  }
  case ((X86_64_RELOC_SUBTRACTOR | rExtern | rLength4) << 16 |
        X86_64_RELOC_UNSIGNED    | rExtern | rLength4): {
    if (auto ec = atomFromSymbolIndex(reloc2.symbol, target))
      return ec;
    uint32_t encodedAddend = (int32_t)*(const little32_t *)fixupContent;
    if (inAtom == fromTarget) {
      *kind = delta32;
      *addend = encodedAddend + offsetInAtom;
    } else if (inAtom == *target) {
      *kind = negDelta32;
      *addend = encodedAddend - offsetInAtom;
      *target = fromTarget;
    } else
      return llvm::make_error<GenericError>("Invalid pointer diff");
    return llvm::Error::success();
  }
  case ((X86_64_RELOC_SUBTRACTOR | rExtern | rLength8) << 16 |
        X86_64_RELOC_UNSIGNED              | rLength8):
    if (fromTarget != inAtom)
      return llvm::make_error<GenericError>("pointer diff not in base atom");
    *kind = delta64Anon;
    targetAddress = offsetInAtom + (int64_t)*(const little64_t *)fixupContent;
    return atomFromAddress(reloc2.symbol, targetAddress, target, addend);
  case ((X86_64_RELOC_SUBTRACTOR | rExtern | rLength4) << 16 |
        X86_64_RELOC_UNSIGNED              | rLength4):
    if (fromTarget != inAtom)
      return llvm::make_error<GenericError>("pointer diff not in base atom");
    *kind = delta32Anon;
    targetAddress = offsetInAtom + (int32_t)*(const little32_t *)fixupContent;
    return atomFromAddress(reloc2.symbol, targetAddress, target, addend);
  default:
    return llvm::make_error<GenericError>("unknown pair");
  }
}

void ArchHandler_x86_64::generateAtomContent(
    const DefinedAtom &atom, bool relocatable, FindAddressForAtom findAddress,
    FindAddressForAtom findSectionAddress, uint64_t imageBaseAddress,
    llvm::MutableArrayRef<uint8_t> atomContentBuffer) {
  // Copy raw bytes.
  std::copy(atom.rawContent().begin(), atom.rawContent().end(),
            atomContentBuffer.begin());
  // Apply fix-ups.
  for (const Reference *ref : atom) {
    uint32_t offset = ref->offsetInAtom();
    const Atom *target = ref->target();
    uint64_t targetAddress = 0;
    if (isa<DefinedAtom>(target))
      targetAddress = findAddress(*target);
    uint64_t atomAddress = findAddress(atom);
    uint64_t fixupAddress = atomAddress + offset;
    if (relocatable) {
      applyFixupRelocatable(*ref, &atomContentBuffer[offset],
                            fixupAddress, targetAddress,
                            atomAddress);
    } else {
      applyFixupFinal(*ref, &atomContentBuffer[offset],
                      fixupAddress, targetAddress,
                      atomAddress, imageBaseAddress, findSectionAddress);
    }
  }
}
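
// generateAtomContent() dispatches each fixup one of two ways: for final
// images applyFixupFinal() writes fully resolved addresses, while for -r
// (relocatable) output applyFixupRelocatable() mostly re-encodes addends
// and leaves the rest to the relocations emitted by
// appendSectionRelocations() below.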

void ArchHandler_x86_64::applyFixupFinal(
    const Reference &ref, uint8_t *loc, uint64_t fixupAddress,
    uint64_t targetAddress, uint64_t inAtomAddress, uint64_t imageBaseAddress,
    FindAddressForAtom findSectionAddress) {
  if (ref.kindNamespace() != Reference::KindNamespace::mach_o)
    return;
  assert(ref.kindArch() == Reference::KindArch::x86_64);
  ulittle32_t *loc32 = reinterpret_cast<ulittle32_t *>(loc);
  ulittle64_t *loc64 = reinterpret_cast<ulittle64_t *>(loc);
  switch (static_cast<X86_64Kind>(ref.kindValue())) {
  case branch32:
  case ripRel32:
  case ripRel32Anon:
  case ripRel32Got:
  case ripRel32GotLoad:
  case ripRel32Tlv:
    *loc32 = targetAddress - (fixupAddress + 4) + ref.addend();
    return;
  case pointer64:
  case pointer64Anon:
    *loc64 = targetAddress + ref.addend();
    return;
  case tlvInitSectionOffset:
    *loc64 = targetAddress - findSectionAddress(*ref.target()) + ref.addend();
    return;
  case ripRel32Minus1:
  case ripRel32Minus1Anon:
    *loc32 = targetAddress - (fixupAddress + 5) + ref.addend();
    return;
  case ripRel32Minus2:
  case ripRel32Minus2Anon:
    *loc32 = targetAddress - (fixupAddress + 6) + ref.addend();
    return;
  case ripRel32Minus4:
  case ripRel32Minus4Anon:
    *loc32 = targetAddress - (fixupAddress + 8) + ref.addend();
    return;
  case delta32:
  case delta32Anon:
    *loc32 = targetAddress - fixupAddress + ref.addend();
    return;
  case delta64:
  case delta64Anon:
  case unwindFDEToFunction:
    *loc64 = targetAddress - fixupAddress + ref.addend();
    return;
  case ripRel32GotLoadNowLea:
    // Change MOVQ to LEA
    assert(loc[-2] == 0x8B);
    loc[-2] = 0x8D;
    *loc32 = targetAddress - (fixupAddress + 4) + ref.addend();
    return;
  case negDelta64:
    *loc64 = fixupAddress - targetAddress + ref.addend();
    return;
  case negDelta32:
    *loc32 = fixupAddress - targetAddress + ref.addend();
    return;
  case lazyPointer:
    // Do nothing
    return;
  case lazyImmediateLocation:
    *loc32 = ref.addend();
    return;
  case imageOffset:
  case imageOffsetGot:
    *loc32 = (targetAddress - imageBaseAddress) + ref.addend();
    return;
  case unwindInfoToEhFrame: {
    uint64_t val = targetAddress - findSectionAddress(*ref.target()) + ref.addend();
    assert(val < 0xffffffU && "offset in __eh_frame too large");
    *loc32 = (*loc32 & 0xff000000U) | val;
    return;
  }
  case invalid:
    // Fall into llvm_unreachable().
    break;
  }
  llvm_unreachable("invalid x86_64 Reference Kind");
}
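
// Note on the ripRel32GotLoadNowLea rewrite above:
// "movq _foo@GOTPCREL(%rip), %rax" encodes as 48 8B 05 <rel32>, so patching
// the opcode byte 0x8B to 0x8D (the byte the assert checks, two before the
// fixup) turns the GOT load into "leaq _foo(%rip), %rax", a direct address
// computation, once the GOT pass has shown the target is in this linkage
// unit.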

void ArchHandler_x86_64::applyFixupRelocatable(const Reference &ref,
                                               uint8_t *loc,
                                               uint64_t fixupAddress,
                                               uint64_t targetAddress,
                                               uint64_t inAtomAddress) {
  if (ref.kindNamespace() != Reference::KindNamespace::mach_o)
    return;
  assert(ref.kindArch() == Reference::KindArch::x86_64);
  ulittle32_t *loc32 = reinterpret_cast<ulittle32_t *>(loc);
  ulittle64_t *loc64 = reinterpret_cast<ulittle64_t *>(loc);
  switch (static_cast<X86_64Kind>(ref.kindValue())) {
  case branch32:
  case ripRel32:
  case ripRel32Got:
  case ripRel32GotLoad:
  case ripRel32Tlv:
    *loc32 = ref.addend();
    return;
  case ripRel32Anon:
    *loc32 = (targetAddress - (fixupAddress + 4)) + ref.addend();
    return;
  case tlvInitSectionOffset:
  case pointer64:
    *loc64 = ref.addend();
    return;
  case pointer64Anon:
    *loc64 = targetAddress + ref.addend();
    return;
  case ripRel32Minus1:
    *loc32 = ref.addend() - 1;
    return;
  case ripRel32Minus1Anon:
    *loc32 = (targetAddress - (fixupAddress + 5)) + ref.addend();
    return;
  case ripRel32Minus2:
    *loc32 = ref.addend() - 2;
    return;
  case ripRel32Minus2Anon:
    *loc32 = (targetAddress - (fixupAddress + 6)) + ref.addend();
    return;
  case ripRel32Minus4:
    *loc32 = ref.addend() - 4;
    return;
  case ripRel32Minus4Anon:
    *loc32 = (targetAddress - (fixupAddress + 8)) + ref.addend();
    return;
  case delta32:
    *loc32 = ref.addend() + inAtomAddress - fixupAddress;
    return;
  case delta32Anon:
    // The value we write here should be the delta to the target
    // after taking into account the difference from the fixup back to the
    // last defined label, i.e., if we have:
    // _base: ...
    // Lfixup: .long Ltarget - .
    // ...
    // Ltarget:
    //
    // Then we want to encode the value (Ltarget + addend) - (Lfixup - _base)
    *loc32 = (targetAddress + ref.addend()) - (fixupAddress - inAtomAddress);
    return;
  case delta64:
    *loc64 = ref.addend() + inAtomAddress - fixupAddress;
    return;
  case delta64Anon:
    // The value we write here should be the delta to the target
    // after taking into account the difference from the fixup back to the
    // last defined label, i.e., if we have:
    // _base: ...
    // Lfixup: .quad Ltarget - .
    // ...
    // Ltarget:
    //
    // Then we want to encode the value (Ltarget + addend) - (Lfixup - _base)
    *loc64 = (targetAddress + ref.addend()) - (fixupAddress - inAtomAddress);
    return;
  case negDelta64:
    *loc64 = ref.addend() + fixupAddress - inAtomAddress;
    return;
  case negDelta32:
    *loc32 = ref.addend() + fixupAddress - inAtomAddress;
    return;
  case ripRel32GotLoadNowLea:
    llvm_unreachable("ripRel32GotLoadNowLea implies GOT pass was run");
    return;
  case lazyPointer:
  case lazyImmediateLocation:
    llvm_unreachable("lazy reference kind implies Stubs pass was run");
    return;
  case imageOffset:
  case imageOffsetGot:
  case unwindInfoToEhFrame:
    llvm_unreachable("fixup implies __unwind_info");
    return;
  case unwindFDEToFunction:
    // Do nothing for now
    return;
  case invalid:
    // Fall into llvm_unreachable().
    break;
  }
  llvm_unreachable("unknown x86_64 Reference Kind");
}
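
// Inverse of getReferenceInfo()/getPairReferenceInfo(): when emitting a
// relocatable object, each reference kind is mapped back to the relocation
// pattern (or SUBTRACTOR pair) that kindFromReloc() accepts, so a later
// link of the .o can redo the fixups.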

void ArchHandler_x86_64::appendSectionRelocations(
                                   const DefinedAtom &atom,
                                   uint64_t atomSectionOffset,
                                   const Reference &ref,
                                   FindSymbolIndexForAtom symbolIndexForAtom,
                                   FindSectionIndexForAtom sectionIndexForAtom,
                                   FindAddressForAtom addressForAtom,
                                   normalized::Relocations &relocs) {
  if (ref.kindNamespace() != Reference::KindNamespace::mach_o)
    return;
  assert(ref.kindArch() == Reference::KindArch::x86_64);
  uint32_t sectionOffset = atomSectionOffset + ref.offsetInAtom();
  switch (static_cast<X86_64Kind>(ref.kindValue())) {
  case branch32:
    appendReloc(relocs, sectionOffset, symbolIndexForAtom(*ref.target()), 0,
                X86_64_RELOC_BRANCH | rPcRel | rExtern | rLength4);
    return;
  case ripRel32:
    appendReloc(relocs, sectionOffset, symbolIndexForAtom(*ref.target()), 0,
                X86_64_RELOC_SIGNED | rPcRel | rExtern | rLength4 );
    return;
  case ripRel32Anon:
    appendReloc(relocs, sectionOffset, sectionIndexForAtom(*ref.target()), 0,
                X86_64_RELOC_SIGNED | rPcRel           | rLength4 );
    return;
  case ripRel32Got:
    appendReloc(relocs, sectionOffset, symbolIndexForAtom(*ref.target()), 0,
                X86_64_RELOC_GOT | rPcRel | rExtern | rLength4 );
    return;
  case ripRel32GotLoad:
    appendReloc(relocs, sectionOffset, symbolIndexForAtom(*ref.target()), 0,
                X86_64_RELOC_GOT_LOAD | rPcRel | rExtern | rLength4 );
    return;
  case ripRel32Tlv:
    appendReloc(relocs, sectionOffset, symbolIndexForAtom(*ref.target()), 0,
                X86_64_RELOC_TLV | rPcRel | rExtern | rLength4 );
    return;
  case tlvInitSectionOffset:
  case pointer64:
    appendReloc(relocs, sectionOffset, symbolIndexForAtom(*ref.target()), 0,
                X86_64_RELOC_UNSIGNED  | rExtern | rLength8);
    return;
  case pointer64Anon:
    appendReloc(relocs, sectionOffset, sectionIndexForAtom(*ref.target()), 0,
                X86_64_RELOC_UNSIGNED | rLength8);
    return;
  case ripRel32Minus1:
    appendReloc(relocs, sectionOffset, symbolIndexForAtom(*ref.target()), 0,
                X86_64_RELOC_SIGNED_1 | rPcRel | rExtern | rLength4 );
    return;
  case ripRel32Minus1Anon:
    appendReloc(relocs, sectionOffset, sectionIndexForAtom(*ref.target()), 0,
                X86_64_RELOC_SIGNED_1 | rPcRel           | rLength4 );
    return;
  case ripRel32Minus2:
    appendReloc(relocs, sectionOffset, symbolIndexForAtom(*ref.target()), 0,
                X86_64_RELOC_SIGNED_2 | rPcRel | rExtern | rLength4 );
    return;
  case ripRel32Minus2Anon:
    appendReloc(relocs, sectionOffset, sectionIndexForAtom(*ref.target()), 0,
                X86_64_RELOC_SIGNED_2 | rPcRel           | rLength4 );
    return;
  case ripRel32Minus4:
    appendReloc(relocs, sectionOffset, symbolIndexForAtom(*ref.target()), 0,
                X86_64_RELOC_SIGNED_4 | rPcRel | rExtern | rLength4 );
    return;
  case ripRel32Minus4Anon:
    appendReloc(relocs, sectionOffset, sectionIndexForAtom(*ref.target()), 0,
                X86_64_RELOC_SIGNED_4 | rPcRel           | rLength4 );
    return;
  case delta32:
    appendReloc(relocs, sectionOffset, symbolIndexForAtom(atom), 0,
                X86_64_RELOC_SUBTRACTOR | rExtern | rLength4 );
    appendReloc(relocs, sectionOffset, symbolIndexForAtom(*ref.target()), 0,
                X86_64_RELOC_UNSIGNED   | rExtern | rLength4 );
    return;
  case delta32Anon:
    appendReloc(relocs, sectionOffset, symbolIndexForAtom(atom), 0,
                X86_64_RELOC_SUBTRACTOR | rExtern | rLength4 );
    appendReloc(relocs, sectionOffset, sectionIndexForAtom(*ref.target()), 0,
                X86_64_RELOC_UNSIGNED             | rLength4 );
    return;
  case delta64:
    appendReloc(relocs, sectionOffset, symbolIndexForAtom(atom), 0,
                X86_64_RELOC_SUBTRACTOR | rExtern | rLength8 );
    appendReloc(relocs, sectionOffset, symbolIndexForAtom(*ref.target()), 0,
                X86_64_RELOC_UNSIGNED   | rExtern | rLength8 );
    return;
  case delta64Anon:
    appendReloc(relocs, sectionOffset, symbolIndexForAtom(atom), 0,
                X86_64_RELOC_SUBTRACTOR | rExtern | rLength8 );
    appendReloc(relocs, sectionOffset, sectionIndexForAtom(*ref.target()), 0,
                X86_64_RELOC_UNSIGNED             | rLength8 );
    return;
  case unwindFDEToFunction:
  case unwindInfoToEhFrame:
    return;
  case negDelta32:
    appendReloc(relocs, sectionOffset, symbolIndexForAtom(*ref.target()), 0,
                X86_64_RELOC_SUBTRACTOR | rExtern | rLength4 );
    appendReloc(relocs, sectionOffset, symbolIndexForAtom(atom), 0,
                X86_64_RELOC_UNSIGNED   | rExtern | rLength4 );
    return;
  case negDelta64:
    appendReloc(relocs, sectionOffset, symbolIndexForAtom(*ref.target()), 0,
                X86_64_RELOC_SUBTRACTOR | rExtern | rLength8 );
    appendReloc(relocs, sectionOffset, symbolIndexForAtom(atom), 0,
                X86_64_RELOC_UNSIGNED   | rExtern | rLength8 );
    return;
  case ripRel32GotLoadNowLea:
    llvm_unreachable("ripRel32GotLoadNowLea implies GOT pass was run");
    return;
  case lazyPointer:
  case lazyImmediateLocation:
    llvm_unreachable("lazy reference kind implies Stubs pass was run");
    return;
  case imageOffset:
  case imageOffsetGot:
    llvm_unreachable("__unwind_info references should have been resolved");
    return;
  case invalid:
    // Fall into llvm_unreachable().
    break;
  }
  llvm_unreachable("unknown x86_64 Reference Kind");
}

std::unique_ptr<mach_o::ArchHandler> ArchHandler::create_x86_64() {
  return std::unique_ptr<mach_o::ArchHandler>(new ArchHandler_x86_64());
}

} // namespace mach_o
} // namespace lld