Coverage Report
Created: 2019-01-18 03:29
Source file: /Users/buildslave/jenkins/workspace/clang-stage2-coverage-R/llvm/tools/lld/lib/ReaderWriter/MachO/ArchHandler_x86_64.cpp
//===- lib/FileFormat/MachO/ArchHandler_x86_64.cpp ------------------------===//
//
//                             The LLVM Linker
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//

#include "ArchHandler.h"
#include "Atoms.h"
#include "MachONormalizedFileBinaryUtils.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/Triple.h"
#include "llvm/Support/Endian.h"
#include "llvm/Support/ErrorHandling.h"

using namespace llvm::MachO;
using namespace lld::mach_o::normalized;

namespace lld {
namespace mach_o {

using llvm::support::ulittle32_t;
using llvm::support::ulittle64_t;

using llvm::support::little32_t;
using llvm::support::little64_t;

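// ArchHandler_x86_64 is the x86_64 implementation of lld's Mach-O ArchHandler
// interface. It translates normalized Mach-O relocations into Reference kinds
// (kindFromReloc / getReferenceInfo / getPairReferenceInfo), applies fixups
// when writing either a final image or a relocatable object (applyFixupFinal /
// applyFixupRelocatable), and emits section relocations for -r output
// (appendSectionRelocations).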
class ArchHandler_x86_64 : public ArchHandler {
public:
  ArchHandler_x86_64() = default;
  ~ArchHandler_x86_64() override = default;

  const Registry::KindStrings *kindStrings() override { return _sKindStrings; }

  Reference::KindArch kindArch() override {
    return Reference::KindArch::x86_64;
  }

  /// Used by GOTPass to locate GOT References
  bool isGOTAccess(const Reference &ref, bool &canBypassGOT) override {
    if (ref.kindNamespace() != Reference::KindNamespace::mach_o)
      return false;
    assert(ref.kindArch() == Reference::KindArch::x86_64);
    switch (ref.kindValue()) {
    case ripRel32GotLoad:
      canBypassGOT = true;
      return true;
    case ripRel32Got:
      canBypassGOT = false;
      return true;
    case imageOffsetGot:
      canBypassGOT = false;
      return true;
    default:
      return false;
    }
  }

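  // Note on GOT bypass: only ripRel32GotLoad (movq _foo@GOTPCREL(%rip), %rax)
  // reports canBypassGOT = true. A load through the GOT can be rewritten to
  // compute the address directly (the MOVQ becomes an LEA; see
  // ripRel32GotLoadNowLea in applyFixupFinal), whereas ripRel32Got uses the
  // GOT slot as a plain operand (e.g. pushq) and has no equivalent rewrite.
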
  bool isTLVAccess(const Reference &ref) const override {
    assert(ref.kindNamespace() == Reference::KindNamespace::mach_o);
    assert(ref.kindArch() == Reference::KindArch::x86_64);
    return ref.kindValue() == ripRel32Tlv;
  }

  void updateReferenceToTLV(const Reference *ref) override {
    assert(ref->kindNamespace() == Reference::KindNamespace::mach_o);
    assert(ref->kindArch() == Reference::KindArch::x86_64);
    assert(ref->kindValue() == ripRel32Tlv);
    const_cast<Reference*>(ref)->setKindValue(ripRel32);
  }

  /// Used by GOTPass to update GOT References
  void updateReferenceToGOT(const Reference *ref, bool targetNowGOT) override {
    assert(ref->kindNamespace() == Reference::KindNamespace::mach_o);
    assert(ref->kindArch() == Reference::KindArch::x86_64);

    switch (ref->kindValue()) {
    case ripRel32Got:
      assert(targetNowGOT && "target must be GOT");
      LLVM_FALLTHROUGH;
    case ripRel32GotLoad:
      const_cast<Reference *>(ref)
        ->setKindValue(targetNowGOT ? ripRel32 : ripRel32GotLoadNowLea);
      break;
    case imageOffsetGot:
      const_cast<Reference *>(ref)->setKindValue(imageOffset);
      break;
    default:
      llvm_unreachable("unknown GOT reference kind");
    }
  }

  bool needsCompactUnwind() override {
    return true;
  }

  Reference::KindValue imageOffsetKind() override {
    return imageOffset;
  }

  Reference::KindValue imageOffsetKindIndirect() override {
    return imageOffsetGot;
  }

  Reference::KindValue unwindRefToPersonalityFunctionKind() override {
    return ripRel32Got;
  }

  Reference::KindValue unwindRefToCIEKind() override {
    return negDelta32;
  }

  Reference::KindValue unwindRefToFunctionKind() override {
    return unwindFDEToFunction;
  }

  Reference::KindValue lazyImmediateLocationKind() override {
    return lazyImmediateLocation;
  }

  Reference::KindValue unwindRefToEhFrameKind() override {
    return unwindInfoToEhFrame;
  }

  Reference::KindValue pointerKind() override {
    return pointer64;
  }

  uint32_t dwarfCompactUnwindType() override {
    return 0x04000000U;
  }

  const StubInfo &stubInfo() override { return _sStubInfo; }

  bool isNonCallBranch(const Reference &) override {
    return false;
  }

  bool isCallSite(const Reference &) override;
  bool isPointer(const Reference &) override;
  bool isPairedReloc(const normalized::Relocation &) override;

  llvm::Error getReferenceInfo(const normalized::Relocation &reloc,
                               const DefinedAtom *inAtom,
                               uint32_t offsetInAtom,
                               uint64_t fixupAddress, bool swap,
                               FindAtomBySectionAndAddress atomFromAddress,
                               FindAtomBySymbolIndex atomFromSymbolIndex,
                               Reference::KindValue *kind,
                               const lld::Atom **target,
                               Reference::Addend *addend) override;
  llvm::Error
      getPairReferenceInfo(const normalized::Relocation &reloc1,
                           const normalized::Relocation &reloc2,
                           const DefinedAtom *inAtom,
                           uint32_t offsetInAtom,
                           uint64_t fixupAddress, bool swap, bool scatterable,
                           FindAtomBySectionAndAddress atomFromAddress,
                           FindAtomBySymbolIndex atomFromSymbolIndex,
                           Reference::KindValue *kind,
                           const lld::Atom **target,
                           Reference::Addend *addend) override;

  bool needsLocalSymbolInRelocatableFile(const DefinedAtom *atom) override {
    return (atom->contentType() == DefinedAtom::typeCString);
  }

  void generateAtomContent(const DefinedAtom &atom, bool relocatable,
                           FindAddressForAtom findAddress,
                           FindAddressForAtom findSectionAddress,
                           uint64_t imageBase,
                    llvm::MutableArrayRef<uint8_t> atomContentBuffer) override;

  void appendSectionRelocations(const DefinedAtom &atom,
                                uint64_t atomSectionOffset,
                                const Reference &ref,
                                FindSymbolIndexForAtom symbolIndexForAtom,
                                FindSectionIndexForAtom sectionIndexForAtom,
                                FindAddressForAtom addressForAtom,
                                normalized::Relocations &relocs) override;

private:
  static const Registry::KindStrings _sKindStrings[];
  static const StubInfo              _sStubInfo;

  enum X86_64Kind: Reference::KindValue {
    invalid,               /// for error condition

    // Kinds found in mach-o .o files:
    branch32,              /// ex: call _foo
    ripRel32,              /// ex: movq _foo(%rip), %rax
    ripRel32Minus1,        /// ex: movb $0x12, _foo(%rip)
    ripRel32Minus2,        /// ex: movw $0x1234, _foo(%rip)
    ripRel32Minus4,        /// ex: movl $0x12345678, _foo(%rip)
    ripRel32Anon,          /// ex: movq L1(%rip), %rax
    ripRel32Minus1Anon,    /// ex: movb $0x12, L1(%rip)
    ripRel32Minus2Anon,    /// ex: movw $0x1234, L1(%rip)
    ripRel32Minus4Anon,    /// ex: movl $0x12345678, L1(%rip)
    ripRel32GotLoad,       /// ex: movq  _foo@GOTPCREL(%rip), %rax
    ripRel32Got,           /// ex: pushq _foo@GOTPCREL(%rip)
    ripRel32Tlv,           /// ex: movq  _foo@TLVP(%rip), %rdi
    pointer64,             /// ex: .quad _foo
    pointer64Anon,         /// ex: .quad L1
    delta64,               /// ex: .quad _foo - .
    delta32,               /// ex: .long _foo - .
    delta64Anon,           /// ex: .quad L1 - .
    delta32Anon,           /// ex: .long L1 - .
    negDelta64,            /// ex: .quad . - _foo
    negDelta32,            /// ex: .long . - _foo

    // Kinds introduced by Passes:
    ripRel32GotLoadNowLea, /// Target of GOT load is in linkage unit so
                           /// "movq  _foo@GOTPCREL(%rip), %rax" can be changed
                           /// to "leaq _foo(%rip), %rax".
    lazyPointer,           /// Location contains a lazy pointer.
    lazyImmediateLocation, /// Location contains immediate value used in stub.

    imageOffset,           /// Location contains offset of atom in final image
    imageOffsetGot,        /// Location contains offset of GOT entry for atom in
                           /// final image (typically personality function).
    unwindFDEToFunction,   /// Nearly delta64, but cannot be rematerialized in
                           /// relocatable object (yay for implicit contracts!).
    unwindInfoToEhFrame,   /// Fix low 24 bits of compact unwind encoding to
                           /// refer to __eh_frame entry.
    tlvInitSectionOffset   /// Location contains offset of tlv init-value atom
                           /// within the __thread_data section.
  };
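
  // The *Anon kinds above have no symbol attached: the relocation is
  // section-relative and the bytes at the fixup site encode the target's
  // address, so getReferenceInfo() below resolves Anon targets with
  // atomFromAddress rather than atomFromSymbolIndex.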

  Reference::KindValue kindFromReloc(const normalized::Relocation &reloc);

  void applyFixupFinal(const Reference &ref, uint8_t *location,
                       uint64_t fixupAddress, uint64_t targetAddress,
                       uint64_t inAtomAddress, uint64_t imageBaseAddress,
                       FindAddressForAtom findSectionAddress);

  void applyFixupRelocatable(const Reference &ref, uint8_t *location,
                             uint64_t fixupAddress,
                             uint64_t targetAddress,
                             uint64_t inAtomAddress);
};

const Registry::KindStrings ArchHandler_x86_64::_sKindStrings[] = {
  LLD_KIND_STRING_ENTRY(invalid), LLD_KIND_STRING_ENTRY(branch32),
  LLD_KIND_STRING_ENTRY(ripRel32), LLD_KIND_STRING_ENTRY(ripRel32Minus1),
  LLD_KIND_STRING_ENTRY(ripRel32Minus2), LLD_KIND_STRING_ENTRY(ripRel32Minus4),
  LLD_KIND_STRING_ENTRY(ripRel32Anon),
  LLD_KIND_STRING_ENTRY(ripRel32Minus1Anon),
  LLD_KIND_STRING_ENTRY(ripRel32Minus2Anon),
  LLD_KIND_STRING_ENTRY(ripRel32Minus4Anon),
  LLD_KIND_STRING_ENTRY(ripRel32GotLoad),
  LLD_KIND_STRING_ENTRY(ripRel32GotLoadNowLea),
  LLD_KIND_STRING_ENTRY(ripRel32Got), LLD_KIND_STRING_ENTRY(ripRel32Tlv),
  LLD_KIND_STRING_ENTRY(lazyPointer),
  LLD_KIND_STRING_ENTRY(lazyImmediateLocation),
  LLD_KIND_STRING_ENTRY(pointer64), LLD_KIND_STRING_ENTRY(pointer64Anon),
  LLD_KIND_STRING_ENTRY(delta32), LLD_KIND_STRING_ENTRY(delta64),
  LLD_KIND_STRING_ENTRY(delta32Anon), LLD_KIND_STRING_ENTRY(delta64Anon),
  LLD_KIND_STRING_ENTRY(negDelta64),
  LLD_KIND_STRING_ENTRY(negDelta32),
  LLD_KIND_STRING_ENTRY(imageOffset), LLD_KIND_STRING_ENTRY(imageOffsetGot),
  LLD_KIND_STRING_ENTRY(unwindFDEToFunction),
  LLD_KIND_STRING_ENTRY(unwindInfoToEhFrame),
  LLD_KIND_STRING_ENTRY(tlvInitSectionOffset),
  LLD_KIND_STRING_END
};

const ArchHandler::StubInfo ArchHandler_x86_64::_sStubInfo = {
  "dyld_stub_binder",

  // Lazy pointer references
  { Reference::KindArch::x86_64, pointer64, 0, 0 },
  { Reference::KindArch::x86_64, lazyPointer, 0, 0 },

  // GOT pointer to dyld_stub_binder
  { Reference::KindArch::x86_64, pointer64, 0, 0 },

  // x86_64 code alignment 2^1
  1,

  // Stub size and code
  6,
  { 0xff, 0x25, 0x00, 0x00, 0x00, 0x00 },       // jmp *lazyPointer
  { Reference::KindArch::x86_64, ripRel32, 2, 0 },
  { false, 0, 0, 0 },

  // Stub Helper size and code
  10,
  { 0x68, 0x00, 0x00, 0x00, 0x00,               // pushq $lazy-info-offset
    0xE9, 0x00, 0x00, 0x00, 0x00 },             // jmp helperhelper
  { Reference::KindArch::x86_64, lazyImmediateLocation, 1, 0 },
  { Reference::KindArch::x86_64, branch32, 6, 0 },

  // Stub helper image cache content type
  DefinedAtom::typeNonLazyPointer,

  // Stub Helper-Common size and code
  16,
  // Stub helper alignment
  2,
  { 0x4C, 0x8D, 0x1D, 0x00, 0x00, 0x00, 0x00,   // leaq cache(%rip),%r11
    0x41, 0x53,                                 // push %r11
    0xFF, 0x25, 0x00, 0x00, 0x00, 0x00,         // jmp *binder(%rip)
    0x90 },                                     // nop
  { Reference::KindArch::x86_64, ripRel32, 3, 0 },
  { false, 0, 0, 0 },
  { Reference::KindArch::x86_64, ripRel32, 11, 0 },
  { false, 0, 0, 0 }

};
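
// A sketch of the lazy-binding code _sStubInfo describes (byte offsets match
// the fixup offsets in the entries above; the label names are illustrative):
//
//   stub (6 bytes):
//     ff 25 <rel32>      jmpq  *lazyPointer(%rip)       // ripRel32 at +2
//   stub helper (10 bytes):
//     68 <imm32>         pushq $lazy-info-offset        // lazyImmediateLocation at +1
//     e9 <rel32>         jmp   stubHelperCommon         // branch32 at +6
//   stub helper common (16 bytes, 2^2 aligned):
//     4c 8d 1d <rel32>   leaq  cache(%rip), %r11        // ripRel32 at +3
//     41 53              pushq %r11
//     ff 25 <rel32>      jmpq  *dyld_stub_binder(%rip)  // ripRel32 at +11
//     90                 nop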

bool ArchHandler_x86_64::isCallSite(const Reference &ref) {
  if (ref.kindNamespace() != Reference::KindNamespace::mach_o)
    return false;
  assert(ref.kindArch() == Reference::KindArch::x86_64);
  return (ref.kindValue() == branch32);
}

bool ArchHandler_x86_64::isPointer(const Reference &ref) {
  if (ref.kindNamespace() != Reference::KindNamespace::mach_o)
    return false;
  assert(ref.kindArch() == Reference::KindArch::x86_64);
  Reference::KindValue kind = ref.kindValue();
  return (kind == pointer64 || kind == pointer64Anon);
}

bool ArchHandler_x86_64::isPairedReloc(const Relocation &reloc) {
  return (reloc.type == X86_64_RELOC_SUBTRACTOR);
}

Reference::KindValue
ArchHandler_x86_64::kindFromReloc(const Relocation &reloc) {
  switch(relocPattern(reloc)) {
  case X86_64_RELOC_BRANCH   | rPcRel | rExtern | rLength4:
    return branch32;
  case X86_64_RELOC_SIGNED   | rPcRel | rExtern | rLength4:
    return ripRel32;
  case X86_64_RELOC_SIGNED   | rPcRel |           rLength4:
    return ripRel32Anon;
  case X86_64_RELOC_SIGNED_1 | rPcRel | rExtern | rLength4:
    return ripRel32Minus1;
  case X86_64_RELOC_SIGNED_1 | rPcRel |           rLength4:
    return ripRel32Minus1Anon;
  case X86_64_RELOC_SIGNED_2 | rPcRel | rExtern | rLength4:
    return ripRel32Minus2;
  case X86_64_RELOC_SIGNED_2 | rPcRel |           rLength4:
    return ripRel32Minus2Anon;
  case X86_64_RELOC_SIGNED_4 | rPcRel | rExtern | rLength4:
    return ripRel32Minus4;
  case X86_64_RELOC_SIGNED_4 | rPcRel |           rLength4:
    return ripRel32Minus4Anon;
  case X86_64_RELOC_GOT_LOAD | rPcRel | rExtern | rLength4:
    return ripRel32GotLoad;
  case X86_64_RELOC_GOT      | rPcRel | rExtern | rLength4:
    return ripRel32Got;
  case X86_64_RELOC_TLV      | rPcRel | rExtern | rLength4:
    return ripRel32Tlv;
  case X86_64_RELOC_UNSIGNED          | rExtern | rLength8:
    return pointer64;
  case X86_64_RELOC_UNSIGNED                    | rLength8:
    return pointer64Anon;
  default:
    return invalid;
  }
}
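
// Each case in kindFromReloc() matches a complete relocation shape:
// relocPattern() (from MachONormalizedFileBinaryUtils.h) packs the relocation
// type together with its pcrel/extern/length attributes into one value, so a
// single switch can distinguish, for example, an extern SIGNED reloc
// (ripRel32) from the same reloc without a symbol (ripRel32Anon).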

llvm::Error
ArchHandler_x86_64::getReferenceInfo(const Relocation &reloc,
                                    const DefinedAtom *inAtom,
                                    uint32_t offsetInAtom,
                                    uint64_t fixupAddress, bool swap,
                                    FindAtomBySectionAndAddress atomFromAddress,
                                    FindAtomBySymbolIndex atomFromSymbolIndex,
                                    Reference::KindValue *kind,
                                    const lld::Atom **target,
                                    Reference::Addend *addend) {
  *kind = kindFromReloc(reloc);
  if (*kind == invalid)
    return llvm::make_error<GenericError>("unknown type");
  const uint8_t *fixupContent = &inAtom->rawContent()[offsetInAtom];
  uint64_t targetAddress;
  switch (*kind) {
  case branch32:
  case ripRel32:
    if (auto ec = atomFromSymbolIndex(reloc.symbol, target))
      return ec;
    *addend = *(const little32_t *)fixupContent;
    return llvm::Error::success();
  case ripRel32Minus1:
    if (auto ec = atomFromSymbolIndex(reloc.symbol, target))
      return ec;
    *addend = (int32_t)*(const little32_t *)fixupContent + 1;
    return llvm::Error::success();
  case ripRel32Minus2:
    if (auto ec = atomFromSymbolIndex(reloc.symbol, target))
      return ec;
    *addend = (int32_t)*(const little32_t *)fixupContent + 2;
    return llvm::Error::success();
  case ripRel32Minus4:
    if (auto ec = atomFromSymbolIndex(reloc.symbol, target))
      return ec;
    *addend = (int32_t)*(const little32_t *)fixupContent + 4;
    return llvm::Error::success();
  case ripRel32Anon:
    targetAddress = fixupAddress + 4 + *(const little32_t *)fixupContent;
    return atomFromAddress(reloc.symbol, targetAddress, target, addend);
  case ripRel32Minus1Anon:
    targetAddress = fixupAddress + 5 + *(const little32_t *)fixupContent;
    return atomFromAddress(reloc.symbol, targetAddress, target, addend);
  case ripRel32Minus2Anon:
    targetAddress = fixupAddress + 6 + *(const little32_t *)fixupContent;
    return atomFromAddress(reloc.symbol, targetAddress, target, addend);
  case ripRel32Minus4Anon:
    targetAddress = fixupAddress + 8 + *(const little32_t *)fixupContent;
    return atomFromAddress(reloc.symbol, targetAddress, target, addend);
  case ripRel32GotLoad:
  case ripRel32Got:
  case ripRel32Tlv:
    if (auto ec = atomFromSymbolIndex(reloc.symbol, target))
      return ec;
    *addend = *(const little32_t *)fixupContent;
    return llvm::Error::success();
  case tlvInitSectionOffset:
  case pointer64:
    if (auto ec = atomFromSymbolIndex(reloc.symbol, target))
      return ec;
    // If this is the 3rd pointer of a tlv-thunk (i.e. the pointer to the TLV's
    // initial value) we need to handle it specially.
    if (inAtom->contentType() == DefinedAtom::typeThunkTLV &&
        offsetInAtom == 16) {
      *kind = tlvInitSectionOffset;
      assert(*addend == 0 && "TLV-init has non-zero addend?");
    } else
      *addend = *(const little64_t *)fixupContent;
    return llvm::Error::success();
  case pointer64Anon:
    targetAddress = *(const little64_t *)fixupContent;
    return atomFromAddress(reloc.symbol, targetAddress, target, addend);
  default:
    llvm_unreachable("bad reloc kind");
  }
}
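
// A note on the ripRel32Minus{1,2,4} arithmetic above: on x86-64 a
// rip-relative displacement is measured from the end of the instruction. When
// 1, 2, or 4 immediate bytes follow the 4-byte displacement (movb/movw/movl
// stores), the instruction ends that many bytes later, so the anonymous
// variants compute the target as fixupAddress + 4 + N + displacement, and the
// extern variants fold the same N into the addend.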

llvm::Error
ArchHandler_x86_64::getPairReferenceInfo(const normalized::Relocation &reloc1,
                                   const normalized::Relocation &reloc2,
                                   const DefinedAtom *inAtom,
                                   uint32_t offsetInAtom,
                                   uint64_t fixupAddress, bool swap,
                                   bool scatterable,
                                   FindAtomBySectionAndAddress atomFromAddress,
                                   FindAtomBySymbolIndex atomFromSymbolIndex,
                                   Reference::KindValue *kind,
                                   const lld::Atom **target,
                                   Reference::Addend *addend) {
  const uint8_t *fixupContent = &inAtom->rawContent()[offsetInAtom];
  uint64_t targetAddress;
  const lld::Atom *fromTarget;
  if (auto ec = atomFromSymbolIndex(reloc1.symbol, &fromTarget))
    return ec;

  switch(relocPattern(reloc1) << 16 | relocPattern(reloc2)) {
  case ((X86_64_RELOC_SUBTRACTOR | rExtern | rLength8) << 16 |
        X86_64_RELOC_UNSIGNED    | rExtern | rLength8): {
    if (auto ec = atomFromSymbolIndex(reloc2.symbol, target))
      return ec;
    uint64_t encodedAddend = (int64_t)*(const little64_t *)fixupContent;
    if (inAtom == fromTarget) {
      if (inAtom->contentType() == DefinedAtom::typeCFI)
        *kind = unwindFDEToFunction;
      else
        *kind = delta64;
      *addend = encodedAddend + offsetInAtom;
    } else if (inAtom == *target) {
      *kind = negDelta64;
      *addend = encodedAddend - offsetInAtom;
      *target = fromTarget;
    } else
      return llvm::make_error<GenericError>("Invalid pointer diff");
    return llvm::Error::success();
  }
  case ((X86_64_RELOC_SUBTRACTOR | rExtern | rLength4) << 16 |
        X86_64_RELOC_UNSIGNED    | rExtern | rLength4): {
    if (auto ec = atomFromSymbolIndex(reloc2.symbol, target))
      return ec;
    uint32_t encodedAddend = (int32_t)*(const little32_t *)fixupContent;
    if (inAtom == fromTarget) {
      *kind = delta32;
      *addend = encodedAddend + offsetInAtom;
    } else if (inAtom == *target) {
      *kind = negDelta32;
      *addend = encodedAddend - offsetInAtom;
      *target = fromTarget;
    } else
      return llvm::make_error<GenericError>("Invalid pointer diff");
    return llvm::Error::success();
  }
  case ((X86_64_RELOC_SUBTRACTOR | rExtern | rLength8) << 16 |
        X86_64_RELOC_UNSIGNED              | rLength8):
    if (fromTarget != inAtom)
      return llvm::make_error<GenericError>("pointer diff not in base atom");
    *kind = delta64Anon;
    targetAddress = offsetInAtom + (int64_t)*(const little64_t *)fixupContent;
    return atomFromAddress(reloc2.symbol, targetAddress, target, addend);
  case ((X86_64_RELOC_SUBTRACTOR | rExtern | rLength4) << 16 |
        X86_64_RELOC_UNSIGNED              | rLength4):
    if (fromTarget != inAtom)
      return llvm::make_error<GenericError>("pointer diff not in base atom");
    *kind = delta32Anon;
    targetAddress = offsetInAtom + (int32_t)*(const little32_t *)fixupContent;
    return atomFromAddress(reloc2.symbol, targetAddress, target, addend);
  default:
    return llvm::make_error<GenericError>("unknown pair");
  }
}
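
// A SUBTRACTOR pair encodes the difference "target - from": reloc1
// (X86_64_RELOC_SUBTRACTOR) names the symbol being subtracted and reloc2
// (X86_64_RELOC_UNSIGNED) names the target, so ".quad _foo - ." becomes
// SUBTRACTOR(current atom) + UNSIGNED(_foo). getPairReferenceInfo() then
// classifies the pair as delta* when the containing atom is the subtracted
// side and as negDelta* when it is the target.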

void ArchHandler_x86_64::generateAtomContent(
    const DefinedAtom &atom, bool relocatable, FindAddressForAtom findAddress,
    FindAddressForAtom findSectionAddress, uint64_t imageBaseAddress,
    llvm::MutableArrayRef<uint8_t> atomContentBuffer) {
  // Copy raw bytes.
  std::copy(atom.rawContent().begin(), atom.rawContent().end(),
            atomContentBuffer.begin());
  // Apply fix-ups.
  for (const Reference *ref : atom) {
    uint32_t offset = ref->offsetInAtom();
    const Atom *target = ref->target();
    uint64_t targetAddress = 0;
    if (isa<DefinedAtom>(target))
      targetAddress = findAddress(*target);
    uint64_t atomAddress = findAddress(atom);
    uint64_t fixupAddress = atomAddress + offset;
    if (relocatable) {
      applyFixupRelocatable(*ref, &atomContentBuffer[offset],
                            fixupAddress, targetAddress,
                            atomAddress);
    } else {
      applyFixupFinal(*ref, &atomContentBuffer[offset],
                      fixupAddress, targetAddress,
                      atomAddress, imageBaseAddress, findSectionAddress);
    }
  }
}
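
// The two fixup paths below differ in what they may assume. applyFixupFinal
// runs when producing a final image: every address is known, so complete
// values (including stub, GOT, and unwind fixups) are written.
// applyFixupRelocatable runs for -r output, where the relocations preserved
// by appendSectionRelocations() still identify each target, so rip-relative
// and pointer fixups only record their addends.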

void ArchHandler_x86_64::applyFixupFinal(
    const Reference &ref, uint8_t *loc, uint64_t fixupAddress,
    uint64_t targetAddress, uint64_t inAtomAddress, uint64_t imageBaseAddress,
    FindAddressForAtom findSectionAddress) {
  if (ref.kindNamespace() != Reference::KindNamespace::mach_o)
    return;
  assert(ref.kindArch() == Reference::KindArch::x86_64);
  ulittle32_t *loc32 = reinterpret_cast<ulittle32_t *>(loc);
  ulittle64_t *loc64 = reinterpret_cast<ulittle64_t *>(loc);
  switch (static_cast<X86_64Kind>(ref.kindValue())) {
  case branch32:
  case ripRel32:
  case ripRel32Anon:
  case ripRel32Got:
  case ripRel32GotLoad:
  case ripRel32Tlv:
    *loc32 = targetAddress - (fixupAddress + 4) + ref.addend();
    return;
  case pointer64:
  case pointer64Anon:
    *loc64 = targetAddress + ref.addend();
    return;
  case tlvInitSectionOffset:
    *loc64 = targetAddress - findSectionAddress(*ref.target()) + ref.addend();
    return;
  case ripRel32Minus1:
  case ripRel32Minus1Anon:
    *loc32 = targetAddress - (fixupAddress + 5) + ref.addend();
    return;
  case ripRel32Minus2:
  case ripRel32Minus2Anon:
    *loc32 = targetAddress - (fixupAddress + 6) + ref.addend();
    return;
  case ripRel32Minus4:
  case ripRel32Minus4Anon:
    *loc32 = targetAddress - (fixupAddress + 8) + ref.addend();
    return;
  case delta32:
  case delta32Anon:
    *loc32 = targetAddress - fixupAddress + ref.addend();
    return;
  case delta64:
  case delta64Anon:
  case unwindFDEToFunction:
    *loc64 = targetAddress - fixupAddress + ref.addend();
    return;
  case ripRel32GotLoadNowLea:
    // Change MOVQ to LEA
    assert(loc[-2] == 0x8B);
    loc[-2] = 0x8D;
    *loc32 = targetAddress - (fixupAddress + 4) + ref.addend();
    return;
  case negDelta64:
    *loc64 = fixupAddress - targetAddress + ref.addend();
    return;
  case negDelta32:
    *loc32 = fixupAddress - targetAddress + ref.addend();
    return;
  case lazyPointer:
    // Do nothing
    return;
  case lazyImmediateLocation:
    *loc32 = ref.addend();
    return;
  case imageOffset:
  case imageOffsetGot:
    *loc32 = (targetAddress - imageBaseAddress) + ref.addend();
    return;
  case unwindInfoToEhFrame: {
    uint64_t val = targetAddress - findSectionAddress(*ref.target()) +
                   ref.addend();
    assert(val < 0xffffffU && "offset in __eh_frame too large");
    *loc32 = (*loc32 & 0xff000000U) | val;
    return;
  }
  case invalid:
    // Fall into llvm_unreachable().
    break;
  }
  llvm_unreachable("invalid x86_64 Reference Kind");
}
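
// Worked example of the rip-relative math above (addresses illustrative): for
// "call _foo" with the call at 0x1000, the branch32 fixup patches the four
// displacement bytes at 0x1001. With fixupAddress = 0x1001 and _foo at
// 0x2000, the value written is 0x2000 - (0x1001 + 4) = 0xFFB, because the CPU
// adds the displacement to the address of the *next* instruction, which
// starts right after the displacement field.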

void ArchHandler_x86_64::applyFixupRelocatable(const Reference &ref,
                                               uint8_t *loc,
                                               uint64_t fixupAddress,
                                               uint64_t targetAddress,
                                               uint64_t inAtomAddress) {
  if (ref.kindNamespace() != Reference::KindNamespace::mach_o)
    return;
  assert(ref.kindArch() == Reference::KindArch::x86_64);
  ulittle32_t *loc32 = reinterpret_cast<ulittle32_t *>(loc);
  ulittle64_t *loc64 = reinterpret_cast<ulittle64_t *>(loc);
  switch (static_cast<X86_64Kind>(ref.kindValue())) {
  case branch32:
  case ripRel32:
  case ripRel32Got:
  case ripRel32GotLoad:
  case ripRel32Tlv:
    *loc32 = ref.addend();
    return;
  case ripRel32Anon:
    *loc32 = (targetAddress - (fixupAddress + 4)) + ref.addend();
    return;
  case tlvInitSectionOffset:
  case pointer64:
    *loc64 = ref.addend();
    return;
  case pointer64Anon:
    *loc64 = targetAddress + ref.addend();
    return;
  case ripRel32Minus1:
    *loc32 = ref.addend() - 1;
    return;
  case ripRel32Minus1Anon:
    *loc32 = (targetAddress - (fixupAddress + 5)) + ref.addend();
    return;
  case ripRel32Minus2:
    *loc32 = ref.addend() - 2;
    return;
  case ripRel32Minus2Anon:
    *loc32 = (targetAddress - (fixupAddress + 6)) + ref.addend();
    return;
  case ripRel32Minus4:
    *loc32 = ref.addend() - 4;
    return;
  case ripRel32Minus4Anon:
    *loc32 = (targetAddress - (fixupAddress + 8)) + ref.addend();
    return;
  case delta32:
    *loc32 = ref.addend() + inAtomAddress - fixupAddress;
    return;
  case delta32Anon:
    // The value we write here should be the delta to the target after taking
    // into account the difference from the fixup back to the last defined
    // label, i.e., if we have:
    // _base: ...
    // Lfixup: .long Ltarget - .
    // ...
    // Ltarget:
    //
    // then we want to encode the value (Ltarget + addend) - (Lfixup - _base).
    *loc32 = (targetAddress + ref.addend()) - (fixupAddress - inAtomAddress);
    return;
  case delta64:
    *loc64 = ref.addend() + inAtomAddress - fixupAddress;
    return;
  case delta64Anon:
    // Same encoding as delta32Anon above, but for a .quad (64-bit) slot.
    *loc64 = (targetAddress + ref.addend()) - (fixupAddress - inAtomAddress);
    return;
  case negDelta64:
    *loc64 = ref.addend() + fixupAddress - inAtomAddress;
    return;
  case negDelta32:
    *loc32 = ref.addend() + fixupAddress - inAtomAddress;
    return;
  case ripRel32GotLoadNowLea:
    llvm_unreachable("ripRel32GotLoadNowLea implies GOT pass was run");
    return;
  case lazyPointer:
  case lazyImmediateLocation:
    llvm_unreachable("lazy reference kind implies Stubs pass was run");
    return;
  case imageOffset:
  case imageOffsetGot:
  case unwindInfoToEhFrame:
    llvm_unreachable("fixup implies __unwind_info");
    return;
  case unwindFDEToFunction:
    // Do nothing for now
    return;
  case invalid:
    // Fall into llvm_unreachable().
    break;
  }
  llvm_unreachable("unknown x86_64 Reference Kind");
}
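
// The llvm_unreachable cases above encode a pipeline invariant: kinds from
// the "introduced by Passes" group (ripRel32GotLoadNowLea, lazyPointer,
// lazyImmediateLocation, imageOffset*, unwindInfoToEhFrame) are created by
// the GOT, Stubs, and unwind passes, which run only when producing a final
// image, so they should never reach the relocatable (-r) writer.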

void ArchHandler_x86_64::appendSectionRelocations(
                                   const DefinedAtom &atom,
                                   uint64_t atomSectionOffset,
                                   const Reference &ref,
                                   FindSymbolIndexForAtom symbolIndexForAtom,
                                   FindSectionIndexForAtom sectionIndexForAtom,
                                   FindAddressForAtom addressForAtom,
                                   normalized::Relocations &relocs) {
  if (ref.kindNamespace() != Reference::KindNamespace::mach_o)
    return;
  assert(ref.kindArch() == Reference::KindArch::x86_64);
  uint32_t sectionOffset = atomSectionOffset + ref.offsetInAtom();
  switch (static_cast<X86_64Kind>(ref.kindValue())) {
  case branch32:
    appendReloc(relocs, sectionOffset, symbolIndexForAtom(*ref.target()), 0,
                X86_64_RELOC_BRANCH | rPcRel | rExtern | rLength4);
    return;
  case ripRel32:
    appendReloc(relocs, sectionOffset, symbolIndexForAtom(*ref.target()), 0,
                X86_64_RELOC_SIGNED | rPcRel | rExtern | rLength4);
    return;
  case ripRel32Anon:
    appendReloc(relocs, sectionOffset, sectionIndexForAtom(*ref.target()), 0,
                X86_64_RELOC_SIGNED | rPcRel           | rLength4);
    return;
  case ripRel32Got:
    appendReloc(relocs, sectionOffset, symbolIndexForAtom(*ref.target()), 0,
                X86_64_RELOC_GOT | rPcRel | rExtern | rLength4);
    return;
  case ripRel32GotLoad:
    appendReloc(relocs, sectionOffset, symbolIndexForAtom(*ref.target()), 0,
                X86_64_RELOC_GOT_LOAD | rPcRel | rExtern | rLength4);
    return;
  case ripRel32Tlv:
    appendReloc(relocs, sectionOffset, symbolIndexForAtom(*ref.target()), 0,
                X86_64_RELOC_TLV | rPcRel | rExtern | rLength4);
    return;
  case tlvInitSectionOffset:
  case pointer64:
    appendReloc(relocs, sectionOffset, symbolIndexForAtom(*ref.target()), 0,
                X86_64_RELOC_UNSIGNED  | rExtern | rLength8);
    return;
  case pointer64Anon:
    appendReloc(relocs, sectionOffset, sectionIndexForAtom(*ref.target()), 0,
                X86_64_RELOC_UNSIGNED | rLength8);
    return;
  case ripRel32Minus1:
    appendReloc(relocs, sectionOffset, symbolIndexForAtom(*ref.target()), 0,
                X86_64_RELOC_SIGNED_1 | rPcRel | rExtern | rLength4);
    return;
  case ripRel32Minus1Anon:
    appendReloc(relocs, sectionOffset, sectionIndexForAtom(*ref.target()), 0,
                X86_64_RELOC_SIGNED_1 | rPcRel           | rLength4);
    return;
  case ripRel32Minus2:
    appendReloc(relocs, sectionOffset, symbolIndexForAtom(*ref.target()), 0,
                X86_64_RELOC_SIGNED_2 | rPcRel | rExtern | rLength4);
    return;
  case ripRel32Minus2Anon:
    appendReloc(relocs, sectionOffset, sectionIndexForAtom(*ref.target()), 0,
                X86_64_RELOC_SIGNED_2 | rPcRel           | rLength4);
    return;
  case ripRel32Minus4:
    appendReloc(relocs, sectionOffset, symbolIndexForAtom(*ref.target()), 0,
                X86_64_RELOC_SIGNED_4 | rPcRel | rExtern | rLength4);
    return;
  case ripRel32Minus4Anon:
    appendReloc(relocs, sectionOffset, sectionIndexForAtom(*ref.target()), 0,
                X86_64_RELOC_SIGNED_4 | rPcRel           | rLength4);
    return;
  case delta32:
    appendReloc(relocs, sectionOffset, symbolIndexForAtom(atom), 0,
                X86_64_RELOC_SUBTRACTOR | rExtern | rLength4);
    appendReloc(relocs, sectionOffset, symbolIndexForAtom(*ref.target()), 0,
                X86_64_RELOC_UNSIGNED   | rExtern | rLength4);
    return;
  case delta32Anon:
    appendReloc(relocs, sectionOffset, symbolIndexForAtom(atom), 0,
                X86_64_RELOC_SUBTRACTOR | rExtern | rLength4);
    appendReloc(relocs, sectionOffset, sectionIndexForAtom(*ref.target()), 0,
                X86_64_RELOC_UNSIGNED             | rLength4);
    return;
  case delta64:
    appendReloc(relocs, sectionOffset, symbolIndexForAtom(atom), 0,
                X86_64_RELOC_SUBTRACTOR | rExtern | rLength8);
    appendReloc(relocs, sectionOffset, symbolIndexForAtom(*ref.target()), 0,
                X86_64_RELOC_UNSIGNED   | rExtern | rLength8);
    return;
  case delta64Anon:
    appendReloc(relocs, sectionOffset, symbolIndexForAtom(atom), 0,
                X86_64_RELOC_SUBTRACTOR | rExtern | rLength8);
    appendReloc(relocs, sectionOffset, sectionIndexForAtom(*ref.target()), 0,
                X86_64_RELOC_UNSIGNED             | rLength8);
    return;
  case unwindFDEToFunction:
  case unwindInfoToEhFrame:
    return;
  case negDelta32:
    appendReloc(relocs, sectionOffset, symbolIndexForAtom(*ref.target()), 0,
                X86_64_RELOC_SUBTRACTOR | rExtern | rLength4);
    appendReloc(relocs, sectionOffset, symbolIndexForAtom(atom), 0,
                X86_64_RELOC_UNSIGNED   | rExtern | rLength4);
    return;
  case negDelta64:
    appendReloc(relocs, sectionOffset, symbolIndexForAtom(*ref.target()), 0,
                X86_64_RELOC_SUBTRACTOR | rExtern | rLength8);
    appendReloc(relocs, sectionOffset, symbolIndexForAtom(atom), 0,
                X86_64_RELOC_UNSIGNED   | rExtern | rLength8);
    return;
  case ripRel32GotLoadNowLea:
    llvm_unreachable("ripRel32GotLoadNowLea implies GOT pass was run");
    return;
  case lazyPointer:
  case lazyImmediateLocation:
    llvm_unreachable("lazy reference kind implies Stubs pass was run");
    return;
  case imageOffset:
  case imageOffsetGot:
    llvm_unreachable("__unwind_info references should have been resolved");
    return;
  case invalid:
    // Fall into llvm_unreachable().
    break;
  }
  llvm_unreachable("unknown x86_64 Reference Kind");
}
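
// appendSectionRelocations() is essentially the inverse of kindFromReloc()
// and getPairReferenceInfo(): each Reference kind is written back out with
// the relocation pattern the reader matched, so atoms can round-trip through
// a relocatable (-r) link. The unwind kinds are the exception; as the comment
// on unwindFDEToFunction notes, they cannot be rematerialized in a
// relocatable object, so nothing is emitted for them.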

std::unique_ptr<mach_o::ArchHandler> ArchHandler::create_x86_64() {
  return std::unique_ptr<mach_o::ArchHandler>(new ArchHandler_x86_64());
}

} // namespace mach_o
} // namespace lld