Coverage Report

Created: 2017-09-21 03:39

/Users/buildslave/jenkins/sharedspace/clang-stage2-coverage-R@2/llvm/tools/lld/lib/ReaderWriter/MachO/ArchHandler_x86.cpp

//===- lib/FileFormat/MachO/ArchHandler_x86.cpp ---------------------------===//
//
//                             The LLVM Linker
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//

#include "ArchHandler.h"
#include "Atoms.h"
#include "MachONormalizedFileBinaryUtils.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/Triple.h"
#include "llvm/Support/Endian.h"
#include "llvm/Support/ErrorHandling.h"

using namespace llvm::MachO;
using namespace lld::mach_o::normalized;

namespace lld {
namespace mach_o {

using llvm::support::ulittle16_t;
using llvm::support::ulittle32_t;

using llvm::support::little16_t;
using llvm::support::little32_t;

class ArchHandler_x86 : public ArchHandler {
public:
  ArchHandler_x86() = default;
  ~ArchHandler_x86() override = default;

  const Registry::KindStrings *kindStrings() override { return _sKindStrings; }

  Reference::KindArch kindArch() override { return Reference::KindArch::x86; }

  const StubInfo &stubInfo() override { return _sStubInfo; }
  bool isCallSite(const Reference &) override;
  bool isNonCallBranch(const Reference &) override {
    return false;
  }

  bool isPointer(const Reference &) override;
  bool isPairedReloc(const normalized::Relocation &) override;

  bool needsCompactUnwind() override {
    return false;
  }

  Reference::KindValue imageOffsetKind() override {
    return invalid;
  }

  Reference::KindValue imageOffsetKindIndirect() override {
    return invalid;
  }

  Reference::KindValue unwindRefToPersonalityFunctionKind() override {
    return invalid;
  }

  Reference::KindValue unwindRefToCIEKind() override {
    return negDelta32;
  }

  Reference::KindValue unwindRefToFunctionKind() override {
    return delta32;
  }

  Reference::KindValue lazyImmediateLocationKind() override {
    return lazyImmediateLocation;
  }

  Reference::KindValue unwindRefToEhFrameKind() override {
    return invalid;
  }

  Reference::KindValue pointerKind() override {
    return invalid;
  }

  uint32_t dwarfCompactUnwindType() override {
    return 0x04000000U;
  }

  llvm::Error getReferenceInfo(const normalized::Relocation &reloc,
                               const DefinedAtom *inAtom,
                               uint32_t offsetInAtom,
                               uint64_t fixupAddress, bool swap,
                               FindAtomBySectionAndAddress atomFromAddress,
                               FindAtomBySymbolIndex atomFromSymbolIndex,
                               Reference::KindValue *kind,
                               const lld::Atom **target,
                               Reference::Addend *addend) override;
  llvm::Error
      getPairReferenceInfo(const normalized::Relocation &reloc1,
                           const normalized::Relocation &reloc2,
                           const DefinedAtom *inAtom,
                           uint32_t offsetInAtom,
                           uint64_t fixupAddress, bool swap, bool scatterable,
                           FindAtomBySectionAndAddress atomFromAddress,
                           FindAtomBySymbolIndex atomFromSymbolIndex,
                           Reference::KindValue *kind,
                           const lld::Atom **target,
                           Reference::Addend *addend) override;

  void generateAtomContent(const DefinedAtom &atom, bool relocatable,
                           FindAddressForAtom findAddress,
                           FindAddressForAtom findSectionAddress,
                           uint64_t imageBaseAddress,
                    llvm::MutableArrayRef<uint8_t> atomContentBuffer) override;

  void appendSectionRelocations(const DefinedAtom &atom,
                                uint64_t atomSectionOffset,
                                const Reference &ref,
                                FindSymbolIndexForAtom symbolIndexForAtom,
                                FindSectionIndexForAtom sectionIndexForAtom,
                                FindAddressForAtom addressForAtom,
                                normalized::Relocations &relocs) override;

  bool isDataInCodeTransition(Reference::KindValue refKind) override {
    return refKind == modeCode || refKind == modeData;
  }

  Reference::KindValue dataInCodeTransitionStart(
                                        const MachODefinedAtom &atom) override {
    return modeData;
  }

  Reference::KindValue dataInCodeTransitionEnd(
                                        const MachODefinedAtom &atom) override {
    return modeCode;
  }

private:
  static const Registry::KindStrings _sKindStrings[];
  static const StubInfo              _sStubInfo;

  enum X86Kind : Reference::KindValue {
    invalid,               /// for error condition

    modeCode,              /// Content starting at this offset is code.
    modeData,              /// Content starting at this offset is data.

    // Kinds found in mach-o .o files:
    branch32,              /// ex: call _foo
    branch16,              /// ex: callw _foo
    abs32,                 /// ex: movl _foo, %eax
    funcRel32,             /// ex: movl _foo-L1(%eax), %eax
    pointer32,             /// ex: .long _foo
    delta32,               /// ex: .long _foo - .
    negDelta32,            /// ex: .long . - _foo

    // Kinds introduced by Passes:
    lazyPointer,           /// Location contains a lazy pointer.
    lazyImmediateLocation, /// Location contains immediate value used in stub.
  };

  static bool useExternalRelocationTo(const Atom &target);

  void applyFixupFinal(const Reference &ref, uint8_t *location,
                       uint64_t fixupAddress, uint64_t targetAddress,
                       uint64_t inAtomAddress);

  void applyFixupRelocatable(const Reference &ref, uint8_t *location,
                             uint64_t fixupAddress,
                             uint64_t targetAddress,
                             uint64_t inAtomAddress);
};

//===----------------------------------------------------------------------===//
//  ArchHandler_x86
//===----------------------------------------------------------------------===//
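
// Overview (editor's summary of the methods below): this handler round-trips
// mach-o relocations through Reference kinds.  getReferenceInfo() and
// getPairReferenceInfo() decode relocations from .o files into the X86Kind
// values declared above; applyFixupFinal(), applyFixupRelocatable() and
// appendSectionRelocations() encode references back into instruction bytes
// and relocation records.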

const Registry::KindStrings ArchHandler_x86::_sKindStrings[] = {
  LLD_KIND_STRING_ENTRY(invalid),
  LLD_KIND_STRING_ENTRY(modeCode),
  LLD_KIND_STRING_ENTRY(modeData),
  LLD_KIND_STRING_ENTRY(branch32),
  LLD_KIND_STRING_ENTRY(branch16),
  LLD_KIND_STRING_ENTRY(abs32),
  LLD_KIND_STRING_ENTRY(funcRel32),
  LLD_KIND_STRING_ENTRY(pointer32),
  LLD_KIND_STRING_ENTRY(delta32),
  LLD_KIND_STRING_ENTRY(negDelta32),
  LLD_KIND_STRING_ENTRY(lazyPointer),
  LLD_KIND_STRING_ENTRY(lazyImmediateLocation),
  LLD_KIND_STRING_END
};
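
// Note: this table maps each numeric kind value to its name so that
// diagnostics and textual representations can refer to x86 Reference kinds
// by name rather than by raw integer.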

const ArchHandler::StubInfo ArchHandler_x86::_sStubInfo = {
  "dyld_stub_binder",

  // Lazy pointer references
  { Reference::KindArch::x86, pointer32, 0, 0 },
  { Reference::KindArch::x86, lazyPointer, 0, 0 },

  // GOT pointer to dyld_stub_binder
  { Reference::KindArch::x86, pointer32, 0, 0 },

  // x86 code alignment
  1,

  // Stub size and code
  6,
  { 0xff, 0x25, 0x00, 0x00, 0x00, 0x00 },       // jmp *lazyPointer
  { Reference::KindArch::x86, abs32, 2, 0 },
  { false, 0, 0, 0 },

  // Stub Helper size and code
  10,
  { 0x68, 0x00, 0x00, 0x00, 0x00,               // pushl $lazy-info-offset
    0xE9, 0x00, 0x00, 0x00, 0x00 },             // jmp to stub-helper-common
  { Reference::KindArch::x86, lazyImmediateLocation, 1, 0 },
  { Reference::KindArch::x86, branch32, 6, 0 },

  // Stub helper image cache content type
  DefinedAtom::typeNonLazyPointer,

  // Stub Helper-Common size and code
  12,
  // Stub helper alignment
  2,
  { 0x68, 0x00, 0x00, 0x00, 0x00,               // pushl $dyld_ImageLoaderCache
    0xFF, 0x25, 0x00, 0x00, 0x00, 0x00,         // jmp *_fast_lazy_bind
    0x90 },                                     // nop
  { Reference::KindArch::x86, abs32, 1, 0 },
  { false, 0, 0, 0 },
  { Reference::KindArch::x86, abs32, 7, 0 },
  { false, 0, 0, 0 }
};
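
// How the pieces above fit together at runtime (sketch inferred from the
// code bytes): a call to an undefined function lands on the 6-byte stub,
// which does "jmp *lazyPointer".  The lazy pointer initially points at the
// 10-byte stub helper, which pushes this symbol's lazy-info offset and jumps
// to the shared helper-common.  Helper-common pushes the image-loader-cache
// pointer and jumps through a pointer to dyld_stub_binder, which binds the
// symbol and rewrites the lazy pointer so later calls go straight through.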

bool ArchHandler_x86::isCallSite(const Reference &ref) {
  return (ref.kindValue() == branch32);
}

bool ArchHandler_x86::isPointer(const Reference &ref) {
  return (ref.kindValue() == pointer32);
}

bool ArchHandler_x86::isPairedReloc(const Relocation &reloc) {
  if (!reloc.scattered)
    return false;
  return (reloc.type == GENERIC_RELOC_LOCAL_SECTDIFF) ||
         (reloc.type == GENERIC_RELOC_SECTDIFF);
}
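
// Example: an assembler expression such as ".long _foo - _bar" is emitted as
// a scattered GENERIC_RELOC_SECTDIFF (or GENERIC_RELOC_LOCAL_SECTDIFF)
// immediately followed by its GENERIC_RELOC_PAIR, which is why only those
// scattered types report true above.  The switches below match on
// relocPattern() (a helper on the base ArchHandler), which packs the
// relocation type together with the rScattered/rPcRel/rExtern flag bits and
// an rLength* width bit so an entire relocation shape fits one case label.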

llvm::Error
ArchHandler_x86::getReferenceInfo(const Relocation &reloc,
                                  const DefinedAtom *inAtom,
                                  uint32_t offsetInAtom,
                                  uint64_t fixupAddress, bool swap,
                                  FindAtomBySectionAndAddress atomFromAddress,
                                  FindAtomBySymbolIndex atomFromSymbolIndex,
                                  Reference::KindValue *kind,
                                  const lld::Atom **target,
                                  Reference::Addend *addend) {
  DefinedAtom::ContentPermissions perms;
  const uint8_t *fixupContent = &inAtom->rawContent()[offsetInAtom];
  uint64_t targetAddress;
  switch (relocPattern(reloc)) {
  case GENERIC_RELOC_VANILLA | rPcRel | rExtern | rLength4:
    // ex: call _foo (and _foo undefined)
    *kind = branch32;
    if (auto ec = atomFromSymbolIndex(reloc.symbol, target))
      return ec;
    *addend = fixupAddress + 4 + (int32_t)*(const little32_t *)fixupContent;
    break;
  case GENERIC_RELOC_VANILLA | rPcRel | rLength4:
    // ex: call _foo (and _foo defined)
    *kind = branch32;
    targetAddress =
        fixupAddress + 4 + (int32_t)*(const little32_t *)fixupContent;
    return atomFromAddress(reloc.symbol, targetAddress, target, addend);
    break;
  case GENERIC_RELOC_VANILLA | rScattered | rPcRel | rLength4:
    // ex: call _foo+n (and _foo defined)
    *kind = branch32;
    targetAddress =
        fixupAddress + 4 + (int32_t)*(const little32_t *)fixupContent;
    if (auto ec = atomFromAddress(0, reloc.value, target, addend))
      return ec;
    *addend = targetAddress - reloc.value;
    break;
  case GENERIC_RELOC_VANILLA | rPcRel | rExtern | rLength2:
    // ex: callw _foo (and _foo undefined)
    *kind = branch16;
    if (auto ec = atomFromSymbolIndex(reloc.symbol, target))
      return ec;
    *addend = fixupAddress + 2 + (int16_t)*(const little16_t *)fixupContent;
    break;
  case GENERIC_RELOC_VANILLA | rPcRel | rLength2:
    // ex: callw _foo (and _foo defined)
    *kind = branch16;
    targetAddress =
        fixupAddress + 2 + (int16_t)*(const little16_t *)fixupContent;
    return atomFromAddress(reloc.symbol, targetAddress, target, addend);
    break;
  case GENERIC_RELOC_VANILLA | rScattered | rPcRel | rLength2:
    // ex: callw _foo+n (and _foo defined)
    *kind = branch16;
    targetAddress =
        fixupAddress + 2 + (int16_t)*(const little16_t *)fixupContent;
    if (auto ec = atomFromAddress(0, reloc.value, target, addend))
      return ec;
    *addend = targetAddress - reloc.value;
    break;
  case GENERIC_RELOC_VANILLA | rExtern | rLength4:
    // ex: movl _foo, %eax   (and _foo undefined)
    // ex: .long _foo        (and _foo undefined)
    perms = inAtom->permissions();
    *kind =
        ((perms & DefinedAtom::permR_X) == DefinedAtom::permR_X) ? abs32
                                                                 : pointer32;
    if (auto ec = atomFromSymbolIndex(reloc.symbol, target))
      return ec;
    *addend = *(const ulittle32_t *)fixupContent;
    break;
  case GENERIC_RELOC_VANILLA | rLength4:
    // ex: movl _foo, %eax   (and _foo defined)
    // ex: .long _foo        (and _foo defined)
    perms = inAtom->permissions();
    *kind =
        ((perms & DefinedAtom::permR_X) == DefinedAtom::permR_X) ? abs32
                                                                 : pointer32;
    targetAddress = *(const ulittle32_t *)fixupContent;
    return atomFromAddress(reloc.symbol, targetAddress, target, addend);
    break;
  case GENERIC_RELOC_VANILLA | rScattered | rLength4:
    // ex: .long _foo+n      (and _foo defined)
    perms = inAtom->permissions();
    *kind =
        ((perms & DefinedAtom::permR_X) == DefinedAtom::permR_X) ? abs32
                                                                 : pointer32;
    if (auto ec = atomFromAddress(0, reloc.value, target, addend))
      return ec;
    *addend = *(const ulittle32_t *)fixupContent - reloc.value;
    break;
  default:
    return llvm::make_error<GenericError>("unsupported i386 relocation type");
  }
  return llvm::Error::success();
}
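
// Worked example (illustrative numbers): for a non-extern, pc-relative
// "call _foo" whose displacement field sits at fixupAddress 0x100 and holds
// 0x20, the callee is at 0x100 + 4 + 0x20 = 0x124; atomFromAddress() then
// splits that address into the containing atom (*target) and its *addend.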

llvm::Error
ArchHandler_x86::getPairReferenceInfo(const normalized::Relocation &reloc1,
                                      const normalized::Relocation &reloc2,
                                      const DefinedAtom *inAtom,
                                      uint32_t offsetInAtom,
                                      uint64_t fixupAddress, bool swap,
                                      bool scatterable,
                                      FindAtomBySectionAndAddress atomFromAddr,
                                      FindAtomBySymbolIndex atomFromSymbolIndex,
                                      Reference::KindValue *kind,
                                      const lld::Atom **target,
                                      Reference::Addend *addend) {
  const uint8_t *fixupContent = &inAtom->rawContent()[offsetInAtom];
  DefinedAtom::ContentPermissions perms = inAtom->permissions();
  uint32_t fromAddress;
  uint32_t toAddress;
  uint32_t value;
  const lld::Atom *fromTarget;
  Reference::Addend offsetInTo;
  Reference::Addend offsetInFrom;
  switch (relocPattern(reloc1) << 16 | relocPattern(reloc2)) {
  case ((GENERIC_RELOC_SECTDIFF | rScattered | rLength4) << 16 |
         GENERIC_RELOC_PAIR | rScattered | rLength4):
  case ((GENERIC_RELOC_LOCAL_SECTDIFF | rScattered | rLength4) << 16 |
         GENERIC_RELOC_PAIR | rScattered | rLength4):
    toAddress = reloc1.value;
    fromAddress = reloc2.value;
    value = *(const little32_t *)fixupContent;
    if (auto ec = atomFromAddr(0, toAddress, target, &offsetInTo))
      return ec;
    if (auto ec = atomFromAddr(0, fromAddress, &fromTarget, &offsetInFrom))
      return ec;
    if (fromTarget != inAtom) {
      if (*target != inAtom)
        return llvm::make_error<GenericError>(
            "SECTDIFF relocation where neither target is in atom");
      *kind = negDelta32;
      *addend = toAddress - value - fromAddress;
      *target = fromTarget;
    } else {
      if ((perms & DefinedAtom::permR_X) == DefinedAtom::permR_X) {
        // SECTDIFF relocations are used in i386 codegen where the function
        // prolog does a CALL to the next instruction which POPs the return
        // address into EBX which becomes the pic-base register.  The POP
        // instruction is labeled, and that label is used as the subtrahend
        // in expressions.  The funcRel32 kind represents the 32-bit delta
        // to some symbol from the start of the function (atom) containing
        // the funcRel32.
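        //
        // Illustrative prolog (hypothetical symbol names):
        //        call    L1            ; pushes the address of L1
        //   L1:  popl    %ebx          ; %ebx = pic base
        //        movl    _foo-L1(%ebx), %eax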
        *kind = funcRel32;
        uint32_t ta = fromAddress + value - toAddress;
        *addend = ta - offsetInFrom;
      } else {
        *kind = delta32;
        *addend = fromAddress + value - toAddress;
      }
    }
    return llvm::Error::success();
    break;
  default:
    return llvm::make_error<GenericError>("unsupported i386 relocation type");
  }
}

void ArchHandler_x86::generateAtomContent(const DefinedAtom &atom,
                                          bool relocatable,
                                          FindAddressForAtom findAddress,
                                          FindAddressForAtom findSectionAddress,
                                          uint64_t imageBaseAddress,
                            llvm::MutableArrayRef<uint8_t> atomContentBuffer) {
  // Copy raw bytes.
  std::copy(atom.rawContent().begin(), atom.rawContent().end(),
            atomContentBuffer.begin());
  // Apply fix-ups.
  for (const Reference *ref : atom) {
    uint32_t offset = ref->offsetInAtom();
    const Atom *target = ref->target();
    uint64_t targetAddress = 0;
    if (isa<DefinedAtom>(target))
      targetAddress = findAddress(*target);
    uint64_t atomAddress = findAddress(atom);
    uint64_t fixupAddress = atomAddress + offset;
    if (relocatable) {
      applyFixupRelocatable(*ref, &atomContentBuffer[offset],
                            fixupAddress, targetAddress, atomAddress);
    } else {
      applyFixupFinal(*ref, &atomContentBuffer[offset],
                      fixupAddress, targetAddress, atomAddress);
    }
  }
}
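
// Note: in relocatable mode (emitting a .o) a fixup may keep a symbolic
// value and rely on the section relocations appended later by
// appendSectionRelocations(); in final mode every fixup is resolved to
// concrete addresses.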

void ArchHandler_x86::applyFixupFinal(const Reference &ref, uint8_t *loc,
                                      uint64_t fixupAddress,
                                      uint64_t targetAddress,
                                      uint64_t inAtomAddress) {
  if (ref.kindNamespace() != Reference::KindNamespace::mach_o)
    return;
  assert(ref.kindArch() == Reference::KindArch::x86);
  ulittle32_t *loc32 = reinterpret_cast<ulittle32_t *>(loc);
  switch (static_cast<X86Kind>(ref.kindValue())) {
  case branch32:
    *loc32 = (targetAddress - (fixupAddress + 4)) + ref.addend();
    break;
  case branch16:
    *loc32 = (targetAddress - (fixupAddress + 2)) + ref.addend();
    break;
  case pointer32:
  case abs32:
    *loc32 = targetAddress + ref.addend();
    break;
  case funcRel32:
    *loc32 = targetAddress - inAtomAddress + ref.addend();
    break;
  case delta32:
    *loc32 = targetAddress - fixupAddress + ref.addend();
    break;
  case negDelta32:
    *loc32 = fixupAddress - targetAddress + ref.addend();
    break;
  case modeCode:
  case modeData:
  case lazyPointer:
    // do nothing
    break;
  case lazyImmediateLocation:
    *loc32 = ref.addend();
    break;
  case invalid:
    llvm_unreachable("invalid x86 Reference Kind");
    break;
  }
}
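
// Example (illustrative addresses): a branch32 whose fixup is at 0x1000 with
// target 0x2000 and addend 0 stores 0x2000 - (0x1000 + 4) = 0xFFC, i.e. the
// pc-relative displacement measured from just past the 4-byte field.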

void ArchHandler_x86::applyFixupRelocatable(const Reference &ref,
                                            uint8_t *loc,
                                            uint64_t fixupAddress,
                                            uint64_t targetAddress,
                                            uint64_t inAtomAddress) {
  if (ref.kindNamespace() != Reference::KindNamespace::mach_o)
    return;
  assert(ref.kindArch() == Reference::KindArch::x86);
  bool useExternalReloc = useExternalRelocationTo(*ref.target());
  ulittle16_t *loc16 = reinterpret_cast<ulittle16_t *>(loc);
  ulittle32_t *loc32 = reinterpret_cast<ulittle32_t *>(loc);
  switch (static_cast<X86Kind>(ref.kindValue())) {
  case branch32:
    if (useExternalReloc)
      *loc32 = ref.addend() - (fixupAddress + 4);
    else
      *loc32 = (targetAddress - (fixupAddress + 4)) + ref.addend();
    break;
  case branch16:
    if (useExternalReloc)
      *loc16 = ref.addend() - (fixupAddress + 2);
    else
      *loc16 = (targetAddress - (fixupAddress + 2)) + ref.addend();
    break;
  case pointer32:
  case abs32:
    *loc32 = targetAddress + ref.addend();
    break;
  case funcRel32:
    *loc32 = targetAddress - inAtomAddress + ref.addend(); // FIXME
    break;
  case delta32:
    *loc32 = targetAddress - fixupAddress + ref.addend();
    break;
  case negDelta32:
    *loc32 = fixupAddress - targetAddress + ref.addend();
    break;
  case modeCode:
  case modeData:
  case lazyPointer:
  case lazyImmediateLocation:
    // do nothing
    break;
  case invalid:
    llvm_unreachable("invalid x86 Reference Kind");
    break;
  }
}
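
// Note the split for external relocations above: the instruction slot holds
// only the addend (biased by the pc), since the symbol's address is unknown
// until the final link; internal relocations can bake the target's address
// within the object directly into the slot.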

bool ArchHandler_x86::useExternalRelocationTo(const Atom &target) {
  // Undefined symbols are referenced via external relocations.
  if (isa<UndefinedAtom>(&target))
    return true;
  if (const DefinedAtom *defAtom = dyn_cast<DefinedAtom>(&target)) {
    switch (defAtom->merge()) {
    case DefinedAtom::mergeAsTentative:
      // Tentative definitions are referenced via external relocations.
      return true;
    case DefinedAtom::mergeAsWeak:
    case DefinedAtom::mergeAsWeakAndAddressUsed:
      // Global weak-defs are referenced via external relocations.
      return (defAtom->scope() == DefinedAtom::scopeGlobal);
    default:
      break;
    }
  }
  // Everything else is referenced via an internal relocation.
  return false;
}

void ArchHandler_x86::appendSectionRelocations(
                                   const DefinedAtom &atom,
                                   uint64_t atomSectionOffset,
                                   const Reference &ref,
                                   FindSymbolIndexForAtom symbolIndexForAtom,
                                   FindSectionIndexForAtom sectionIndexForAtom,
                                   FindAddressForAtom addressForAtom,
                                   normalized::Relocations &relocs) {
  if (ref.kindNamespace() != Reference::KindNamespace::mach_o)
    return;
  assert(ref.kindArch() == Reference::KindArch::x86);
  uint32_t sectionOffset = atomSectionOffset + ref.offsetInAtom();
  bool useExternalReloc = useExternalRelocationTo(*ref.target());
  switch (static_cast<X86Kind>(ref.kindValue())) {
  case modeCode:
  case modeData:
    break;
  case branch32:
    if (useExternalReloc) {
      appendReloc(relocs, sectionOffset, symbolIndexForAtom(*ref.target()), 0,
                  GENERIC_RELOC_VANILLA | rExtern    | rPcRel | rLength4);
    } else {
      if (ref.addend() != 0)
        appendReloc(relocs, sectionOffset, 0, addressForAtom(*ref.target()),
                    GENERIC_RELOC_VANILLA | rScattered | rPcRel | rLength4);
      else
        appendReloc(relocs, sectionOffset, sectionIndexForAtom(*ref.target()),
                    0,
                    GENERIC_RELOC_VANILLA |              rPcRel | rLength4);
    }
    break;
  case branch16:
    if (useExternalReloc) {
      appendReloc(relocs, sectionOffset, symbolIndexForAtom(*ref.target()), 0,
                  GENERIC_RELOC_VANILLA | rExtern    | rPcRel | rLength2);
    } else {
      if (ref.addend() != 0)
        appendReloc(relocs, sectionOffset, 0, addressForAtom(*ref.target()),
                    GENERIC_RELOC_VANILLA | rScattered | rPcRel | rLength2);
      else
        appendReloc(relocs, sectionOffset, sectionIndexForAtom(*ref.target()),
                    0,
                    GENERIC_RELOC_VANILLA |              rPcRel | rLength2);
    }
    break;
  case pointer32:
  case abs32:
    if (useExternalReloc)
      appendReloc(relocs, sectionOffset, symbolIndexForAtom(*ref.target()), 0,
                  GENERIC_RELOC_VANILLA | rExtern              | rLength4);
    else {
      if (ref.addend() != 0)
        appendReloc(relocs, sectionOffset, 0, addressForAtom(*ref.target()),
                    GENERIC_RELOC_VANILLA | rScattered          | rLength4);
      else
        appendReloc(relocs, sectionOffset, sectionIndexForAtom(*ref.target()),
                    0,
                    GENERIC_RELOC_VANILLA |                       rLength4);
    }
    break;
  case funcRel32:
    appendReloc(relocs, sectionOffset, 0, addressForAtom(*ref.target()),
                GENERIC_RELOC_SECTDIFF | rScattered | rLength4);
    appendReloc(relocs, sectionOffset, 0, addressForAtom(atom) - ref.addend(),
                GENERIC_RELOC_PAIR     | rScattered | rLength4);
    break;
  case delta32:
    appendReloc(relocs, sectionOffset, 0, addressForAtom(*ref.target()),
                GENERIC_RELOC_SECTDIFF | rScattered | rLength4);
    appendReloc(relocs, sectionOffset, 0, addressForAtom(atom) +
                                                           ref.offsetInAtom(),
                GENERIC_RELOC_PAIR     | rScattered | rLength4);
    break;
  case negDelta32:
    appendReloc(relocs, sectionOffset, 0, addressForAtom(atom) +
                                                           ref.offsetInAtom(),
                GENERIC_RELOC_SECTDIFF | rScattered | rLength4);
    appendReloc(relocs, sectionOffset, 0, addressForAtom(*ref.target()),
                GENERIC_RELOC_PAIR     | rScattered | rLength4);
    break;
  case lazyPointer:
  case lazyImmediateLocation:
    llvm_unreachable("lazy reference kind implies Stubs pass was run");
    break;
  case invalid:
    llvm_unreachable("unknown x86 Reference Kind");
    break;
  }
}
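
// Example (illustrative): a funcRel32 reference is written back as the same
// scattered SECTDIFF + PAIR combination that getPairReferenceInfo() decodes,
// with the PAIR's value carrying the pic-base label (computed here as the
// atom's address minus the reference addend), so a read/write round trip
// preserves the relocation.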

std::unique_ptr<mach_o::ArchHandler> ArchHandler::create_x86() {
  return std::unique_ptr<mach_o::ArchHandler>(new ArchHandler_x86());
}

} // namespace mach_o
} // namespace lld