Coverage Report

Created: 2019-07-24 05:18

/Users/buildslave/jenkins/workspace/clang-stage2-coverage-R/llvm/tools/lld/ELF/AArch64ErrataFix.cpp
Line
Count
Source (jump to first uncovered line)
1
//===- AArch64ErrataFix.cpp -----------------------------------------------===//
2
//
3
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4
// See https://llvm.org/LICENSE.txt for license information.
5
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6
//
7
//===----------------------------------------------------------------------===//
8
// This file implements Section Patching for the purpose of working around
9
// errata in CPUs. The general principle is that an erratum sequence of one or
10
// more instructions is detected in the instruction stream, one of the
11
// instructions in the sequence is replaced with a branch to a patch sequence
12
// of replacement instructions. At the end of the replacement sequence the
13
// patch branches back to the instruction stream.
14
15
// This technique is only suitable for fixing an erratum when:
16
// - There is a set of necessary conditions required to trigger the erratum that
17
// can be detected at static link time.
18
// - There is a set of replacement instructions that can be used to remove at
19
// least one of the necessary conditions that trigger the erratum.
20
// - We can overwrite an instruction in the erratum sequence with a branch to
21
// the replacement sequence.
22
// - We can place the replacement sequence within range of the branch.
23
24
// FIXME:
25
// - The implementation here only supports one patch, the AArch64 Cortex-53
26
// errata 843419 that affects r0p0, r0p1, r0p2 and r0p4 versions of the core.
27
// To keep the initial version simple there is no support for multiple
28
// architectures or selection of different patches.
29
//===----------------------------------------------------------------------===//
30
31
#include "AArch64ErrataFix.h"
32
#include "Config.h"
33
#include "LinkerScript.h"
34
#include "OutputSections.h"
35
#include "Relocations.h"
36
#include "Symbols.h"
37
#include "SyntheticSections.h"
38
#include "Target.h"
39
#include "lld/Common/Memory.h"
40
#include "lld/Common/Strings.h"
41
#include "llvm/Support/Endian.h"
42
#include "llvm/Support/raw_ostream.h"
43
#include <algorithm>
44
45
using namespace llvm;
46
using namespace llvm::ELF;
47
using namespace llvm::object;
48
using namespace llvm::support;
49
using namespace llvm::support::endian;
50
51
using namespace lld;
52
using namespace lld::elf;
53
54
// Helper functions to identify instructions and conditions needed to trigger
55
// the Cortex-A53-843419 erratum.
56
57
// ADRP
58
// | 1 | immlo (2) | 1 | 0 0 0 0 | immhi (19) | Rd (5) |
59
209
// Returns true if the instruction is an ADRP (PC-relative page address).
static bool isADRP(uint32_t instr) {
  constexpr uint32_t opcodeMask = 0x9f000000;
  constexpr uint32_t adrpBits = 0x90000000;
  return (instr & opcodeMask) == adrpBits;
}
62
63
// Load and store bit patterns from ARMv8-A ARM ARM.
64
// Instructions appear in order of appearance starting from table in
65
// C4.1.3 Loads and Stores.
66
67
// All loads and stores have 1 (at bit postion 27), (0 at bit position 25).
68
// | op0 x op1 (2) | 1 op2 0 op3 (2) | x | op4 (5) | xxxx | op5 (2) | x (10) |
69
127
// Returns true if the instruction belongs to the top-level load/store
// encoding class (bit 27 set, bit 25 clear).
static bool isLoadStoreClass(uint32_t instr) {
  constexpr uint32_t classMask = 0x0a000000;
  constexpr uint32_t classBits = 0x08000000;
  return (instr & classMask) == classBits;
}
72
73
// LDN/STN multiple no offset
74
// | 0 Q 00 | 1100 | 0 L 00 | 0000 | opcode (4) | size (2) | Rn (5) | Rt (5) |
75
// LDN/STN multiple post-indexed
76
// | 0 Q 00 | 1100 | 1 L 0 | Rm (5)| opcode (4) | size (2) | Rn (5) | Rt (5) |
77
// L == 0 for stores.
78
79
// Utility routine to decode opcode field of LDN/STN multiple structure
80
// instructions to find the ST1 instructions.
81
// opcode == 0010 ST1 4 registers.
82
// opcode == 0110 ST1 3 registers.
83
// opcode == 0111 ST1 1 register.
84
// opcode == 1010 ST1 2 registers.
85
14
// Returns true if the opcode field (bits 12-15) of an LDN/STN multiple
// structure instruction selects one of the ST1 forms:
//   0b0010 ST1 4 registers, 0b0110 ST1 3 registers,
//   0b0111 ST1 1 register,  0b1010 ST1 2 registers.
// NOTE: the last comparison literal was corrupted in this copy of the file
// ("0x0000a0006" — a coverage count fused onto the constant); the correct
// value for the 2-register ST1 opcode is 0x0000a000.
static bool isST1MultipleOpcode(uint32_t instr) {
  const uint32_t opcode = instr & 0x0000f000;
  return opcode == 0x00002000 || opcode == 0x00006000 ||
         opcode == 0x00007000 || opcode == 0x0000a000;
}
91
92
22
static bool isST1Multiple(uint32_t instr) {
93
22
  return (instr & 0xbfff0000) == 0x0c000000 && 
isST1MultipleOpcode(instr)4
;
94
22
}
95
96
// Writes to Rn (writeback).
97
113
static bool isST1MultiplePost(uint32_t instr) {
98
113
  return (instr & 0xbfe00000) == 0x0c800000 && 
isST1MultipleOpcode(instr)10
;
99
113
}
100
101
// LDN/STN single no offset
102
// | 0 Q 00 | 1101 | 0 L R 0 | 0000 | opc (3) S | size (2) | Rn (5) | Rt (5)|
103
// LDN/STN single post-indexed
104
// | 0 Q 00 | 1101 | 1 L R | Rm (5) | opc (3) S | size (2) | Rn (5) | Rt (5)|
105
// L == 0 for stores
106
107
// Utility routine to decode opcode field of LDN/STN single structure
108
// instructions to find the ST1 instructions.
109
// R == 0 for ST1 and ST3, R == 1 for ST2 and ST4.
110
// opcode == 000 ST1 8-bit.
111
// opcode == 010 ST1 16-bit.
112
// opcode == 100 ST1 32 or 64-bit (Size determines which).
113
8
// Returns true if the R/opc fields of an LDN/STN single structure
// instruction select an ST1 form:
//   R == 0 with opc == 000 (8-bit), 010 (16-bit) or 100 (32/64-bit).
static bool isST1SingleOpcode(uint32_t instr) {
  const uint32_t selector = instr & 0x0040e000;
  return selector == 0x00000000 || selector == 0x00004000 ||
         selector == 0x00008000;
}
118
119
16
static bool isST1Single(uint32_t instr) {
120
16
  return (instr & 0xbfff0000) == 0x0d000000 && 
isST1SingleOpcode(instr)4
;
121
16
}
122
123
// Writes to Rn (writeback).
124
107
static bool isST1SinglePost(uint32_t instr) {
125
107
  return (instr & 0xbfe00000) == 0x0d800000 && 
isST1SingleOpcode(instr)4
;
126
107
}
127
128
22
static bool isST1(uint32_t instr) {
129
22
  return isST1Multiple(instr) || 
isST1MultiplePost(instr)20
||
130
22
         
isST1Single(instr)16
||
isST1SinglePost(instr)12
;
131
22
}
132
133
// Load/store exclusive
134
// | size (2) 00 | 1000 | o2 L o1 | Rs (5) | o0 | Rt2 (5) | Rn (5) | Rt (5) |
135
// L == 0 for Stores.
136
127
// Load/store exclusive class (both loads and stores).
static bool isLoadStoreExclusive(uint32_t instr) {
  constexpr uint32_t classMask = 0x3f000000;
  constexpr uint32_t classBits = 0x08000000;
  return (instr & classMask) == classBits;
}
139
140
117
// Load-exclusive only: the L bit (bit 22) distinguishes loads from stores.
static bool isLoadExclusive(uint32_t instr) {
  constexpr uint32_t classMask = 0x3f400000;
  constexpr uint32_t loadBits = 0x08400000;
  return (instr & classMask) == loadBits;
}
143
144
// Load register literal
145
// | opc (2) 01 | 1 V 00 | imm19 | Rt (5) |
146
238
// Load register (PC-relative literal) class.
static bool isLoadLiteral(uint32_t instr) {
  constexpr uint32_t classMask = 0x3b000000;
  constexpr uint32_t classBits = 0x18000000;
  return (instr & classMask) == classBits;
}
149
150
// Load/store no-allocate pair
151
// (offset)
152
// | opc (2) 10 | 1 V 00 | 0 L | imm7 | Rt2 (5) | Rn (5) | Rt (5) |
153
// L == 0 for stores.
154
// Never writes to register
155
34
// Load/store no-allocate pair, offset form. Never writes back to a register.
static bool isSTNP(uint32_t instr) {
  constexpr uint32_t classMask = 0x3bc00000;
  constexpr uint32_t classBits = 0x28000000;
  return (instr & classMask) == classBits;
}
158
159
// Load/store register pair
160
// (post-indexed)
161
// | opc (2) 10 | 1 V 00 | 1 L | imm7 | Rt2 (5) | Rn (5) | Rt (5) |
162
// L == 0 for stores, V == 0 for Scalar, V == 1 for Simd/FP
163
// Writes to Rn.
164
157
// Load/store register pair, post-indexed form. Writes back to Rn.
static bool isSTPPost(uint32_t instr) {
  constexpr uint32_t classMask = 0x3bc00000;
  constexpr uint32_t classBits = 0x28800000;
  return (instr & classMask) == classBits;
}
167
168
// (offset)
169
// | opc (2) 10 | 1 V 01 | 0 L | imm7 | Rt2 (5) | Rn (5) | Rt (5) |
170
50
// Load/store register pair, signed-offset form. No writeback.
static bool isSTPOffset(uint32_t instr) {
  constexpr uint32_t classMask = 0x3bc00000;
  constexpr uint32_t classBits = 0x29000000;
  return (instr & classMask) == classBits;
}
173
174
// (pre-index)
175
// | opc (2) 10 | 1 V 01 | 1 L | imm7 | Rt2 (5) | Rn (5) | Rt (5) |
176
// Writes to Rn.
177
147
// Load/store register pair, pre-indexed form. Writes back to Rn.
static bool isSTPPre(uint32_t instr) {
  constexpr uint32_t classMask = 0x3bc00000;
  constexpr uint32_t classBits = 0x29800000;
  return (instr & classMask) == classBits;
}
180
181
56
static bool isSTP(uint32_t instr) {
182
56
  return isSTPPost(instr) || 
isSTPOffset(instr)50
||
isSTPPre(instr)40
;
183
56
}
184
185
// Load/store register (unscaled immediate)
186
// | size (2) 11 | 1 V 00 | opc (2) 0 | imm9 | 00 | Rn (5) | Rt (5) |
187
// V == 0 for Scalar, V == 1 for Simd/FP.
188
234
// Load/store register with unscaled immediate offset.
static bool isLoadStoreUnscaled(uint32_t instr) {
  constexpr uint32_t classMask = 0x3b000c00;
  constexpr uint32_t classBits = 0x38000000;
  return (instr & classMask) == classBits;
}
191
192
// Load/store register (immediate post-indexed)
193
// | size (2) 11 | 1 V 00 | opc (2) 0 | imm9 | 01 | Rn (5) | Rt (5) |
194
337
// Load/store register, immediate post-indexed. Writes back to Rn.
static bool isLoadStoreImmediatePost(uint32_t instr) {
  constexpr uint32_t classMask = 0x3b200c00;
  constexpr uint32_t classBits = 0x38000400;
  return (instr & classMask) == classBits;
}
197
198
// Load/store register (unprivileged)
199
// | size (2) 11 | 1 V 00 | opc (2) 0 | imm9 | 10 | Rn (5) | Rt (5) |
200
218
// Load/store register, unprivileged form (LDTR/STTR family).
static bool isLoadStoreUnpriv(uint32_t instr) {
  constexpr uint32_t classMask = 0x3b200c00;
  constexpr uint32_t classBits = 0x38000800;
  return (instr & classMask) == classBits;
}
203
204
// Load/store register (immediate pre-indexed)
205
// | size (2) 11 | 1 V 00 | opc (2) 0 | imm9 | 11 | Rn (5) | Rt (5) |
206
329
// Load/store register, immediate pre-indexed. Writes back to Rn.
static bool isLoadStoreImmediatePre(uint32_t instr) {
  constexpr uint32_t classMask = 0x3b200c00;
  constexpr uint32_t classBits = 0x38000c00;
  return (instr & classMask) == classBits;
}
209
210
// Load/store register (register offset)
211
// | size (2) 11 | 1 V 00 | opc (2) 1 | Rm (5) | option (3) S | 10 | Rn | Rt |
212
206
// Load/store register with register offset.
static bool isLoadStoreRegisterOff(uint32_t instr) {
  constexpr uint32_t classMask = 0x3b200c00;
  constexpr uint32_t classBits = 0x38200800;
  return (instr & classMask) == classBits;
}
215
216
// Load/store register (unsigned immediate)
217
// | size (2) 11 | 1 V 01 | opc (2) | imm12 | Rn (5) | Rt (5) |
218
307
// Load/store register with unsigned scaled immediate — the class the fourth
// instruction of the erratum sequence must belong to.
static bool isLoadStoreRegisterUnsigned(uint32_t instr) {
  constexpr uint32_t classMask = 0x3b000000;
  constexpr uint32_t classBits = 0x39000000;
  return (instr & classMask) == classBits;
}
221
222
// Rt is always in bit position 0 - 4.
223
177
// Extract Rt — always encoded in bits 0-4.
static uint32_t getRt(uint32_t instr) {
  constexpr uint32_t regMask = 0x1f;
  return instr & regMask;
}
224
225
// Rn is always in bit position 5 - 9.
226
110
// Extract Rn — always encoded in bits 5-9.
static uint32_t getRn(uint32_t instr) {
  constexpr uint32_t regMask = 0x1f;
  return (instr >> 5) & regMask;
}
227
228
// C4.1.2 Branches, Exception Generating and System instructions
229
// | op0 (3) 1 | 01 op1 (4) | x (22) |
230
// op0 == 010 101 op1 == 0xxx Conditional Branch.
231
// op0 == 110 101 op1 == 1xxx Unconditional Branch Register.
232
// op0 == x00 101 op1 == xxxx Unconditional Branch immediate.
233
// op0 == x01 101 op1 == 0xxx Compare and branch immediate.
234
// op0 == x01 101 op1 == 1xxx Test and branch immediate.
235
60
// Returns true for any branch instruction. Note that the original trailing
// comments had the first two labels swapped relative to the ARMv8 encodings;
// 0xd6... is the unconditional branch (register) group and 0x54... is the
// conditional branch (immediate) group.
static bool isBranch(uint32_t instr) {
  const bool uncondReg = (instr & 0xfe000000) == 0xd6000000; // BR/BLR/RET.
  const bool condImm = (instr & 0xfe000000) == 0x54000000;   // B.cond.
  const bool uncondImm = (instr & 0x7c000000) == 0x14000000; // B/BL.
  const bool cmpOrTest = (instr & 0x7c000000) == 0x34000000; // CBZ/TBZ etc.
  return uncondReg || condImm || uncondImm || cmpOrTest;
}
241
242
234
static bool isV8SingleRegisterNonStructureLoadStore(uint32_t instr) {
243
234
  return isLoadStoreUnscaled(instr) || 
isLoadStoreImmediatePost(instr)226
||
244
234
         
isLoadStoreUnpriv(instr)218
||
isLoadStoreImmediatePre(instr)214
||
245
234
         
isLoadStoreRegisterOff(instr)206
||
isLoadStoreRegisterUnsigned(instr)202
;
246
234
}
247
248
// Note that this function refers to v8.0 only and does not include the
249
// additional load and store instructions added for in later revisions of
250
// the architecture such as the Atomic memory operations introduced
251
// in v8.1.
252
117
static bool isV8NonStructureLoad(uint32_t instr) {
253
117
  if (isLoadExclusive(instr))
254
2
    return true;
255
115
  if (isLoadLiteral(instr))
256
2
    return true;
257
113
  else if (isV8SingleRegisterNonStructureLoadStore(instr)) {
258
65
    // For Load and Store single register, Loads are derived from a
259
65
    // combination of the Size, V and Opc fields.
260
65
    uint32_t size = (instr >> 30) & 0xff;
261
65
    uint32_t v = (instr >> 26) & 0x1;
262
65
    uint32_t opc = (instr >> 22) & 0x3;
263
65
    // For the load and store instructions that we are decoding.
264
65
    // Opc == 0 are all stores.
265
65
    // Opc == 1 with a couple of exceptions are loads. The exceptions are:
266
65
    // Size == 00 (0), V == 1, Opc == 10 (2) which is a store and
267
65
    // Size == 11 (3), V == 0, Opc == 10 (2) which is a prefetch.
268
65
    return opc != 0 && 
!(46
size == 046
&&
v == 12
&&
opc == 20
) &&
269
65
           
!(46
size == 346
&&
v == 034
&&
opc == 234
);
270
65
  }
271
48
  return false;
272
48
}
273
274
// The following decode instructions are only complete up to the instructions
275
// needed for errata 843419.
276
277
// Instruction with writeback updates the index register after the load/store.
278
115
static bool hasWriteback(uint32_t instr) {
279
115
  return isLoadStoreImmediatePre(instr) || 
isLoadStoreImmediatePost(instr)111
||
280
115
         
isSTPPre(instr)107
||
isSTPPost(instr)101
||
isST1SinglePost(instr)95
||
281
115
         
isST1MultiplePost(instr)93
;
282
115
}
283
284
// For the load and store class of instructions, a load can write to the
285
// destination register, a load and a store can write to the base register when
286
// the instruction has writeback.
287
117
static bool doesLoadStoreWriteToReg(uint32_t instr, uint32_t reg) {
288
117
  return (isV8NonStructureLoad(instr) && 
getRt(instr) == reg50
) ||
289
117
         
(115
hasWriteback(instr)115
&&
getRn(instr) == reg26
);
290
117
}
291
292
// Scanner for Cortex-A53 errata 843419
293
// Full details are available in the Cortex A53 MPCore revision 0 Software
294
// Developers Errata Notice (ARM-EPM-048406).
295
//
296
// The instruction sequence that triggers the erratum is common in compiled
297
// AArch64 code, however it is sensitive to the offset of the sequence within
298
// a 4k page. This means that by scanning and fixing the patch after we have
299
// assigned addresses we only need to disassemble and fix instances of the
300
// sequence in the range of affected offsets.
301
//
302
// In summary the erratum conditions are a series of 4 instructions:
303
// 1.) An ADRP instruction that writes to register Rn with low 12 bits of
304
//     address of instruction either 0xff8 or 0xffc.
305
// 2.) A load or store instruction that can be:
306
// - A single register load or store, of either integer or vector registers.
307
// - An STP or STNP, of either integer or vector registers.
308
// - An Advanced SIMD ST1 store instruction.
309
// - Must not write to Rn, but may optionally read from it.
310
// 3.) An optional instruction that is not a branch and does not write to Rn.
311
// 4.) A load or store from the  Load/store register (unsigned immediate) class
312
//     that uses Rn as the base address register.
313
//
314
// Note that we do not attempt to scan for Sequence 2 as described in the
315
// Software Developers Errata Notice as this has been assessed to be extremely
316
// unlikely to occur in compiled code. This matches gold and ld.bfd behavior.
317
318
// Return true if the Instruction sequence Adrp, Instr2, and Instr4 match
319
// the erratum sequence. The Adrp, Instr2 and Instr4 correspond to 1.), 2.),
320
// and 4.) in the Scanner for Cortex-A53 errata comment above.
321
static bool is843419ErratumSequence(uint32_t instr1, uint32_t instr2,
322
209
                                    uint32_t instr4) {
323
209
  if (!isADRP(instr1))
324
82
    return false;
325
127
326
127
  uint32_t rn = getRt(instr1);
327
127
  return isLoadStoreClass(instr2) &&
328
127
         (isLoadStoreExclusive(instr2) || 
isLoadLiteral(instr2)123
||
329
127
          
isV8SingleRegisterNonStructureLoadStore(instr2)121
||
isSTP(instr2)56
||
330
127
          
isSTNP(instr2)34
||
isST1(instr2)22
) &&
331
127
         
!doesLoadStoreWriteToReg(instr2, rn)117
&&
332
127
         
isLoadStoreRegisterUnsigned(instr4)105
&&
getRn(instr4) == rn84
;
333
127
}
334
335
// Scan the instruction sequence starting at Offset Off from the base of
336
// InputSection IS. We update Off in this function rather than in the caller as
337
// we can skip ahead much further into the section when we know how many
338
// instructions we've scanned.
339
// Return the offset of the load or store instruction in IS that we want to
340
// patch or 0 if no patch required.
341
static uint64_t scanCortexA53Errata843419(InputSection *isec, uint64_t &off,
                                          uint64_t limit) {
  uint64_t isecAddr = isec->getVA(0);

  // Advance Off so that (ISAddr + Off) modulo 0x1000 is at least 0xff8. The
  // erratum is only triggered when the ADRP sits at one of the last two
  // 4-byte slots of a 4KiB page, so all other offsets can be skipped.
  uint64_t initialPageOff = (isecAddr + off) & 0xfff;
  if (initialPageOff < 0xff8)
    off += 0xff8 - initialPageOff;

  // Whether there is room for the 4-instruction variant (sequence with the
  // optional instruction in position 3). Computed before the range check
  // below; it is only read on the in-range path.
  bool optionalAllowed = limit - off > 12;
  if (off >= limit || limit - off < 12) {
    // Need at least 3 4-byte sized instructions to trigger erratum.
    off = limit;
    return 0;
  }

  uint64_t patchOff = 0;
  const uint8_t *buf = isec->data().begin();
  // Section contents are little-endian AArch64 instructions; read through an
  // endian-aware pointer.
  const ulittle32_t *instBuf = reinterpret_cast<const ulittle32_t *>(buf + off);
  uint32_t instr1 = *instBuf++;
  uint32_t instr2 = *instBuf++;
  uint32_t instr3 = *instBuf++;
  if (is843419ErratumSequence(instr1, instr2, instr3)) {
    // 3-instruction form: the load/store to patch is the third instruction.
    patchOff = off + 8;
  } else if (optionalAllowed && !isBranch(instr3)) {
    // 4-instruction form: instr3 is the optional non-branch instruction and
    // the load/store to patch is the fourth instruction.
    uint32_t instr4 = *instBuf++;
    if (is843419ErratumSequence(instr1, instr2, instr4))
      patchOff = off + 12;
  }
  // Advance to the next candidate ADRP slot: either the second slot of this
  // page boundary (0xffc) or the equivalent slot of the next page.
  if (((isecAddr + off) & 0xfff) == 0xff8)
    off += 4;
  else
    off += 0xffc;
  return patchOff;
}
376
377
// A synthetic section holding one patch: a copy of the erratum's load/store
// instruction followed by a branch back to the patchee (getSize() == 8, two
// 4-byte instructions).
class lld::elf::Patch843419Section : public SyntheticSection {
public:
  Patch843419Section(InputSection *p, uint64_t off);

  void writeTo(uint8_t *buf) override;

  // Two instructions: the relocated copy of the patched load/store, and the
  // branch back to the instruction after it.
  size_t getSize() const override { return 8; }

  // Virtual address of the load/store instruction being replaced.
  uint64_t getLDSTAddr() const;

  // The Section we are patching.
  const InputSection *patchee;
  // The offset of the instruction in the Patchee section we are patching.
  uint64_t patcheeOffset;
  // A label for the start of the Patch that we can use as a relocation target.
  Symbol *patchSym;
};
394
395
// Construct a patch for the instruction at offset `off` within InputSection
// `p`. The patch is an executable synthetic section placed in the same output
// section as the patchee.
lld::elf::Patch843419Section::Patch843419Section(InputSection *p, uint64_t off)
    : SyntheticSection(SHF_ALLOC | SHF_EXECINSTR, SHT_PROGBITS, 4,
                       ".text.patch"),
      patchee(p), patcheeOffset(off) {
  this->parent = p->getParent();
  // Define a local symbol at the start of the patch so the branch inserted
  // into the patchee has a relocation target; the name encodes the address of
  // the patched load/store for diagnostics.
  patchSym = addSyntheticLocal(
      saver.save("__CortexA53843419_" + utohexstr(getLDSTAddr())), STT_FUNC, 0,
      getSize(), *this);
  // "$x" mapping symbol: marks the patch contents as AArch64 code.
  addSyntheticLocal(saver.save("$x"), STT_NOTYPE, 0, 0, *this);
}
405
406
76
// Virtual address of the load/store instruction in the patchee that this
// patch replaces with a branch.
uint64_t lld::elf::Patch843419Section::getLDSTAddr() const {
  return patchee->getVA(patcheeOffset);
}
409
410
36
// Emit the patch body: the copied load/store instruction (with its original
// relocation, if any, applied) followed by a branch back to the patchee.
void lld::elf::Patch843419Section::writeTo(uint8_t *buf) {
  // Copy the instruction that we will be replacing with a branch in the
  // Patchee Section.
  write32le(buf, read32le(patchee->data().begin() + patcheeOffset));

  // Apply any relocation transferred from the original PatcheeSection.
  // For a SyntheticSection Buf already has outSecOff added, but relocateAlloc
  // also adds outSecOff so we need to subtract to avoid double counting.
  this->relocateAlloc(buf - outSecOff, buf - outSecOff + getSize());

  // Return address is the next instruction after the one we have just copied.
  uint64_t s = getLDSTAddr() + 4;
  uint64_t p = patchSym->getVA() + 4;
  // Write the branch back as the patch's second instruction.
  target->relocateOne(buf + 4, R_AARCH64_JUMP26, s - p);
}
425
426
8
// Build sectionMap: for every executable InputSection, the sorted,
// deduplicated list of its mapping symbols. Run once; subsequent passes of
// createFixes() reuse the cached result.
void AArch64Err843419Patcher::init() {
  // The AArch64 ABI permits data in executable sections. We must avoid scanning
  // this data as if it were instructions to avoid false matches. We use the
  // mapping symbols in the InputObjects to identify this data, caching the
  // results in sectionMap so we don't have to recalculate it each pass.

  // The ABI Section 4.5.4 Mapping symbols; defines local symbols that describe
  // half open intervals [Symbol Value, Next Symbol Value) of code and data
  // within sections. If there is no next symbol then the half open interval is
  // [Symbol Value, End of section). The type, code or data, is determined by
  // the mapping symbol name, $x for code, $d for data.
  auto isCodeMapSymbol = [](const Symbol *b) {
    return b->getName() == "$x" || b->getName().startswith("$x.");
  };
  auto isDataMapSymbol = [](const Symbol *b) {
    return b->getName() == "$d" || b->getName().startswith("$d.");
  };

  // Collect mapping symbols for every executable InputSection.
  for (InputFile *file : objectFiles) {
    auto *f = cast<ObjFile<ELF64LE>>(file);
    for (Symbol *b : f->getLocalSymbols()) {
      auto *def = dyn_cast<Defined>(b);
      if (!def)
        continue;
      if (!isCodeMapSymbol(def) && !isDataMapSymbol(def))
        continue;
      if (auto *sec = dyn_cast_or_null<InputSection>(def->section))
        if (sec->flags & SHF_EXECINSTR)
          sectionMap[sec].push_back(def);
    }
  }
  // For each InputSection make sure the mapping symbols are in sorted in
  // ascending order and free from consecutive runs of mapping symbols with
  // the same type. For example we must remove the redundant $d.1 from $x.0
  // $d.0 $d.1 $x.1.
  for (auto &kv : sectionMap) {
    std::vector<const Defined *> &mapSyms = kv.second;
    if (mapSyms.size() <= 1)
      continue;
    llvm::stable_sort(mapSyms, [](const Defined *a, const Defined *b) {
      return a->value < b->value;
    });
    // std::unique keeps the FIRST of each run of same-type symbols, which is
    // the one that starts the interval.
    mapSyms.erase(
        std::unique(mapSyms.begin(), mapSyms.end(),
                    [=](const Defined *a, const Defined *b) {
                      return (isCodeMapSymbol(a) && isCodeMapSymbol(b)) ||
                             (isDataMapSymbol(a) && isDataMapSymbol(b));
                    }),
        mapSyms.end());
  }
  initialized = true;
}
479
480
// Insert the PatchSections we have created back into the
481
// InputSectionDescription. As inserting patches alters the addresses of
482
// InputSections that follow them, we try and place the patches after all the
483
// executable sections, although we may need to insert them earlier if the
484
// InputSectionDescription is larger than the maximum branch range.
485
void AArch64Err843419Patcher::insertPatches(
    InputSectionDescription &isd, std::vector<Patch843419Section *> &patches) {
  uint64_t isecLimit;
  uint64_t prevIsecLimit = isd.sections.front()->outSecOff;
  // Farthest output-section offset a patch can be placed at and still be
  // reachable by the branch written into the patchee.
  uint64_t patchUpperBound = prevIsecLimit + target->getThunkSectionSpacing();
  uint64_t outSecAddr = isd.sections.front()->getParent()->addr;

  // Set the outSecOff of patches to the place where we want to insert them.
  // We use a similar strategy to Thunk placement. Place patches roughly
  // every multiple of maximum branch range.
  auto patchIt = patches.begin();
  auto patchEnd = patches.end();
  for (const InputSection *isec : isd.sections) {
    isecLimit = isec->outSecOff + isec->getSize();
    if (isecLimit > patchUpperBound) {
      // Flush every pending patch whose patched instruction lies before the
      // previous section boundary; it goes at that boundary.
      while (patchIt != patchEnd) {
        if ((*patchIt)->getLDSTAddr() - outSecAddr >= prevIsecLimit)
          break;
        (*patchIt)->outSecOff = prevIsecLimit;
        ++patchIt;
      }
      patchUpperBound = prevIsecLimit + target->getThunkSectionSpacing();
    }
    prevIsecLimit = isecLimit;
  }
  // Any remaining patches go after the last executable section.
  for (; patchIt != patchEnd; ++patchIt) {
    (*patchIt)->outSecOff = isecLimit;
  }

  // merge all patch sections. We use the outSecOff assigned above to
  // determine the insertion point. This is ok as we only merge into an
  // InputSectionDescription once per pass, and at the end of the pass
  // assignAddresses() will recalculate all the outSecOff values.
  std::vector<InputSection *> tmp;
  tmp.reserve(isd.sections.size() + patches.size());
  // Order by outSecOff; on a tie a patch sorts before a non-patch so it lands
  // exactly at its assigned boundary.
  auto mergeCmp = [](const InputSection *a, const InputSection *b) {
    if (a->outSecOff < b->outSecOff)
      return true;
    if (a->outSecOff == b->outSecOff && isa<Patch843419Section>(a) &&
        !isa<Patch843419Section>(b))
      return true;
    return false;
  };
  std::merge(isd.sections.begin(), isd.sections.end(), patches.begin(),
             patches.end(), std::back_inserter(tmp), mergeCmp);
  isd.sections = std::move(tmp);
}
532
533
// Given an erratum sequence that starts at address adrpAddr, with an
534
// instruction that we need to patch at patcheeOffset from the start of
535
// InputSection IS, create a Patch843419 Section and add it to the
536
// Patches that we need to insert.
537
static void implementPatch(uint64_t adrpAddr, uint64_t patcheeOffset,
                           InputSection *isec,
                           std::vector<Patch843419Section *> &patches) {
  // There may be a relocation at the same offset that we are patching. There
  // are four cases that we need to consider.
  // Case 1: R_AARCH64_JUMP26 branch relocation. We have already patched this
  // instance of the erratum on a previous patch and altered the relocation. We
  // have nothing more to do.
  // Case 2: A TLS Relaxation R_RELAX_TLS_IE_TO_LE. In this case the ADRP that
  // we read will be transformed into a MOVZ later so we actually don't match
  // the sequence and have nothing more to do.
  // Case 3: A load/store register (unsigned immediate) class relocation. There
  // are two of these R_AARCH_LD64_ABS_LO12_NC and R_AARCH_LD64_GOT_LO12_NC and
  // they are both absolute. We need to add the same relocation to the patch,
  // and replace the relocation with a R_AARCH_JUMP26 branch relocation.
  // Case 4: No relocation. We must create a new R_AARCH64_JUMP26 branch
  // relocation at the offset.
  auto relIt = llvm::find_if(isec->relocations, [=](const Relocation &r) {
    return r.offset == patcheeOffset;
  });
  // Cases 1 and 2: nothing to do.
  if (relIt != isec->relocations.end() &&
      (relIt->type == R_AARCH64_JUMP26 || relIt->expr == R_RELAX_TLS_IE_TO_LE))
    return;

  log("detected cortex-a53-843419 erratum sequence starting at " +
      utohexstr(adrpAddr) + " in unpatched output.");

  auto *ps = make<Patch843419Section>(isec, patcheeOffset);
  patches.push_back(ps);

  // Builds the branch relocation that redirects the patched instruction to
  // the start of the patch section.
  auto makeRelToPatch = [](uint64_t offset, Symbol *patchSym) {
    return Relocation{R_PC, R_AARCH64_JUMP26, offset, 0, patchSym};
  };

  if (relIt != isec->relocations.end()) {
    // Case 3: move the original relocation into the patch (offset 0, where
    // the copied instruction is written) and replace it with the branch.
    ps->relocations.push_back(
        {relIt->expr, relIt->type, 0, relIt->addend, relIt->sym});
    *relIt = makeRelToPatch(patcheeOffset, ps->patchSym);
  } else
    // Case 4: add a fresh branch relocation.
    isec->relocations.push_back(makeRelToPatch(patcheeOffset, ps->patchSym));
}
578
579
// Scan all the instructions in InputSectionDescription, for each instance of
580
// the erratum sequence create a Patch843419Section. We return the list of
581
// Patch843419Sections that need to be applied to ISD.
582
std::vector<Patch843419Section *>
AArch64Err843419Patcher::patchInputSectionDescription(
    InputSectionDescription &isd) {
  std::vector<Patch843419Section *> patches;
  for (InputSection *isec : isd.sections) {
    //  LLD doesn't use the erratum sequence in SyntheticSections.
    if (isa<SyntheticSection>(isec))
      continue;
    // Use sectionMap to make sure we only scan code and not inline data.
    // We have already sorted MapSyms in ascending order and removed consecutive
    // mapping symbols of the same type. Our range of executable instructions to
    // scan is therefore [codeSym->value, dataSym->value) or [codeSym->value,
    // section size).
    std::vector<const Defined *> &mapSyms = sectionMap[isec];

    // First $x symbol marks the start of the first code interval.
    auto codeSym = llvm::find_if(mapSyms, [&](const Defined *ms) {
      return ms->getName().startswith("$x");
    });

    while (codeSym != mapSyms.end()) {
      // The symbol after a $x (if any) is a $d that ends the code interval.
      auto dataSym = std::next(codeSym);
      uint64_t off = (*codeSym)->value;
      uint64_t limit =
          (dataSym == mapSyms.end()) ? isec->data().size() : (*dataSym)->value;

      // scanCortexA53Errata843419 advances `off` itself, skipping to the
      // next candidate page offset.
      while (off < limit) {
        uint64_t startAddr = isec->getVA(off);
        if (uint64_t patcheeOffset = scanCortexA53Errata843419(isec, off, limit))
          implementPatch(startAddr, patcheeOffset, isec, patches);
      }
      if (dataSym == mapSyms.end())
        break;
      codeSym = std::next(dataSym);
    }
  }
  return patches;
}
619
620
// For each InputSectionDescription make one pass over the executable sections
621
// looking for the erratum sequence; creating a synthetic Patch843419Section
622
// for each instance found. We insert these synthetic patch sections after the
623
// executable code in each InputSectionDescription.
624
//
625
// PreConditions:
626
// The Output and Input Sections have had their final addresses assigned.
627
//
628
// PostConditions:
629
// Returns true if at least one patch was added. The addresses of the
630
// Ouptut and Input Sections may have been changed.
631
// Returns false if no patches were required and no changes were made.
632
16
bool AArch64Err843419Patcher::createFixes() {
633
16
  if (initialized == false)
634
8
    init();
635
16
636
16
  bool addressesChanged = false;
637
114
  for (OutputSection *os : outputSections) {
638
114
    if (!(os->flags & SHF_ALLOC) || 
!(os->flags & SHF_EXECINSTR)50
)
639
91
      continue;
640
23
    for (BaseCommand *bc : os->sectionCommands)
641
35
      if (auto *isd = dyn_cast<InputSectionDescription>(bc)) {
642
35
        std::vector<Patch843419Section *> patches =
643
35
            patchInputSectionDescription(*isd);
644
35
        if (!patches.empty()) {
645
8
          insertPatches(*isd, patches);
646
8
          addressesChanged = true;
647
8
        }
648
35
      }
649
23
  }
650
16
  return addressesChanged;
651
16
}