Coverage Report

Created: 2021-01-19 06:58

/Users/buildslave/jenkins/workspace/coverage/llvm-project/clang/lib/CodeGen/SwiftCallingConv.cpp

//===--- SwiftCallingConv.cpp - Lowering for the Swift calling convention -===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Implementation of the abstract lowering for the Swift calling convention.
//
//===----------------------------------------------------------------------===//

#include "clang/CodeGen/SwiftCallingConv.h"
#include "clang/Basic/TargetInfo.h"
#include "CodeGenModule.h"
#include "TargetInfo.h"

using namespace clang;
using namespace CodeGen;
using namespace swiftcall;

static const SwiftABIInfo &getSwiftABIInfo(CodeGenModule &CGM) {
  return cast<SwiftABIInfo>(CGM.getTargetCodeGenInfo().getABIInfo());
}

static bool isPowerOf2(unsigned n) {
  return n == (n & -n);
}
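
// For illustration: (n & -n) isolates the lowest set bit of n, so the
// comparison holds exactly when n has at most one bit set. For example,
// isPowerOf2(8) is true (8 & -8 == 8) and isPowerOf2(12) is false
// (12 & -12 == 4). Note that 0 also satisfies the test.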

/// Given two types with the same size, try to find a common type.
static llvm::Type *getCommonType(llvm::Type *first, llvm::Type *second) {
  assert(first != second);

  // Allow pointers to merge with integers, but prefer the integer type.
  if (first->isIntegerTy()) {
    if (second->isPointerTy()) return first;
  } else if (first->isPointerTy()) {
    if (second->isIntegerTy()) return second;
    if (second->isPointerTy()) return first;

  // Allow two vectors to be merged (given that they have the same size).
  // This assumes that we never have two different vector register sets.
  } else if (auto firstVecTy = dyn_cast<llvm::VectorType>(first)) {
    if (auto secondVecTy = dyn_cast<llvm::VectorType>(second)) {
      if (auto commonTy = getCommonType(firstVecTy->getElementType(),
                                        secondVecTy->getElementType())) {
        return (commonTy == firstVecTy->getElementType() ? first : second);
      }
    }
  }

  return nullptr;
}
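
// For example, merging an i64 entry with a same-sized pointer yields i64,
// while i32 vs. float has no common type here and the caller will fall back
// to opaque storage.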

static CharUnits getTypeStoreSize(CodeGenModule &CGM, llvm::Type *type) {
  return CharUnits::fromQuantity(CGM.getDataLayout().getTypeStoreSize(type));
}

static CharUnits getTypeAllocSize(CodeGenModule &CGM, llvm::Type *type) {
  return CharUnits::fromQuantity(CGM.getDataLayout().getTypeAllocSize(type));
}
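
// The two sizes differ for types with tail padding: on x86-64, for instance,
// x86_fp80 has a store size of 10 bytes but an alloc size of 16.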

void SwiftAggLowering::addTypedData(QualType type, CharUnits begin) {
  // Deal with various aggregate types as special cases:

  // Record types.
  if (auto recType = type->getAs<RecordType>()) {
    addTypedData(recType->getDecl(), begin);

  // Array types.
  } else if (type->isArrayType()) {
    // Incomplete array types (flexible array members?) don't provide
    // data to lay out, and the other cases shouldn't be possible.
    auto arrayType = CGM.getContext().getAsConstantArrayType(type);
    if (!arrayType) return;

    QualType eltType = arrayType->getElementType();
    auto eltSize = CGM.getContext().getTypeSizeInChars(eltType);
    for (uint64_t i = 0, e = arrayType->getSize().getZExtValue(); i != e; ++i) {
      addTypedData(eltType, begin + i * eltSize);
    }

  // Complex types.
  } else if (auto complexType = type->getAs<ComplexType>()) {
    auto eltType = complexType->getElementType();
    auto eltSize = CGM.getContext().getTypeSizeInChars(eltType);
    auto eltLLVMType = CGM.getTypes().ConvertType(eltType);
    addTypedData(eltLLVMType, begin, begin + eltSize);
    addTypedData(eltLLVMType, begin + eltSize, begin + 2 * eltSize);

  // Member pointer types.
  } else if (type->getAs<MemberPointerType>()) {
    // Just add it all as opaque.
    addOpaqueData(begin, begin + CGM.getContext().getTypeSizeInChars(type));

    // Atomic types.
  } else if (const auto *atomicType = type->getAs<AtomicType>()) {
    auto valueType = atomicType->getValueType();
    auto atomicSize = CGM.getContext().getTypeSizeInChars(atomicType);
    auto valueSize = CGM.getContext().getTypeSizeInChars(valueType);

    addTypedData(atomicType->getValueType(), begin);

    // Add atomic padding.
    auto atomicPadding = atomicSize - valueSize;
    if (atomicPadding > CharUnits::Zero())
      addOpaqueData(begin + valueSize, begin + atomicSize);

    // Everything else is scalar and should not convert as an LLVM aggregate.
  } else {
    // We intentionally convert as !ForMem because we want to preserve
    // that a type was an i1.
    auto *llvmType = CGM.getTypes().ConvertType(type);
    addTypedData(llvmType, begin);
  }
}
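
// For example, a _Complex double at offset 0 is recorded as two double
// entries at [0, 8) and [8, 16), and an _Atomic type whose value is smaller
// than its atomic width gets the trailing padding recorded as opaque bytes.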

void SwiftAggLowering::addTypedData(const RecordDecl *record, CharUnits begin) {
  addTypedData(record, begin, CGM.getContext().getASTRecordLayout(record));
}

void SwiftAggLowering::addTypedData(const RecordDecl *record, CharUnits begin,
                                    const ASTRecordLayout &layout) {
  // Unions are a special case.
  if (record->isUnion()) {
    for (auto field : record->fields()) {
      if (field->isBitField()) {
        addBitFieldData(field, begin, 0);
      } else {
        addTypedData(field->getType(), begin);
      }
    }
    return;
  }

  // Note that correctness does not rely on us adding things in
  // their actual order of layout; it's just somewhat more efficient
  // for the builder.

  // With that in mind, add "early" C++ data.
  auto cxxRecord = dyn_cast<CXXRecordDecl>(record);
  if (cxxRecord) {
    //   - a v-table pointer, if the class adds its own
    if (layout.hasOwnVFPtr()) {
      addTypedData(CGM.Int8PtrTy, begin);
    }

    //   - non-virtual bases
    for (auto &baseSpecifier : cxxRecord->bases()) {
      if (baseSpecifier.isVirtual()) continue;

      auto baseRecord = baseSpecifier.getType()->getAsCXXRecordDecl();
      addTypedData(baseRecord, begin + layout.getBaseClassOffset(baseRecord));
    }

    //   - a vbptr if the class adds its own
    if (layout.hasOwnVBPtr()) {
      addTypedData(CGM.Int8PtrTy, begin + layout.getVBPtrOffset());
    }
  }

  // Add fields.
  for (auto field : record->fields()) {
    auto fieldOffsetInBits = layout.getFieldOffset(field->getFieldIndex());
    if (field->isBitField()) {
      addBitFieldData(field, begin, fieldOffsetInBits);
    } else {
      addTypedData(field->getType(),
              begin + CGM.getContext().toCharUnitsFromBits(fieldOffsetInBits));
    }
  }

  // Add "late" C++ data:
  if (cxxRecord) {
    //   - virtual bases
    for (auto &vbaseSpecifier : cxxRecord->vbases()) {
      auto baseRecord = vbaseSpecifier.getType()->getAsCXXRecordDecl();
      addTypedData(baseRecord, begin + layout.getVBaseClassOffset(baseRecord));
    }
  }
}
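
// For a plain struct { char c; int i; }, the field walk above records a char
// entry at offset 0 and an int entry at offset 4 (the field offsets come from
// the AST record layout, so any padding between fields is simply skipped).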

void SwiftAggLowering::addBitFieldData(const FieldDecl *bitfield,
                                       CharUnits recordBegin,
                                       uint64_t bitfieldBitBegin) {
  assert(bitfield->isBitField());
  auto &ctx = CGM.getContext();
  auto width = bitfield->getBitWidthValue(ctx);

  // We can ignore zero-width bit-fields.
  if (width == 0) return;

  // toCharUnitsFromBits rounds down.
  CharUnits bitfieldByteBegin = ctx.toCharUnitsFromBits(bitfieldBitBegin);

  // Find the offset of the last byte that is partially occupied by the
  // bit-field; since we otherwise expect exclusive ends, the end is the
  // next byte.
  uint64_t bitfieldBitLast = bitfieldBitBegin + width - 1;
  CharUnits bitfieldByteEnd =
    ctx.toCharUnitsFromBits(bitfieldBitLast) + CharUnits::One();
  addOpaqueData(recordBegin + bitfieldByteBegin,
                recordBegin + bitfieldByteEnd);
}
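
// Worked example: a bit-field starting at bit 3 with width 10 occupies bits
// [3, 12], so bitfieldByteBegin is 0 and bitfieldByteEnd is 12/8 + 1 == 2;
// the bytes [recordBegin, recordBegin + 2) are recorded as opaque.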

void SwiftAggLowering::addTypedData(llvm::Type *type, CharUnits begin) {
  assert(type && "didn't provide type for typed data");
  addTypedData(type, begin, begin + getTypeStoreSize(CGM, type));
}

void SwiftAggLowering::addTypedData(llvm::Type *type,
                                    CharUnits begin, CharUnits end) {
  assert(type && "didn't provide type for typed data");
  assert(getTypeStoreSize(CGM, type) == end - begin);

  // Legalize vector types.
  if (auto vecTy = dyn_cast<llvm::VectorType>(type)) {
    SmallVector<llvm::Type*, 4> componentTys;
    legalizeVectorType(CGM, end - begin, vecTy, componentTys);
    assert(componentTys.size() >= 1);

    // Walk the initial components.
    for (size_t i = 0, e = componentTys.size(); i != e - 1; ++i) {
      llvm::Type *componentTy = componentTys[i];
      auto componentSize = getTypeStoreSize(CGM, componentTy);
      assert(componentSize < end - begin);
      addLegalTypedData(componentTy, begin, begin + componentSize);
      begin += componentSize;
    }

    return addLegalTypedData(componentTys.back(), begin, end);
  }

  // Legalize integer types.
  if (auto intTy = dyn_cast<llvm::IntegerType>(type)) {
    if (!isLegalIntegerType(CGM, intTy))
      return addOpaqueData(begin, end);
  }

  // All other types should be legal.
  return addLegalTypedData(type, begin, end);
}

void SwiftAggLowering::addLegalTypedData(llvm::Type *type,
                                         CharUnits begin, CharUnits end) {
  // Require the type to be naturally aligned.
  if (!begin.isZero() && !begin.isMultipleOf(getNaturalAlignment(CGM, type))) {

    // Try splitting vector types.
    if (auto vecTy = dyn_cast<llvm::VectorType>(type)) {
      auto split = splitLegalVectorType(CGM, end - begin, vecTy);
      auto eltTy = split.first;
      auto numElts = split.second;

      auto eltSize = (end - begin) / numElts;
      assert(eltSize == getTypeStoreSize(CGM, eltTy));
      for (size_t i = 0, e = numElts; i != e; ++i) {
        addLegalTypedData(eltTy, begin, begin + eltSize);
        begin += eltSize;
      }
      assert(begin == end);
      return;
    }

    return addOpaqueData(begin, end);
  }

  addEntry(type, begin, end);
}
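
// For example, a <2 x float> (natural alignment 8) that lands at offset 4
// is misaligned, so it is split into two float entries at offsets 4 and 8,
// each of which satisfies its own 4-byte natural alignment.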

void SwiftAggLowering::addEntry(llvm::Type *type,
                                CharUnits begin, CharUnits end) {
  assert((!type ||
          (!isa<llvm::StructType>(type) && !isa<llvm::ArrayType>(type))) &&
         "cannot add aggregate-typed data");
  assert(!type || begin.isMultipleOf(getNaturalAlignment(CGM, type)));

  // Fast path: we can just add entries to the end.
  if (Entries.empty() || Entries.back().End <= begin) {
    Entries.push_back({begin, end, type});
    return;
  }

  // Find the first existing entry that ends after the start of the new data.
  // TODO: do a binary search if Entries is big enough for it to matter.
  size_t index = Entries.size() - 1;
  while (index != 0) {
    if (Entries[index - 1].End <= begin) break;
    --index;
  }

  // The entry ends after the start of the new data.
  // If the entry starts after the end of the new data, there's no conflict.
  if (Entries[index].Begin >= end) {
    // This insertion is potentially O(n), but the way we generally build
    // these layouts makes that unlikely to matter: we'd need a union of
    // several very large types.
    Entries.insert(Entries.begin() + index, {begin, end, type});
    return;
  }

  // Otherwise, the ranges overlap.  The new range might also overlap
  // with later ranges.
restartAfterSplit:

  // Simplest case: an exact overlap.
  if (Entries[index].Begin == begin && Entries[index].End == end) {
    // If the types match exactly, great.
    if (Entries[index].Type == type) return;

    // If either type is opaque, make the entry opaque and return.
    if (Entries[index].Type == nullptr) {
      return;
    } else if (type == nullptr) {
      Entries[index].Type = nullptr;
      return;
    }

    // If they disagree in an ABI-agnostic way, just resolve the conflict
    // arbitrarily.
    if (auto entryType = getCommonType(Entries[index].Type, type)) {
      Entries[index].Type = entryType;
      return;
    }

    // Otherwise, make the entry opaque.
    Entries[index].Type = nullptr;
    return;
  }

  // Okay, we have an overlapping conflict of some sort.

  // If we have a vector type, split it.
  if (auto vecTy = dyn_cast_or_null<llvm::VectorType>(type)) {
    auto eltTy = vecTy->getElementType();
    CharUnits eltSize =
        (end - begin) / cast<llvm::FixedVectorType>(vecTy)->getNumElements();
    assert(eltSize == getTypeStoreSize(CGM, eltTy));
    for (unsigned i = 0,
                  e = cast<llvm::FixedVectorType>(vecTy)->getNumElements();
         i != e; ++i) {
      addEntry(eltTy, begin, begin + eltSize);
      begin += eltSize;
    }
    assert(begin == end);
    return;
  }

  // If the entry is a vector type, split it and try again.
  if (Entries[index].Type && Entries[index].Type->isVectorTy()) {
    splitVectorEntry(index);
    goto restartAfterSplit;
  }

  // Okay, we have no choice but to make the existing entry opaque.

  Entries[index].Type = nullptr;

  // Stretch the start of the entry to the beginning of the range.
  if (begin < Entries[index].Begin) {
    Entries[index].Begin = begin;
    assert(index == 0 || begin >= Entries[index - 1].End);
  }

  // Stretch the end of the entry to the end of the range; but if we run
  // into the start of the next entry, just leave the range there and repeat.
  while (end > Entries[index].End) {
    assert(Entries[index].Type == nullptr);

    // If the range doesn't overlap the next entry, we're done.
    if (index == Entries.size() - 1 || end <= Entries[index + 1].Begin) {
      Entries[index].End = end;
      break;
    }

    // Otherwise, stretch to the start of the next entry.
    Entries[index].End = Entries[index + 1].Begin;

    // Continue with the next entry.
    index++;

    // This entry needs to be made opaque if it is not already.
    if (Entries[index].Type == nullptr)
      continue;

    // Split vector entries unless we completely subsume them.
    if (Entries[index].Type->isVectorTy() &&
        end < Entries[index].End) {
      splitVectorEntry(index);
    }

    // Make the entry opaque.
    Entries[index].Type = nullptr;
  }
}
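
// Example of conflict resolution: if an i32 entry already covers [0, 4) and a
// float is added over the same range, the exact-overlap case applies,
// getCommonType(i32, float) fails, and the entry simply becomes opaque; the
// bytes are later re-typed as integer chunks in finish().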

/// Replace the entry of vector type at offset 'index' with a sequence
/// of its component vectors.
void SwiftAggLowering::splitVectorEntry(unsigned index) {
  auto vecTy = cast<llvm::VectorType>(Entries[index].Type);
  auto split = splitLegalVectorType(CGM, Entries[index].getWidth(), vecTy);

  auto eltTy = split.first;
  CharUnits eltSize = getTypeStoreSize(CGM, eltTy);
  auto numElts = split.second;
  Entries.insert(Entries.begin() + index + 1, numElts - 1, StorageEntry());

  CharUnits begin = Entries[index].Begin;
  for (unsigned i = 0; i != numElts; ++i) {
    // Advance through the replaced entry and the ones just inserted after it.
    Entries[index].Type = eltTy;
    Entries[index].Begin = begin;
    Entries[index].End = begin + eltSize;
    begin += eltSize;
    index++;
  }
}
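
// For instance, splitting a <4 x i32> entry covering [0, 16) on a target
// where <2 x i32> is still legal yields two <2 x i32> entries covering
// [0, 8) and [8, 16).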

/// Given a power-of-two unit size, return the offset of the aligned unit
/// of that size which contains the given offset.
///
/// In other words, round down to the nearest multiple of the unit size.
static CharUnits getOffsetAtStartOfUnit(CharUnits offset, CharUnits unitSize) {
  assert(isPowerOf2(unitSize.getQuantity()));
  auto unitMask = ~(unitSize.getQuantity() - 1);
  return CharUnits::fromQuantity(offset.getQuantity() & unitMask);
}
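
// For example, with an 8-byte unit, offset 13 maps to 13 & ~7 == 8, the
// start of the unit [8, 16) that contains it.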

static bool areBytesInSameUnit(CharUnits first, CharUnits second,
                               CharUnits chunkSize) {
  return getOffsetAtStartOfUnit(first, chunkSize)
      == getOffsetAtStartOfUnit(second, chunkSize);
}
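
// With an 8-byte chunk, bytes 3 and 6 share the unit starting at 0, while
// bytes 6 and 9 do not (their units start at 0 and 8 respectively).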

static bool isMergeableEntryType(llvm::Type *type) {
  // Opaquely-typed memory is always mergeable.
  if (type == nullptr) return true;

  // Pointers and integers are always mergeable.  In theory we should not
  // merge pointers, but (1) it doesn't currently matter in practice because
  // the chunk size is never greater than the size of a pointer and (2)
  // Swift IRGen uses integer types for a lot of things that are "really"
  // just storing pointers (like Optional<SomePointer>).  If we ever have a
  // target that would otherwise combine pointers, we should put some effort
  // into fixing those cases in Swift IRGen and then call out pointer types
  // here.

  // Floating-point and vector types should never be merged.
  // Most such types are too large and highly-aligned to ever trigger merging
  // in practice, but it's important for the rule to cover at least 'half'
  // and 'float', as well as things like small vectors of 'i1' or 'i8'.
  return (!type->isFloatingPointTy() && !type->isVectorTy());
}

bool SwiftAggLowering::shouldMergeEntries(const StorageEntry &first,
                                          const StorageEntry &second,
                                          CharUnits chunkSize) {
  // Only merge entries that overlap the same chunk.  We test this first
  // despite being a bit more expensive because this is the condition that
  // tends to prevent merging.
  if (!areBytesInSameUnit(first.End - CharUnits::One(), second.Begin,
                          chunkSize))
    return false;

  return (isMergeableEntryType(first.Type) &&
          isMergeableEntryType(second.Type));
}
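
// For example, i8 entries at [0, 1) and [2, 3) merge (both lie in the chunk
// starting at 0 and i8 is mergeable), whereas a float entry never merges,
// even with a neighbor in the same chunk.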

void SwiftAggLowering::finish() {
  if (Entries.empty()) {
    Finished = true;
    return;
  }

  // We logically split the layout down into a series of chunks of this size,
  // which is generally the size of a pointer.
  const CharUnits chunkSize = getMaximumVoluntaryIntegerSize(CGM);

  // First pass: if two entries should be merged, make them both opaque
  // and stretch one to meet the next.
  // Also, remember if there are any opaque entries.
  bool hasOpaqueEntries = (Entries[0].Type == nullptr);
  for (size_t i = 1, e = Entries.size(); i != e; ++i) {
    if (shouldMergeEntries(Entries[i - 1], Entries[i], chunkSize)) {
      Entries[i - 1].Type = nullptr;
      Entries[i].Type = nullptr;
      Entries[i - 1].End = Entries[i].Begin;
      hasOpaqueEntries = true;

    } else if (Entries[i].Type == nullptr) {
      hasOpaqueEntries = true;
    }
  }

  // The rest of the algorithm leaves non-opaque entries alone, so if we
  // have no opaque entries, we're done.
  if (!hasOpaqueEntries) {
    Finished = true;
    return;
  }

  // Okay, move the entries to a temporary and rebuild Entries.
  auto orig = std::move(Entries);
  assert(Entries.empty());

  for (size_t i = 0, e = orig.size(); i != e; ++i) {
    // Just copy over non-opaque entries.
    if (orig[i].Type != nullptr) {
      Entries.push_back(orig[i]);
      continue;
    }

    // Scan forward to determine the full extent of the next opaque range.
    // We know from the first pass that only contiguous ranges will overlap
    // the same aligned chunk.
    auto begin = orig[i].Begin;
    auto end = orig[i].End;
    while (i + 1 != e &&
           orig[i + 1].Type == nullptr &&
           end == orig[i + 1].Begin) {
      end = orig[i + 1].End;
      i++;
    }

    // Add an entry per intersected chunk.
    do {
      // Find the smallest aligned storage unit in the maximal aligned
      // storage unit containing 'begin' that contains all the bytes in
      // the intersection between the range and this chunk.
      CharUnits localBegin = begin;
      CharUnits chunkBegin = getOffsetAtStartOfUnit(localBegin, chunkSize);
      CharUnits chunkEnd = chunkBegin + chunkSize;
      CharUnits localEnd = std::min(end, chunkEnd);

      // Just do a simple loop over ever-increasing unit sizes.
      CharUnits unitSize = CharUnits::One();
      CharUnits unitBegin, unitEnd;
      for (; ; unitSize *= 2) {
        assert(unitSize <= chunkSize);
        unitBegin = getOffsetAtStartOfUnit(localBegin, unitSize);
        unitEnd = unitBegin + unitSize;
        if (unitEnd >= localEnd) break;
      }

      // Add an entry for this unit.
      auto entryTy =
        llvm::IntegerType::get(CGM.getLLVMContext(),
                               CGM.getContext().toBits(unitSize));
      Entries.push_back({unitBegin, unitEnd, entryTy});

      // The next chunk starts where this chunk left off.
      begin = localEnd;
    } while (begin != end);
  }

  // Okay, finally finished.
  Finished = true;
}
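
// Worked example: with an 8-byte chunk size, an opaque range [0, 12) is
// emitted as an i64 covering [0, 8) (the unit grows 1 -> 2 -> 4 -> 8 until it
// reaches localEnd) followed by an i32 covering [8, 12).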

void SwiftAggLowering::enumerateComponents(EnumerationCallback callback) const {
  assert(Finished && "haven't yet finished lowering");

  for (auto &entry : Entries) {
    callback(entry.Begin, entry.End, entry.Type);
  }
}

std::pair<llvm::StructType*, llvm::Type*>
SwiftAggLowering::getCoerceAndExpandTypes() const {
  assert(Finished && "haven't yet finished lowering");

  auto &ctx = CGM.getLLVMContext();

  if (Entries.empty()) {
    auto type = llvm::StructType::get(ctx);
    return { type, type };
  }

  SmallVector<llvm::Type*, 8> elts;
  CharUnits lastEnd = CharUnits::Zero();
  bool hasPadding = false;
  bool packed = false;
  for (auto &entry : Entries) {
    if (entry.Begin != lastEnd) {
      auto paddingSize = entry.Begin - lastEnd;
      assert(!paddingSize.isNegative());

      auto padding = llvm::ArrayType::get(llvm::Type::getInt8Ty(ctx),
                                          paddingSize.getQuantity());
      elts.push_back(padding);
      hasPadding = true;
    }

    if (!packed && !entry.Begin.isMultipleOf(
          CharUnits::fromQuantity(
            CGM.getDataLayout().getABITypeAlignment(entry.Type))))
      packed = true;

    elts.push_back(entry.Type);

    lastEnd = entry.Begin + getTypeAllocSize(CGM, entry.Type);
    assert(entry.End <= lastEnd);
  }

  // We don't need to adjust 'packed' to deal with possible tail padding
  // because we never do that kind of access through the coercion type.
  auto coercionType = llvm::StructType::get(ctx, elts, packed);

  llvm::Type *unpaddedType = coercionType;
  if (hasPadding) {
    elts.clear();
    for (auto &entry : Entries) {
      elts.push_back(entry.Type);
    }
    if (elts.size() == 1) {
      unpaddedType = elts[0];
    } else {
      unpaddedType = llvm::StructType::get(ctx, elts, /*packed*/ false);
    }
  } else if (Entries.size() == 1) {
    unpaddedType = Entries[0].Type;
  }

  return { coercionType, unpaddedType };
}
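
// For entries i32 at [0, 4) and i64 at [8, 16), the gap is filled with
// [4 x i8], giving a coercion type of { i32, [4 x i8], i64 } and an unpadded
// type of { i32, i64 }.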

bool SwiftAggLowering::shouldPassIndirectly(bool asReturnValue) const {
  assert(Finished && "haven't yet finished lowering");

  // Empty types don't need to be passed indirectly.
  if (Entries.empty()) return false;

  // Avoid copying the array of types when there's just a single element.
  if (Entries.size() == 1) {
    return getSwiftABIInfo(CGM).shouldPassIndirectlyForSwift(
                                                           Entries.back().Type,
                                                             asReturnValue);
  }

  SmallVector<llvm::Type*, 8> componentTys;
  componentTys.reserve(Entries.size());
  for (auto &entry : Entries) {
    componentTys.push_back(entry.Type);
  }
  return getSwiftABIInfo(CGM).shouldPassIndirectlyForSwift(componentTys,
                                                           asReturnValue);
}

bool swiftcall::shouldPassIndirectly(CodeGenModule &CGM,
                                     ArrayRef<llvm::Type*> componentTys,
                                     bool asReturnValue) {
  return getSwiftABIInfo(CGM).shouldPassIndirectlyForSwift(componentTys,
                                                           asReturnValue);
}

CharUnits swiftcall::getMaximumVoluntaryIntegerSize(CodeGenModule &CGM) {
  // Currently always the size of an ordinary pointer.
  return CGM.getContext().toCharUnitsFromBits(
           CGM.getContext().getTargetInfo().getPointerWidth(0));
}

CharUnits swiftcall::getNaturalAlignment(CodeGenModule &CGM, llvm::Type *type) {
  // For Swift's purposes, this is always just the store size of the type
  // rounded up to a power of 2.
  auto size = (unsigned long long) getTypeStoreSize(CGM, type).getQuantity();
  if (!isPowerOf2(size)) {
    size = 1ULL << (llvm::findLastSet(size, llvm::ZB_Undefined) + 1);
  }
  assert(size >= CGM.getDataLayout().getABITypeAlignment(type));
  return CharUnits::fromQuantity(size);
}
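
// For a type with a 12-byte store size, findLastSet(12) is 3, so the natural
// alignment is 1 << 4 == 16 bytes; power-of-two sizes are returned unchanged.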

bool swiftcall::isLegalIntegerType(CodeGenModule &CGM,
                                   llvm::IntegerType *intTy) {
  auto size = intTy->getBitWidth();
  switch (size) {
  case 1:
  case 8:
  case 16:
  case 32:
  case 64:
    // Just assume that the above are always legal.
    return true;

  case 128:
    return CGM.getContext().getTargetInfo().hasInt128Type();

  default:
    return false;
  }
}

bool swiftcall::isLegalVectorType(CodeGenModule &CGM, CharUnits vectorSize,
                                  llvm::VectorType *vectorTy) {
  return isLegalVectorType(
      CGM, vectorSize, vectorTy->getElementType(),
      cast<llvm::FixedVectorType>(vectorTy)->getNumElements());
}

bool swiftcall::isLegalVectorType(CodeGenModule &CGM, CharUnits vectorSize,
                                  llvm::Type *eltTy, unsigned numElts) {
  assert(numElts > 1 && "illegal vector length");
  return getSwiftABIInfo(CGM)
           .isLegalVectorTypeForSwift(vectorSize, eltTy, numElts);
}

std::pair<llvm::Type*, unsigned>
swiftcall::splitLegalVectorType(CodeGenModule &CGM, CharUnits vectorSize,
                                llvm::VectorType *vectorTy) {
  auto numElts = cast<llvm::FixedVectorType>(vectorTy)->getNumElements();
  auto eltTy = vectorTy->getElementType();

  // Try to split the vector type in half.
  if (numElts >= 4 && isPowerOf2(numElts)) {
    if (isLegalVectorType(CGM, vectorSize / 2, eltTy, numElts / 2))
      return {llvm::FixedVectorType::get(eltTy, numElts / 2), 2};
  }

  return {eltTy, numElts};
}
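
// For instance, <8 x float> splits into {<4 x float>, 2} when the target
// reports the 16-byte <4 x float> as legal; a <2 x float> or <3 x float>
// falls through and decomposes into its scalar elements.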

void swiftcall::legalizeVectorType(CodeGenModule &CGM, CharUnits origVectorSize,
                                   llvm::VectorType *origVectorTy,
                             llvm::SmallVectorImpl<llvm::Type*> &components) {
  // If it's already a legal vector type, use it.
  if (isLegalVectorType(CGM, origVectorSize, origVectorTy)) {
    components.push_back(origVectorTy);
    return;
  }

  // Try to split the vector into legal subvectors.
  auto numElts = cast<llvm::FixedVectorType>(origVectorTy)->getNumElements();
  auto eltTy = origVectorTy->getElementType();
  assert(numElts != 1);

  // The largest size that we're still considering making subvectors of.
  // Always a power of 2.
  unsigned logCandidateNumElts = llvm::findLastSet(numElts, llvm::ZB_Undefined);
  unsigned candidateNumElts = 1U << logCandidateNumElts;
  assert(candidateNumElts <= numElts && candidateNumElts * 2 > numElts);

  // Minor optimization: don't check the legality of this exact size twice.
  if (candidateNumElts == numElts) {
    logCandidateNumElts--;
    candidateNumElts >>= 1;
  }

  CharUnits eltSize = (origVectorSize / numElts);
  CharUnits candidateSize = eltSize * candidateNumElts;

  // The sensibility of this algorithm relies on the fact that we never
  // have a legal non-power-of-2 vector size without having the power of 2
  // also be legal.
  while (logCandidateNumElts > 0) {
    assert(candidateNumElts == 1U << logCandidateNumElts);
    assert(candidateNumElts <= numElts);
    assert(candidateSize == eltSize * candidateNumElts);

    // Skip illegal vector sizes.
    if (!isLegalVectorType(CGM, candidateSize, eltTy, candidateNumElts)) {
      logCandidateNumElts--;
      candidateNumElts /= 2;
      candidateSize /= 2;
      continue;
    }

    // Add the right number of vectors of this size.
    auto numVecs = numElts >> logCandidateNumElts;
    components.append(numVecs,
                      llvm::FixedVectorType::get(eltTy, candidateNumElts));
    numElts -= (numVecs << logCandidateNumElts);

    if (numElts == 0) return;

    // It's possible that the number of elements remaining will be legal.
    // This can happen with e.g. <7 x float> when <3 x float> is legal.
    // This only needs to be separately checked if it's not a power of 2.
    if (numElts > 2 && !isPowerOf2(numElts) &&
        isLegalVectorType(CGM, eltSize * numElts, eltTy, numElts)) {
      components.push_back(llvm::FixedVectorType::get(eltTy, numElts));
      return;
    }

    // Bring vecSize down to something no larger than numElts.
    do {
      logCandidateNumElts--;
      candidateNumElts /= 2;
      candidateSize /= 2;
    } while (candidateNumElts > numElts);
  }

  // Otherwise, just append a bunch of individual elements.
  components.append(numElts, eltTy);
}
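
// Sample run, assuming <4 x float> and <2 x float> are legal but <7 x float>
// and <3 x float> are not: legalizing <7 x float> first emits one
// <4 x float> (candidate 4, leaving 3 elements), then one <2 x float>
// (candidate 2, leaving 1), and finally appends the last float as a scalar.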

bool swiftcall::mustPassRecordIndirectly(CodeGenModule &CGM,
                                         const RecordDecl *record) {
  // FIXME: should we not rely on the standard computation in Sema, just in
  // case we want to diverge from the platform ABI (e.g. on targets where
  // that uses the MSVC rule)?
  return !record->canPassInRegisters();
}

static ABIArgInfo classifyExpandedType(SwiftAggLowering &lowering,
                                       bool forReturn,
                                       CharUnits alignmentForIndirect) {
  if (lowering.empty()) {
    return ABIArgInfo::getIgnore();
  } else if (lowering.shouldPassIndirectly(forReturn)) {
    return ABIArgInfo::getIndirect(alignmentForIndirect, /*byval*/ false);
  } else {
    auto types = lowering.getCoerceAndExpandTypes();
    return ABIArgInfo::getCoerceAndExpand(types.first, types.second);
  }
}

static ABIArgInfo classifyType(CodeGenModule &CGM, CanQualType type,
                               bool forReturn) {
  if (auto recordType = dyn_cast<RecordType>(type)) {
    auto record = recordType->getDecl();
    auto &layout = CGM.getContext().getASTRecordLayout(record);

    if (mustPassRecordIndirectly(CGM, record))
      return ABIArgInfo::getIndirect(layout.getAlignment(), /*byval*/ false);

    SwiftAggLowering lowering(CGM);
    lowering.addTypedData(recordType->getDecl(), CharUnits::Zero(), layout);
    lowering.finish();

    return classifyExpandedType(lowering, forReturn, layout.getAlignment());
  }

  // Just assume that all of our target ABIs can support returning at least
  // two integer or floating-point values.
  if (isa<ComplexType>(type)) {
    return (forReturn ? ABIArgInfo::getDirect() : ABIArgInfo::getExpand());
  }

  // Vector types may need to be legalized.
  if (isa<VectorType>(type)) {
    SwiftAggLowering lowering(CGM);
    lowering.addTypedData(type, CharUnits::Zero());
    lowering.finish();

    CharUnits alignment = CGM.getContext().getTypeAlignInChars(type);
    return classifyExpandedType(lowering, forReturn, alignment);
  }

  // Member pointer types need to be expanded, but it's a simple form of
  // expansion that 'Direct' can handle.  Note that CanBeFlattened should be
  // true for this to work.

  // 'void' needs to be ignored.
  if (type->isVoidType()) {
    return ABIArgInfo::getIgnore();
  }

  // Everything else can be passed directly.
  return ABIArgInfo::getDirect();
}

ABIArgInfo swiftcall::classifyReturnType(CodeGenModule &CGM, CanQualType type) {
  return classifyType(CGM, type, /*forReturn*/ true);
}

ABIArgInfo swiftcall::classifyArgumentType(CodeGenModule &CGM,
                                           CanQualType type) {
  return classifyType(CGM, type, /*forReturn*/ false);
}

void swiftcall::computeABIInfo(CodeGenModule &CGM, CGFunctionInfo &FI) {
  auto &retInfo = FI.getReturnInfo();
  retInfo = classifyReturnType(CGM, FI.getReturnType());

  for (unsigned i = 0, e = FI.arg_size(); i != e; ++i) {
    auto &argInfo = FI.arg_begin()[i];
    argInfo.info = classifyArgumentType(CGM, argInfo.type);
  }
}

// Is swifterror lowered to a register by the target ABI.
bool swiftcall::isSwiftErrorLoweredInRegister(CodeGenModule &CGM) {
  return getSwiftABIInfo(CGM).isSwiftErrorInRegister();
}