Coverage Report

Created: 2019-02-20 00:17

/Users/buildslave/jenkins/workspace/clang-stage2-coverage-R/llvm/include/llvm/CodeGen/RegAllocPBQP.h

//===- RegAllocPBQP.h -------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the PBQPBuilder interface, for classes which build PBQP
// instances to represent register allocation problems, and the RegAllocPBQP
// interface.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_REGALLOCPBQP_H
#define LLVM_CODEGEN_REGALLOCPBQP_H

#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/Hashing.h"
#include "llvm/CodeGen/PBQP/CostAllocator.h"
#include "llvm/CodeGen/PBQP/Graph.h"
#include "llvm/CodeGen/PBQP/Math.h"
#include "llvm/CodeGen/PBQP/ReductionRules.h"
#include "llvm/CodeGen/PBQP/Solution.h"
#include "llvm/Support/ErrorHandling.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <limits>
#include <memory>
#include <set>
#include <vector>

namespace llvm {

class FunctionPass;
class LiveIntervals;
class MachineBlockFrequencyInfo;
class MachineFunction;
class raw_ostream;

namespace PBQP {
namespace RegAlloc {

/// Spill option index.
inline unsigned getSpillOptionIdx() { return 0; }

/// Metadata to speed allocatability test.
///
/// Keeps track of the number of infinities in each row and column.
class MatrixMetadata {
public:
  MatrixMetadata(const Matrix& M)
    : UnsafeRows(new bool[M.getRows() - 1]()),
      UnsafeCols(new bool[M.getCols() - 1]()) {
    unsigned* ColCounts = new unsigned[M.getCols() - 1]();

    for (unsigned i = 1; i < M.getRows(); ++i) {
      unsigned RowCount = 0;
      for (unsigned j = 1; j < M.getCols(); ++j) {
        if (M[i][j] == std::numeric_limits<PBQPNum>::infinity()) {
          ++RowCount;
          ++ColCounts[j - 1];
          UnsafeRows[i - 1] = true;
          UnsafeCols[j - 1] = true;
        }
      }
      WorstRow = std::max(WorstRow, RowCount);
    }
    unsigned WorstColCountForCurRow =
      *std::max_element(ColCounts, ColCounts + M.getCols() - 1);
    WorstCol = std::max(WorstCol, WorstColCountForCurRow);
    delete[] ColCounts;
  }

  MatrixMetadata(const MatrixMetadata &) = delete;
  MatrixMetadata &operator=(const MatrixMetadata &) = delete;

  unsigned getWorstRow() const { return WorstRow; }
  unsigned getWorstCol() const { return WorstCol; }
  const bool* getUnsafeRows() const { return UnsafeRows.get(); }
  const bool* getUnsafeCols() const { return UnsafeCols.get(); }

private:
  unsigned WorstRow = 0;
  unsigned WorstCol = 0;
  std::unique_ptr<bool[]> UnsafeRows;
  std::unique_ptr<bool[]> UnsafeCols;
};
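The constructor above is the whole allocatability-speedup trick: an infinite cost entry marks its row and column as unsafe, and the worst per-row/per-column infinity counts bound how many options a single edge can deny. Below is a minimal standalone sketch of the same counting over a plain 2-D array (illustrative only; it does not use the PBQP Matrix type, and the cost values are made up):

    #include <algorithm>
    #include <cstdio>
    #include <limits>

    int main() {
      const double Inf = std::numeric_limits<double>::infinity();
      // Index 0 is the spill option (see getSpillOptionIdx()), which the
      // metadata skips; rows/columns 1..2 are register options.
      const double M[3][3] = {
          {0.0, 0.0, 0.0},
          {0.0, Inf, 1.0},  // an infinite entry: these two options conflict
          {0.0, Inf, 2.0},  // column 1 collects a second infinity here
      };

      bool UnsafeRows[2] = {}, UnsafeCols[2] = {};
      unsigned ColCounts[2] = {}, WorstRow = 0, WorstCol = 0;
      for (unsigned i = 1; i < 3; ++i) {
        unsigned RowCount = 0;
        for (unsigned j = 1; j < 3; ++j)
          if (M[i][j] == Inf) {
            ++RowCount;
            ++ColCounts[j - 1];
            UnsafeRows[i - 1] = UnsafeCols[j - 1] = true;
          }
        WorstRow = std::max(WorstRow, RowCount);
      }
      WorstCol = *std::max_element(ColCounts, ColCounts + 2);
      // Prints "WorstRow=1 WorstCol=2": at most one infinity appears in any
      // option row of this matrix, and up to two in an option column.
      std::printf("WorstRow=%u WorstCol=%u\n", WorstRow, WorstCol);
    }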
/// Holds a vector of the allowed physical regs for a vreg.
class AllowedRegVector {
  friend hash_code hash_value(const AllowedRegVector &);

public:
  AllowedRegVector() = default;
  AllowedRegVector(AllowedRegVector &&) = default;

  AllowedRegVector(const std::vector<unsigned> &OptVec)
    : NumOpts(OptVec.size()), Opts(new unsigned[NumOpts]) {
    std::copy(OptVec.begin(), OptVec.end(), Opts.get());
  }

  unsigned size() const { return NumOpts; }
  unsigned operator[](size_t I) const { return Opts[I]; }

  bool operator==(const AllowedRegVector &Other) const {
    if (NumOpts != Other.NumOpts)
      return false;
    return std::equal(Opts.get(), Opts.get() + NumOpts, Other.Opts.get());
  }

  bool operator!=(const AllowedRegVector &Other) const {
    return !(*this == Other);
  }

private:
  unsigned NumOpts = 0;
  std::unique_ptr<unsigned[]> Opts;
};

inline hash_code hash_value(const AllowedRegVector &OptRegs) {
  unsigned *OStart = OptRegs.Opts.get();
  unsigned *OEnd = OptRegs.Opts.get() + OptRegs.NumOpts;
  return hash_combine(OptRegs.NumOpts,
                      hash_combine_range(OStart, OEnd));
}

/// Holds graph-level metadata relevant to PBQP RA problems.
class GraphMetadata {
private:
  using AllowedRegVecPool = ValuePool<AllowedRegVector>;

public:
  using AllowedRegVecRef = AllowedRegVecPool::PoolRef;

  GraphMetadata(MachineFunction &MF,
                LiveIntervals &LIS,
                MachineBlockFrequencyInfo &MBFI)
    : MF(MF), LIS(LIS), MBFI(MBFI) {}

  MachineFunction &MF;
  LiveIntervals &LIS;
  MachineBlockFrequencyInfo &MBFI;

  void setNodeIdForVReg(unsigned VReg, GraphBase::NodeId NId) {
    VRegToNodeId[VReg] = NId;
  }

  GraphBase::NodeId getNodeIdForVReg(unsigned VReg) const {
    auto VRegItr = VRegToNodeId.find(VReg);
    if (VRegItr == VRegToNodeId.end())
      return GraphBase::invalidNodeId();
    return VRegItr->second;
  }

  AllowedRegVecRef getAllowedRegs(AllowedRegVector Allowed) {
    return AllowedRegVecs.getValue(std::move(Allowed));
  }

private:
  DenseMap<unsigned, GraphBase::NodeId> VRegToNodeId;
  AllowedRegVecPool AllowedRegVecs;
};

/// Holds solver state and other metadata relevant to each PBQP RA node.
class NodeMetadata {
public:
  using AllowedRegVector = RegAlloc::AllowedRegVector;

  // The node's reduction state. The order in this enum is important,
  // as it is assumed nodes can only progress up (i.e. towards being
  // optimally reducible) when reducing the graph.
  using ReductionState = enum {
    Unprocessed,
    NotProvablyAllocatable,
    ConservativelyAllocatable,
    OptimallyReducible
  };

  NodeMetadata() = default;

  NodeMetadata(const NodeMetadata &Other)
    : RS(Other.RS), NumOpts(Other.NumOpts), DeniedOpts(Other.DeniedOpts),
      OptUnsafeEdges(new unsigned[NumOpts]), VReg(Other.VReg),
      AllowedRegs(Other.AllowedRegs)
#ifndef NDEBUG
      , everConservativelyAllocatable(Other.everConservativelyAllocatable)
#endif
  {
    if (NumOpts > 0) {
      std::copy(&Other.OptUnsafeEdges[0], &Other.OptUnsafeEdges[NumOpts],
                &OptUnsafeEdges[0]);
    }
  }

  NodeMetadata(NodeMetadata &&) = default;
  NodeMetadata& operator=(NodeMetadata &&) = default;

  void setVReg(unsigned VReg) { this->VReg = VReg; }
  unsigned getVReg() const { return VReg; }

  void setAllowedRegs(GraphMetadata::AllowedRegVecRef AllowedRegs) {
    this->AllowedRegs = std::move(AllowedRegs);
  }
  const AllowedRegVector& getAllowedRegs() const { return *AllowedRegs; }

  void setup(const Vector& Costs) {
    NumOpts = Costs.getLength() - 1;
    OptUnsafeEdges = std::unique_ptr<unsigned[]>(new unsigned[NumOpts]());
  }

  ReductionState getReductionState() const { return RS; }
  void setReductionState(ReductionState RS) {
    assert(RS >= this->RS && "A node's reduction state can not be downgraded");
    this->RS = RS;

#ifndef NDEBUG
    // Remember this state to assert later that a non-infinite register
    // option was available.
    if (RS == ConservativelyAllocatable)
      everConservativelyAllocatable = true;
#endif
  }

  void handleAddEdge(const MatrixMetadata& MD, bool Transpose) {
    DeniedOpts += Transpose ? MD.getWorstRow() : MD.getWorstCol();
    const bool* UnsafeOpts =
      Transpose ? MD.getUnsafeCols() : MD.getUnsafeRows();
    for (unsigned i = 0; i < NumOpts; ++i)
      OptUnsafeEdges[i] += UnsafeOpts[i];
  }

  void handleRemoveEdge(const MatrixMetadata& MD, bool Transpose) {
    DeniedOpts -= Transpose ? MD.getWorstRow() : MD.getWorstCol();
    const bool* UnsafeOpts =
      Transpose ? MD.getUnsafeCols() : MD.getUnsafeRows();
    for (unsigned i = 0; i < NumOpts; ++i)
      OptUnsafeEdges[i] -= UnsafeOpts[i];
  }

  bool isConservativelyAllocatable() const {
    return (DeniedOpts < NumOpts) ||
      (std::find(&OptUnsafeEdges[0], &OptUnsafeEdges[NumOpts], 0) !=
       &OptUnsafeEdges[NumOpts]);
  }

#ifndef NDEBUG
  bool wasConservativelyAllocatable() const {
    return everConservativelyAllocatable;
  }
#endif

private:
  ReductionState RS = Unprocessed;
  unsigned NumOpts = 0;
  unsigned DeniedOpts = 0;
  std::unique_ptr<unsigned[]> OptUnsafeEdges;
  unsigned VReg = 0;
  GraphMetadata::AllowedRegVecRef AllowedRegs;

#ifndef NDEBUG
  bool everConservativelyAllocatable = false;
#endif
};
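isConservativelyAllocatable() above reads those counters back: a node stays allocatable if its incident edges cannot possibly deny every register option, i.e. the worst-case denial total is below the option count, or at least one option has no unsafe incident edge. A standalone sketch with made-up values (the free function below is a hypothetical stand-in for the member, not part of the header):

    #include <algorithm>
    #include <cstdio>

    // Hypothetical free-function version of the test, mirroring NodeMetadata's
    // NumOpts / DeniedOpts / OptUnsafeEdges fields.
    bool conservativelyAllocatable(unsigned NumOpts, unsigned DeniedOpts,
                                   const unsigned *OptUnsafeEdges) {
      return (DeniedOpts < NumOpts) ||
             (std::find(OptUnsafeEdges, OptUnsafeEdges + NumOpts, 0u) !=
              OptUnsafeEdges + NumOpts);
    }

    int main() {
      // Three register options; edges could deny up to three of them in the
      // worst case, but option 2 has no unsafe incident edge, so still safe.
      const unsigned UnsafeEdges[3] = {1, 2, 0};
      std::printf("%d\n", conservativelyAllocatable(3, 3, UnsafeEdges)); // 1

      // Every option has an unsafe edge and the denial total reaches the
      // option count: the node is no longer provably allocatable.
      const unsigned AllUnsafe[3] = {1, 1, 2};
      std::printf("%d\n", conservativelyAllocatable(3, 3, AllUnsafe)); // 0
    }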

class RegAllocSolverImpl {
private:
  using RAMatrix = MDMatrix<MatrixMetadata>;

public:
  using RawVector = PBQP::Vector;
  using RawMatrix = PBQP::Matrix;
  using Vector = PBQP::Vector;
  using Matrix = RAMatrix;
  using CostAllocator = PBQP::PoolCostAllocator<Vector, Matrix>;

  using NodeId = GraphBase::NodeId;
  using EdgeId = GraphBase::EdgeId;

  using NodeMetadata = RegAlloc::NodeMetadata;
  struct EdgeMetadata {};
  using GraphMetadata = RegAlloc::GraphMetadata;

  using Graph = PBQP::Graph<RegAllocSolverImpl>;

  RegAllocSolverImpl(Graph &G) : G(G) {}

  Solution solve() {
    G.setSolver(*this);
    Solution S;
    setup();
    S = backpropagate(G, reduce());
    G.unsetSolver();
    return S;
  }

  void handleAddNode(NodeId NId) {
    assert(G.getNodeCosts(NId).getLength() > 1 &&
           "PBQP Graph should not contain single or zero-option nodes");
    G.getNodeMetadata(NId).setup(G.getNodeCosts(NId));
  }

  void handleRemoveNode(NodeId NId) {}
  void handleSetNodeCosts(NodeId NId, const Vector& newCosts) {}

  void handleAddEdge(EdgeId EId) {
    handleReconnectEdge(EId, G.getEdgeNode1Id(EId));
    handleReconnectEdge(EId, G.getEdgeNode2Id(EId));
  }

  void handleDisconnectEdge(EdgeId EId, NodeId NId) {
    NodeMetadata& NMd = G.getNodeMetadata(NId);
    const MatrixMetadata& MMd = G.getEdgeCosts(EId).getMetadata();
    NMd.handleRemoveEdge(MMd, NId == G.getEdgeNode2Id(EId));
    promote(NId, NMd);
  }

  void handleReconnectEdge(EdgeId EId, NodeId NId) {
    NodeMetadata& NMd = G.getNodeMetadata(NId);
    const MatrixMetadata& MMd = G.getEdgeCosts(EId).getMetadata();
    NMd.handleAddEdge(MMd, NId == G.getEdgeNode2Id(EId));
  }

  void handleUpdateCosts(EdgeId EId, const Matrix& NewCosts) {
    NodeId N1Id = G.getEdgeNode1Id(EId);
    NodeId N2Id = G.getEdgeNode2Id(EId);
    NodeMetadata& N1Md = G.getNodeMetadata(N1Id);
    NodeMetadata& N2Md = G.getNodeMetadata(N2Id);
    bool Transpose = N1Id != G.getEdgeNode1Id(EId);

    // Metadata are computed incrementally. First, update them
    // by removing the old cost.
    const MatrixMetadata& OldMMd = G.getEdgeCosts(EId).getMetadata();
    N1Md.handleRemoveEdge(OldMMd, Transpose);
    N2Md.handleRemoveEdge(OldMMd, !Transpose);

    // And update now the metadata with the new cost.
    const MatrixMetadata& MMd = NewCosts.getMetadata();
    N1Md.handleAddEdge(MMd, Transpose);
    N2Md.handleAddEdge(MMd, !Transpose);

    // As the metadata may have changed with the update, the nodes may have
    // become ConservativelyAllocatable or OptimallyReducible.
    promote(N1Id, N1Md);
    promote(N2Id, N2Md);
  }

private:
  void promote(NodeId NId, NodeMetadata& NMd) {
    if (G.getNodeDegree(NId) == 3) {
      // This node is becoming optimally reducible.
      moveToOptimallyReducibleNodes(NId);
    } else if (NMd.getReductionState() ==
               NodeMetadata::NotProvablyAllocatable &&
               NMd.isConservativelyAllocatable()) {
      // This node just became conservatively allocatable.
      moveToConservativelyAllocatableNodes(NId);
    }
  }

  void removeFromCurrentSet(NodeId NId) {
    switch (G.getNodeMetadata(NId).getReductionState()) {
    case NodeMetadata::Unprocessed: break;
    case NodeMetadata::OptimallyReducible:
      assert(OptimallyReducibleNodes.find(NId) !=
             OptimallyReducibleNodes.end() &&
             "Node not in optimally reducible set.");
      OptimallyReducibleNodes.erase(NId);
      break;
    case NodeMetadata::ConservativelyAllocatable:
      assert(ConservativelyAllocatableNodes.find(NId) !=
             ConservativelyAllocatableNodes.end() &&
             "Node not in conservatively allocatable set.");
      ConservativelyAllocatableNodes.erase(NId);
      break;
    case NodeMetadata::NotProvablyAllocatable:
      assert(NotProvablyAllocatableNodes.find(NId) !=
             NotProvablyAllocatableNodes.end() &&
             "Node not in not-provably-allocatable set.");
      NotProvablyAllocatableNodes.erase(NId);
      break;
    }
  }

  void moveToOptimallyReducibleNodes(NodeId NId) {
    removeFromCurrentSet(NId);
    OptimallyReducibleNodes.insert(NId);
    G.getNodeMetadata(NId).setReductionState(
      NodeMetadata::OptimallyReducible);
  }

  void moveToConservativelyAllocatableNodes(NodeId NId) {
    removeFromCurrentSet(NId);
    ConservativelyAllocatableNodes.insert(NId);
    G.getNodeMetadata(NId).setReductionState(
      NodeMetadata::ConservativelyAllocatable);
  }

  void moveToNotProvablyAllocatableNodes(NodeId NId) {
    removeFromCurrentSet(NId);
    NotProvablyAllocatableNodes.insert(NId);
    G.getNodeMetadata(NId).setReductionState(
      NodeMetadata::NotProvablyAllocatable);
  }

  void setup() {
    // Set up worklists.
    for (auto NId : G.nodeIds()) {
      if (G.getNodeDegree(NId) < 3)
        moveToOptimallyReducibleNodes(NId);
      else if (G.getNodeMetadata(NId).isConservativelyAllocatable())
        moveToConservativelyAllocatableNodes(NId);
      else
        moveToNotProvablyAllocatableNodes(NId);
    }
  }

  // Compute a reduction order for the graph by iteratively applying PBQP
  // reduction rules. Locally optimal rules are applied whenever possible (R0,
  // R1, R2). If no locally-optimal rules apply then any conservatively
  // allocatable node is reduced. Finally, if no conservatively allocatable
  // node exists then the node with the lowest spill-cost:degree ratio is
  // selected.
  std::vector<GraphBase::NodeId> reduce() {
    assert(!G.empty() && "Cannot reduce empty graph.");

    using NodeId = GraphBase::NodeId;
    std::vector<NodeId> NodeStack;

    // Consume worklists.
    while (true) {
      if (!OptimallyReducibleNodes.empty()) {
        NodeSet::iterator NItr = OptimallyReducibleNodes.begin();
        NodeId NId = *NItr;
        OptimallyReducibleNodes.erase(NItr);
        NodeStack.push_back(NId);
        switch (G.getNodeDegree(NId)) {
        case 0:
          break;
        case 1:
          applyR1(G, NId);
          break;
        case 2:
          applyR2(G, NId);
          break;
        default: llvm_unreachable("Not an optimally reducible node.");
        }
      } else if (!ConservativelyAllocatableNodes.empty()) {
        // Conservatively allocatable nodes will never spill. For now just
        // take the first node in the set and push it on the stack. When we
        // start optimizing more heavily for register preferencing, it may
        // be better to push nodes with lower 'expected' or worst-case
        // register costs first (since early nodes are the most
        // constrained).
        NodeSet::iterator NItr = ConservativelyAllocatableNodes.begin();
        NodeId NId = *NItr;
        ConservativelyAllocatableNodes.erase(NItr);
        NodeStack.push_back(NId);
        G.disconnectAllNeighborsFromNode(NId);
      } else if (!NotProvablyAllocatableNodes.empty()) {
        NodeSet::iterator NItr =
          std::min_element(NotProvablyAllocatableNodes.begin(),
                           NotProvablyAllocatableNodes.end(),
                           SpillCostComparator(G));
        NodeId NId = *NItr;
        NotProvablyAllocatableNodes.erase(NItr);
        NodeStack.push_back(NId);
        G.disconnectAllNeighborsFromNode(NId);
      } else
        break;
    }

    return NodeStack;
  }

  class SpillCostComparator {
  public:
    SpillCostComparator(const Graph& G) : G(G) {}

    bool operator()(NodeId N1Id, NodeId N2Id) {
      PBQPNum N1SC = G.getNodeCosts(N1Id)[0];
      PBQPNum N2SC = G.getNodeCosts(N2Id)[0];
      if (N1SC == N2SC)
        return G.getNodeDegree(N1Id) < G.getNodeDegree(N2Id);
      return N1SC < N2SC;
    }

  private:
    const Graph& G;
  };

  Graph& G;
  using NodeSet = std::set<NodeId>;
  NodeSet OptimallyReducibleNodes;
  NodeSet ConservativelyAllocatableNodes;
  NodeSet NotProvablyAllocatableNodes;
};
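The reduce() loop above consumes its three worklists in the fixed priority order described by its leading comment. A compressed, illustrative sketch of just that selection policy (plain std::set stand-ins for the node sets, not the real Graph/NodeId machinery):

    #include <cstdio>
    #include <set>

    // Which worklist would be drawn from next: optimally reducible nodes first
    // (R0/R1/R2 apply), then conservatively allocatable nodes, then the
    // cheapest not-provably-allocatable node; when all are empty, reduction
    // is finished.
    const char *nextWorklist(const std::set<unsigned> &OptimallyReducible,
                             const std::set<unsigned> &ConservativelyAllocatable,
                             const std::set<unsigned> &NotProvablyAllocatable) {
      if (!OptimallyReducible.empty())
        return "optimally reducible";
      if (!ConservativelyAllocatable.empty())
        return "conservatively allocatable";
      if (!NotProvablyAllocatable.empty())
        return "min spill-cost not-provably-allocatable";
      return "done";
    }

    int main() {
      // With no locally optimal reductions left, a conservatively allocatable
      // node is reduced before any potential spill candidate.
      std::printf("%s\n", nextWorklist({}, {7}, {3, 4}));
    }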

class PBQPRAGraph : public PBQP::Graph<RegAllocSolverImpl> {
private:
  using BaseT = PBQP::Graph<RegAllocSolverImpl>;

public:
  PBQPRAGraph(GraphMetadata Metadata) : BaseT(std::move(Metadata)) {}

  /// Dump this graph to dbgs().
  void dump() const;

  /// Dump this graph to an output stream.
  /// @param OS Output stream to print on.
  void dump(raw_ostream &OS) const;

  /// Print a representation of this graph in DOT format.
  /// @param OS Output stream to print on.
  void printDot(raw_ostream &OS) const;
};

inline Solution solve(PBQPRAGraph& G) {
  if (G.empty())
    return Solution();
  RegAllocSolverImpl RegAllocSolver(G);
  return RegAllocSolver.solve();
}

} // end namespace RegAlloc
} // end namespace PBQP

/// Create a PBQP register allocator instance.
FunctionPass *
createPBQPRegisterAllocator(char *customPassID = nullptr);

} // end namespace llvm

#endif // LLVM_CODEGEN_REGALLOCPBQP_H