//===- ThreadSafetyTIL.cpp ------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "clang/Analysis/Analyses/ThreadSafetyTIL.h"
#include "clang/Basic/LLVM.h"
#include "llvm/Support/Casting.h"
#include <cassert>
#include <cstddef>

using namespace clang;
using namespace threadSafety;
using namespace til;

StringRef til::getUnaryOpcodeString(TIL_UnaryOpcode Op) {
  switch (Op) {
  case UOP_Minus: return "-";
  case UOP_BitNot: return "~";
  case UOP_LogicNot: return "!";
  }
  return {};
}

StringRef til::getBinaryOpcodeString(TIL_BinaryOpcode Op) {
  switch (Op) {
  case BOP_Mul: return "*";
  case BOP_Div: return "/";
  case BOP_Rem: return "%";
  case BOP_Add: return "+";
  case BOP_Sub: return "-";
  case BOP_Shl: return "<<";
  case BOP_Shr: return ">>";
  case BOP_BitAnd: return "&";
  case BOP_BitXor: return "^";
  case BOP_BitOr: return "|";
  case BOP_Eq: return "==";
  case BOP_Neq: return "!=";
  case BOP_Lt: return "<";
  case BOP_Leq: return "<=";
  case BOP_Cmp: return "<=>";
  case BOP_LogicAnd: return "&&";
  case BOP_LogicOr: return "||";
  }
  return {};
}

SExpr* Future::force() {
  Status = FS_evaluating;
  Result = compute();
  Status = FS_done;
  return Result;
}

unsigned BasicBlock::addPredecessor(BasicBlock *Pred) {
  unsigned Idx = Predecessors.size();
  Predecessors.reserveCheck(1, Arena);
  Predecessors.push_back(Pred);
  for (auto *E : Args) {
    if (auto *Ph = dyn_cast<Phi>(E)) {
      Ph->values().reserveCheck(1, Arena);
      Ph->values().push_back(nullptr);
    }
  }
  return Idx;
}

void BasicBlock::reservePredecessors(unsigned NumPreds) {
  Predecessors.reserve(NumPreds, Arena);
  for (auto *E : Args) {
    if (auto *Ph = dyn_cast<Phi>(E)) {
      Ph->values().reserve(NumPreds, Arena);
    }
  }
}

// If E is a variable, then trace back through any aliases or redundant
// Phi nodes to find the canonical definition.
const SExpr *til::getCanonicalVal(const SExpr *E) {
  while (true) {
    if (const auto *V = dyn_cast<Variable>(E)) {
      if (V->kind() == Variable::VK_Let) {
        E = V->definition();
        continue;
      }
    }
    if (const auto *Ph = dyn_cast<Phi>(E)) {
      if (Ph->status() == Phi::PH_SingleVal) {
        E = Ph->values()[0];
        continue;
      }
    }
    break;
  }
  return E;
}

// If E is a variable, then trace back through any aliases or redundant
// Phi nodes to find the canonical definition.
// The non-const version will simplify incomplete Phi nodes.
SExpr *til::simplifyToCanonicalVal(SExpr *E) {
  while (true) {
    if (auto *V = dyn_cast<Variable>(E)) {
      if (V->kind() != Variable::VK_Let)
        return V;
      // Eliminate redundant variables, e.g. x = y, or x = 5,
      // but keep anything more complicated.
      if (til::ThreadSafetyTIL::isTrivial(V->definition())) {
        E = V->definition();
        continue;
      }
      return V;
    }
    if (auto *Ph = dyn_cast<Phi>(E)) {
      if (Ph->status() == Phi::PH_Incomplete)
        simplifyIncompleteArg(Ph);
      // Eliminate redundant Phi nodes.
      if (Ph->status() == Phi::PH_SingleVal) {
        E = Ph->values()[0];
        continue;
      }
    }
    return E;
  }
}

// Trace the arguments of an incomplete Phi node to see if they have the same
// canonical definition. If so, mark the Phi node as redundant.
// simplifyToCanonicalVal() may recursively call simplifyIncompleteArg().
void til::simplifyIncompleteArg(til::Phi *Ph) {
  assert(Ph && Ph->status() == Phi::PH_Incomplete);

  // Eliminate infinite recursion -- assume that this node is not redundant.
  Ph->setStatus(Phi::PH_MultiVal);

  SExpr *E0 = simplifyToCanonicalVal(Ph->values()[0]);
  for (unsigned i = 1, n = Ph->values().size(); i < n; ++i) {
    SExpr *Ei = simplifyToCanonicalVal(Ph->values()[i]);
    if (Ei == Ph)
      continue;  // Recursive reference to itself. Don't count.
    if (Ei != E0) {
      return;    // Status is already set to MultiVal.
    }
  }
  Ph->setStatus(Phi::PH_SingleVal);
}
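
// Illustrative sketch (hypothetical helper, not part of this file): the same
// single-value test over plain integers. Vals[i] stands for the canonical
// definition of the i-th Phi argument and SelfID for a recursive reference to
// the Phi itself. Mirroring the loop above, each later argument is compared
// against the canonical value of the first, and self-references are ignored.
static bool phiArgsShareOneValue(const unsigned *Vals, unsigned NumVals,
                                 unsigned SelfID) {
  unsigned First = Vals[0];
  for (unsigned i = 1; i < NumVals; ++i) {
    if (Vals[i] == SelfID)
      continue;       // Self-reference, like Ei == Ph above: don't count it.
    if (Vals[i] != First)
      return false;   // Two distinct definitions: genuinely multi-valued.
  }
  return true;        // Would correspond to Phi::PH_SingleVal.
}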

// Renumbers the arguments and instructions to have unique, sequential IDs.
unsigned BasicBlock::renumberInstrs(unsigned ID) {
  for (auto *Arg : Args)
    Arg->setID(this, ID++);
  for (auto *Instr : Instrs)
    Instr->setID(this, ID++);
  TermInstr->setID(this, ID++);
  return ID;
}

// Sorts the CFG's blocks using a reverse post-order depth-first traversal.
// Each block will be written into the Blocks array in order, and its BlockID
// will be set to the index in the array. Sorting should start from the entry
// block, and ID should be the total number of blocks.
unsigned BasicBlock::topologicalSort(SimpleArray<BasicBlock *> &Blocks,
                                     unsigned ID) {
  if (Visited) return ID;
  Visited = true;
  for (auto *Block : successors())
    ID = Block->topologicalSort(Blocks, ID);
  // Set the ID and update the block array in place.
  // We may lose pointers to unreachable blocks.
  assert(ID > 0);
  BlockID = --ID;
  Blocks[BlockID] = this;
  return ID;
}
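
// Illustrative sketch (hypothetical types, not part of this file): the same
// reverse post-order numbering on a plain adjacency-list graph. A node gets
// its slot only after all of its successors, and ID counts down from the
// total number of nodes, so unreachable nodes leave a gap at the front of
// Order (the gap that computeNormalForm() shifts away below).
struct SketchNode {
  const unsigned *Succs;  // Successor node indices.
  unsigned NumSuccs;
  bool Visited = false;
};

static unsigned sketchTopologicalSort(unsigned N, SketchNode *Nodes,
                                      unsigned *Order, unsigned ID) {
  if (Nodes[N].Visited) return ID;
  Nodes[N].Visited = true;
  for (unsigned i = 0; i < Nodes[N].NumSuccs; ++i)
    ID = sketchTopologicalSort(Nodes[N].Succs[i], Nodes, Order, ID);
  Order[--ID] = N;  // Fill the array from the back.
  return ID;
}
// For a diamond CFG 0 -> {1, 2} -> 3, starting at node 0 with ID = 4 yields
// Order = {0, 1, 2, 3} or {0, 2, 1, 3}, depending on successor order.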

// Performs a reverse topological traversal, starting from the exit block and
// following back-edges. The dominator is serialized before any predecessors,
// which guarantees that all blocks are serialized after their dominator and
// before their post-dominator (because it's a reverse topological traversal).
// ID should be initially set to 0.
//
// This sort assumes that (1) dominators have been computed, (2) there are no
// critical edges, and (3) the entry block is reachable from the exit block
// and no blocks are accessible via traversal of back-edges from the exit that
// weren't accessible via forward edges from the entry.
unsigned BasicBlock::topologicalFinalSort(SimpleArray<BasicBlock *> &Blocks,
                                          unsigned ID) {
  // Visited is assumed to have been set to true by topologicalSort(). This
  // pass clears it again, so !Visited means that this block has already been
  // visited by this pass.
  if (!Visited) return ID;
  Visited = false;
  if (DominatorNode.Parent)
    ID = DominatorNode.Parent->topologicalFinalSort(Blocks, ID);
  for (auto *Pred : Predecessors)
    ID = Pred->topologicalFinalSort(Blocks, ID);
  assert(static_cast<size_t>(ID) < Blocks.size());
  BlockID = ID++;
  Blocks[BlockID] = this;
  return ID;
}

// Computes the immediate dominator of the current block. Assumes that all of
// its predecessors have already computed their dominators. This is achieved
// by visiting the nodes in topological order.
void BasicBlock::computeDominator() {
  BasicBlock *Candidate = nullptr;
  // Walk backwards from each predecessor to find the common dominator node.
  for (auto *Pred : Predecessors) {
    // Skip back-edges.
    if (Pred->BlockID >= BlockID) continue;
    // If we don't have a candidate for the dominator yet, take this one.
    if (Candidate == nullptr) {
      Candidate = Pred;
      continue;
    }
    // Walk the alternate and current candidate back to find a common ancestor.
    auto *Alternate = Pred;
    while (Alternate != Candidate) {
      if (Candidate->BlockID > Alternate->BlockID)
        Candidate = Candidate->DominatorNode.Parent;
      else
        Alternate = Alternate->DominatorNode.Parent;
    }
  }
  DominatorNode.Parent = Candidate;
  DominatorNode.SizeOfSubTree = 1;
}
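
// Illustrative sketch (hypothetical helper, not part of this file): the
// candidate intersection above, written over plain integer block IDs, with
// IDom[B] holding the current immediate-dominator guess for block B. Because
// blocks are numbered in reverse post-order, following IDom[] strictly
// decreases the ID, so the two walks always meet. This is essentially the
// intersection step of the Cooper-Harvey-Kennedy dominance algorithm.
static unsigned sketchIntersect(unsigned A, unsigned B, const unsigned *IDom) {
  while (A != B) {
    if (A > B)
      A = IDom[A];  // The higher-numbered block climbs its dominator chain.
    else
      B = IDom[B];
  }
  return A;         // Nearest common ancestor of the two candidates.
}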

// Computes the immediate post-dominator of the current block. Assumes that all
// of its successors have already computed their post-dominators. This is
// achieved by visiting the nodes in reverse topological order.
void BasicBlock::computePostDominator() {
  BasicBlock *Candidate = nullptr;
  // Walk back from each successor to find the common post-dominator node.
  for (auto *Succ : successors()) {
    // Skip back-edges.
    if (Succ->BlockID <= BlockID) continue;
    // If we don't have a candidate for the post-dominator yet, take this one.
    if (Candidate == nullptr) {
      Candidate = Succ;
      continue;
    }
    // Walk the alternate and current candidate back to find a common ancestor.
    auto *Alternate = Succ;
    while (Alternate != Candidate) {
      if (Candidate->BlockID < Alternate->BlockID)
        Candidate = Candidate->PostDominatorNode.Parent;
      else
        Alternate = Alternate->PostDominatorNode.Parent;
    }
  }
  PostDominatorNode.Parent = Candidate;
  PostDominatorNode.SizeOfSubTree = 1;
}

// Renumber instructions in all blocks
void SCFG::renumberInstrs() {
  unsigned InstrID = 0;
  for (auto *Block : Blocks)
    InstrID = Block->renumberInstrs(InstrID);
}

static inline void computeNodeSize(BasicBlock *B,
                                   BasicBlock::TopologyNode BasicBlock::*TN) {
  BasicBlock::TopologyNode *N = &(B->*TN);
  if (N->Parent) {
    BasicBlock::TopologyNode *P = &(N->Parent->*TN);
    // Initially set ID relative to the (as yet uncomputed) parent ID
    N->NodeID = P->SizeOfSubTree;
    P->SizeOfSubTree += N->SizeOfSubTree;
  }
}

static inline void computeNodeID(BasicBlock *B,
                                 BasicBlock::TopologyNode BasicBlock::*TN) {
  BasicBlock::TopologyNode *N = &(B->*TN);
  if (N->Parent) {
    BasicBlock::TopologyNode *P = &(N->Parent->*TN);
    N->NodeID += P->NodeID;  // Fix NodeIDs relative to starting node.
  }
}
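
// Illustrative sketch (hypothetical helper, not part of this file): with the
// numbering produced by computeNodeSize() and computeNodeID() above, a node's
// subtree occupies the contiguous ID range [NodeID, NodeID + SizeOfSubTree),
// so an ancestor query on the dominator tree becomes a constant-time interval
// check; the same check on PostDominatorNode answers post-dominance.
static bool sketchDominatesOrEqual(const BasicBlock::TopologyNode &A,
                                   const BasicBlock::TopologyNode &B) {
  return A.NodeID <= B.NodeID && B.NodeID < A.NodeID + A.SizeOfSubTree;
}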

// Normalizes a CFG. Normalization has a few major components:
// 1) Removing unreachable blocks.
// 2) Computing dominators and post-dominators.
// 3) Topologically sorting the blocks into the "Blocks" array.
void SCFG::computeNormalForm() {
  // Topologically sort the blocks starting from the entry block.
  unsigned NumUnreachableBlocks = Entry->topologicalSort(Blocks, Blocks.size());
  if (NumUnreachableBlocks > 0) {
    // If there were unreachable blocks, shift everything down and delete them.
    for (unsigned I = NumUnreachableBlocks, E = Blocks.size(); I < E; ++I) {
      unsigned NI = I - NumUnreachableBlocks;
      Blocks[NI] = Blocks[I];
      Blocks[NI]->BlockID = NI;
      // FIXME: clean up predecessor pointers to unreachable blocks?
    }
    Blocks.drop(NumUnreachableBlocks);
  }

  // Compute dominators.
  for (auto *Block : Blocks)
    Block->computeDominator();

  // Once dominators have been computed, the final sort may be performed.
  unsigned NumBlocks = Exit->topologicalFinalSort(Blocks, 0);
  assert(static_cast<size_t>(NumBlocks) == Blocks.size());
  (void) NumBlocks;

  // Renumber the instructions now that we have a final sort.
  renumberInstrs();

  // Compute post-dominators and compute the sizes of each node in the
  // dominator tree.
  for (auto *Block : Blocks.reverse()) {
    Block->computePostDominator();
    computeNodeSize(Block, &BasicBlock::DominatorNode);
  }
  // Assign IDs in the dominator tree and compute the sizes of each node in
  // the post-dominator tree.
  for (auto *Block : Blocks) {
    computeNodeID(Block, &BasicBlock::DominatorNode);
    computeNodeSize(Block, &BasicBlock::PostDominatorNode);
  }
  // Assign IDs in the post-dominator tree.
  for (auto *Block : Blocks.reverse()) {
    computeNodeID(Block, &BasicBlock::PostDominatorNode);
  }
}