clang/lib/AST/Randstruct.cpp
//===--- Randstruct.cpp ---------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the implementation for Clang's structure field layout
// randomization.
//
//===----------------------------------------------------------------------===//

#include "clang/AST/Randstruct.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/ASTDiagnostic.h"
#include "clang/AST/Attr.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h" // For StaticAssertDecl
#include "clang/Basic/Diagnostic.h"
#include "llvm/ADT/SmallVector.h"

#include <algorithm>
#include <random>
#include <set>
#include <sstream>
#include <string>

using clang::ASTContext;
using clang::FieldDecl;
using llvm::SmallVector;

namespace {

// FIXME: Replace this with some discovery once that mechanism exists.
enum { CACHE_LINE = 64 };

// The Bucket class holds the struct fields we're trying to fill to a
// cache-line.
class Bucket {
  SmallVector<FieldDecl *, 64> Fields;
  int Size = 0;

public:
  virtual ~Bucket() = default;

  SmallVector<FieldDecl *, 64> &fields() { return Fields; }
  void addField(FieldDecl *Field, int FieldSize);
  virtual bool canFit(int FieldSize) const {
    return Size + FieldSize <= CACHE_LINE;
  }
  virtual bool isBitfieldRun() const { return false; }
  bool full() const { return Size >= CACHE_LINE; }
};

void Bucket::addField(FieldDecl *Field, int FieldSize) {
  Size += FieldSize;
  Fields.push_back(Field);
}

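// A Bucket holding a run of adjacent bitfields. It can always fit another
// field, so an entire run of bitfields lands in a single bucket and the
// bitfields remain adjacent after randomization.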
struct BitfieldRunBucket : public Bucket {
  bool canFit(int FieldSize) const override { return true; }
  bool isBitfieldRun() const override { return true; }
};

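// Greedily packs fields into cache-line-sized Buckets, keeping each run of
// adjacent non-zero-length bitfields together in its own bucket, then
// shuffles the buckets and the fields within every bucket that isn't a
// bitfield run. FieldsOut is rewritten in place with the new ordering.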
void randomizeStructureLayoutImpl(const ASTContext &Context,
                                  llvm::SmallVectorImpl<FieldDecl *> &FieldsOut,
                                  std::mt19937 &RNG) {
  // All of the Buckets produced by best-effort cache-line algorithm.
  SmallVector<std::unique_ptr<Bucket>, 16> Buckets;

  // The current bucket of fields that we are trying to fill to a cache-line.
  std::unique_ptr<Bucket> CurrentBucket;

  // The current bucket containing the run of adjacent bitfields to ensure they
  // remain adjacent.
  std::unique_ptr<BitfieldRunBucket> CurrentBitfieldRun;

  // Tracks the number of fields that we failed to fit to the current bucket,
  // and thus still need to be added later.
  size_t Skipped = 0;

  while (!FieldsOut.empty()) {
    // If we've Skipped more fields than we have remaining to place, that means
    // that they can't fit in our current bucket, and we need to start a new
    // one.
    if (Skipped >= FieldsOut.size()) {
      Skipped = 0;
      Buckets.push_back(std::move(CurrentBucket));
    }

    // Take the first field that needs to be put in a bucket.
    auto FieldIter = FieldsOut.begin();
    FieldDecl *FD = *FieldIter;

    if (FD->isBitField() && !FD->isZeroLengthBitField(Context)) {
      // Start a bitfield run if this is the first bitfield we have found.
      if (!CurrentBitfieldRun)
        CurrentBitfieldRun = std::make_unique<BitfieldRunBucket>();

      // We've placed the field, and can remove it from the vector of fields
      // awaiting buckets, "FieldsOut".
      CurrentBitfieldRun->addField(FD, /*FieldSize is irrelevant here*/ 1);
      FieldsOut.erase(FieldIter);
      continue;
    }

    // Else, current field is not a bitfield. If we were previously in a
    // bitfield run, end it.
    if (CurrentBitfieldRun)
      Buckets.push_back(std::move(CurrentBitfieldRun));

    // If we don't have a bucket, make one.
    if (!CurrentBucket)
      CurrentBucket = std::make_unique<Bucket>();

    uint64_t Width = Context.getTypeInfo(FD->getType()).Width;
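    // Width is in bits; a field at least CACHE_LINE bits wide is never packed
    // with other fields and instead gets a bucket of its own.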
    if (Width >= CACHE_LINE) {
      std::unique_ptr<Bucket> OverSized = std::make_unique<Bucket>();
      OverSized->addField(FD, Width);
      FieldsOut.erase(FieldIter);
      Buckets.push_back(std::move(OverSized));
      continue;
    }

    // If it fits, add it.
    if (CurrentBucket->canFit(Width)) {
      CurrentBucket->addField(FD, Width);
      FieldsOut.erase(FieldIter);

      // If it's now full, tie off the bucket.
      if (CurrentBucket->full()) {
        Skipped = 0;
        Buckets.push_back(std::move(CurrentBucket));
      }
    } else {
      // We can't fit it in our current bucket. Move to the end for processing
      // later.
      ++Skipped; // Mark it skipped.
      FieldsOut.push_back(FD);
      FieldsOut.erase(FieldIter);
    }
  }

  // Done processing the fields awaiting a bucket.

  // If we were filling a bucket, tie it off.
  if (CurrentBucket)
    Buckets.push_back(std::move(CurrentBucket));

  // If we were processing a bitfield run bucket, tie it off.
  if (CurrentBitfieldRun)
    Buckets.push_back(std::move(CurrentBitfieldRun));

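  // Randomize the order of the buckets themselves.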
  std::shuffle(std::begin(Buckets), std::end(Buckets), RNG);

  // Produce the new ordering of the elements from the Buckets.
  SmallVector<FieldDecl *, 16> FinalOrder;
  for (const std::unique_ptr<Bucket> &B : Buckets) {
    llvm::SmallVectorImpl<FieldDecl *> &RandFields = B->fields();
    if (!B->isBitfieldRun())
      std::shuffle(std::begin(RandFields), std::end(RandFields), RNG);

    FinalOrder.insert(FinalOrder.end(), RandFields.begin(), RandFields.end());
  }

  FieldsOut = FinalOrder;
}

} // anonymous namespace

namespace clang {
namespace randstruct {

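// Computes a randomized layout for RD's decls and appends it to
// FinalOrdering. The RNG is seeded deterministically from the Randstruct seed
// string plus the record's name, so a given seed reproduces the same layout.
// A trailing flexible (or flexible-like) array member stays last, and decls
// such as static asserts that may refer to fields are emitted after the
// randomized fields. Returns false if there are no fields to randomize.
//
// For example, assuming the GCC-compatible attribute spelling and a seed
// supplied via -frandomize-layout-seed=, a record like
//
//   struct __attribute__((randomize_layout)) S {
//     char A;
//     long long B;
//     char C;
//     int Tail[1]; // Flexible-array-like trailing member: stays last.
//   };
//
// has A, B, and C randomized while Tail remains the final field.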
bool randomizeStructureLayout(const ASTContext &Context, RecordDecl *RD,
                              SmallVectorImpl<Decl *> &FinalOrdering) {
  SmallVector<FieldDecl *, 64> RandomizedFields;
  SmallVector<Decl *, 8> PostRandomizedFields;

  unsigned TotalNumFields = 0;
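  // Partition the decls: fields are candidates for randomization; static
  // asserts and indirect fields, which may reference the fields, are emitted
  // after the randomized fields; all other decls keep their place at the
  // front of the record.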
  for (Decl *D : RD->decls()) {
    ++TotalNumFields;
    if (auto *FD = dyn_cast<FieldDecl>(D))
      RandomizedFields.push_back(FD);
    else if (isa<StaticAssertDecl>(D) || isa<IndirectFieldDecl>(D))
      PostRandomizedFields.push_back(D);
    else
      FinalOrdering.push_back(D);
  }

  if (RandomizedFields.empty())
    return false;

  // The struct might end with a flexible array member or with an array of
  // size 2 or smaller (historically used in place of a flexible array), in
  // which case we don't want to randomize it away from the end.
  FieldDecl *FlexibleArray =
      RD->hasFlexibleArrayMember() ? RandomizedFields.pop_back_val() : nullptr;
  if (!FlexibleArray) {
    if (const auto *CA =
            dyn_cast<ConstantArrayType>(RandomizedFields.back()->getType()))
      if (CA->getSize().sle(2))
        FlexibleArray = RandomizedFields.pop_back_val();
  }

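  // Seed the RNG deterministically from the user-provided seed string
  // combined with the record's name, so the same seed always yields the same
  // layout for a given record.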
  std::string Seed =
      Context.getLangOpts().RandstructSeed + RD->getNameAsString();
  std::seed_seq SeedSeq(Seed.begin(), Seed.end());
  std::mt19937 RNG(SeedSeq);

  randomizeStructureLayoutImpl(Context, RandomizedFields, RNG);

  // Insert the randomized decls into the final ordering.
  FinalOrdering.insert(FinalOrdering.end(), RandomizedFields.begin(),
                       RandomizedFields.end());

  // Add fields that belong towards the end of the RecordDecl.
  FinalOrdering.insert(FinalOrdering.end(), PostRandomizedFields.begin(),
                       PostRandomizedFields.end());

  // Add back the flexible array.
  if (FlexibleArray)
    FinalOrdering.push_back(FlexibleArray);

  assert(TotalNumFields == FinalOrdering.size() &&
         "Decl count has been altered after Randstruct randomization!");
  (void)TotalNumFields;
  return true;
}

} // end namespace randstruct
} // end namespace clang