Coverage Report

Created: 2021-09-21 08:58

/Users/buildslave/jenkins/workspace/coverage/llvm-project/clang/lib/Analysis/ThreadSafety.cpp
Line
Count
Source (jump to first uncovered line)
1
//===- ThreadSafety.cpp ---------------------------------------------------===//
2
//
3
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4
// See https://llvm.org/LICENSE.txt for license information.
5
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6
//
7
//===----------------------------------------------------------------------===//
8
//
9
// An intra-procedural analysis for thread safety (e.g. deadlocks and race
10
// conditions), based off of an annotation system.
11
//
12
// See http://clang.llvm.org/docs/ThreadSafetyAnalysis.html
13
// for more information.
14
//
15
//===----------------------------------------------------------------------===//
16
17
#include "clang/Analysis/Analyses/ThreadSafety.h"
18
#include "clang/AST/Attr.h"
19
#include "clang/AST/Decl.h"
20
#include "clang/AST/DeclCXX.h"
21
#include "clang/AST/DeclGroup.h"
22
#include "clang/AST/Expr.h"
23
#include "clang/AST/ExprCXX.h"
24
#include "clang/AST/OperationKinds.h"
25
#include "clang/AST/Stmt.h"
26
#include "clang/AST/StmtVisitor.h"
27
#include "clang/AST/Type.h"
28
#include "clang/Analysis/Analyses/PostOrderCFGView.h"
29
#include "clang/Analysis/Analyses/ThreadSafetyCommon.h"
30
#include "clang/Analysis/Analyses/ThreadSafetyTIL.h"
31
#include "clang/Analysis/Analyses/ThreadSafetyTraverse.h"
32
#include "clang/Analysis/Analyses/ThreadSafetyUtil.h"
33
#include "clang/Analysis/AnalysisDeclContext.h"
34
#include "clang/Analysis/CFG.h"
35
#include "clang/Basic/Builtins.h"
36
#include "clang/Basic/LLVM.h"
37
#include "clang/Basic/OperatorKinds.h"
38
#include "clang/Basic/SourceLocation.h"
39
#include "clang/Basic/Specifiers.h"
40
#include "llvm/ADT/ArrayRef.h"
41
#include "llvm/ADT/DenseMap.h"
42
#include "llvm/ADT/ImmutableMap.h"
43
#include "llvm/ADT/Optional.h"
44
#include "llvm/ADT/PointerIntPair.h"
45
#include "llvm/ADT/STLExtras.h"
46
#include "llvm/ADT/SmallVector.h"
47
#include "llvm/ADT/StringRef.h"
48
#include "llvm/Support/Allocator.h"
49
#include "llvm/Support/Casting.h"
50
#include "llvm/Support/ErrorHandling.h"
51
#include "llvm/Support/raw_ostream.h"
52
#include <algorithm>
53
#include <cassert>
54
#include <functional>
55
#include <iterator>
56
#include <memory>
57
#include <string>
58
#include <type_traits>
59
#include <utility>
60
#include <vector>
61
62
using namespace clang;
63
using namespace threadSafety;
64
65
// Key method definition
66
2.52k
ThreadSafetyHandler::~ThreadSafetyHandler() = default;
67
68
/// Issue a warning about an invalid lock expression
69
static void warnInvalidLock(ThreadSafetyHandler &Handler,
70
                            const Expr *MutexExp, const NamedDecl *D,
71
0
                            const Expr *DeclExp, StringRef Kind) {
72
0
  SourceLocation Loc;
73
0
  if (DeclExp)
74
0
    Loc = DeclExp->getExprLoc();
75
76
  // FIXME: add a note about the attribute location in MutexExp or D
77
0
  if (Loc.isValid())
78
0
    Handler.handleInvalidLockExp(Kind, Loc);
79
0
}
80
81
namespace {
82
83
/// A set of CapabilityExpr objects, which are compiled from thread safety
84
/// attributes on a function.
85
class CapExprSet : public SmallVector<CapabilityExpr, 4> {
86
public:
87
  /// Push M onto list, but discard duplicates.
88
5.19k
  void push_back_nodup(const CapabilityExpr &CapE) {
89
5.19k
    iterator It = std::find_if(begin(), end(),
90
5.19k
                               [=](const CapabilityExpr &CapE2) {
91
1.09k
      return CapE.equals(CapE2);
92
1.09k
    });
93
5.19k
    if (It == end())
94
4.76k
      push_back(CapE);
95
5.19k
  }
96
};
97
98
class FactManager;
99
class FactSet;
100
101
/// This is a helper class that stores a fact that is known at a
102
/// particular point in program execution.  Currently, a fact is a capability,
103
/// along with additional information, such as where it was acquired, whether
104
/// it is exclusive or shared, etc.
105
///
106
/// FIXME: this analysis does not currently support re-entrant locking.
107
class FactEntry : public CapabilityExpr {
108
public:
109
  /// Where a fact comes from.
110
  enum SourceKind {
111
    Acquired, ///< The fact has been directly acquired.
112
    Asserted, ///< The fact has been asserted to be held.
113
    Declared, ///< The fact is assumed to be held by callers.
114
    Managed,  ///< The fact has been acquired through a scoped capability.
115
  };
116
117
private:
118
  /// Exclusive or shared.
119
  LockKind LKind : 8;
120
121
  // How it was acquired.
122
  SourceKind Source : 8;
123
124
  /// Where it was acquired.
125
  SourceLocation AcquireLoc;
126
127
public:
128
  FactEntry(const CapabilityExpr &CE, LockKind LK, SourceLocation Loc,
129
            SourceKind Src)
130
4.89k
      : CapabilityExpr(CE), LKind(LK), Source(Src), AcquireLoc(Loc) {}
131
4.89k
  virtual ~FactEntry() = default;
132
133
2.71k
  LockKind kind() const { return LKind;      }
134
3.90k
  SourceLocation loc() const { return AcquireLoc; }
135
136
6.96k
  bool asserted() const { return Source == Asserted; }
137
2.70k
  bool declared() const { return Source == Declared; }
138
1.83k
  bool managed() const { return Source == Managed; }
139
140
  virtual void
141
  handleRemovalFromIntersection(const FactSet &FSet, FactManager &FactMan,
142
                                SourceLocation JoinLoc, LockErrorKind LEK,
143
                                ThreadSafetyHandler &Handler) const = 0;
144
  virtual void handleLock(FactSet &FSet, FactManager &FactMan,
145
                          const FactEntry &entry, ThreadSafetyHandler &Handler,
146
                          StringRef DiagKind) const = 0;
147
  virtual void handleUnlock(FactSet &FSet, FactManager &FactMan,
148
                            const CapabilityExpr &Cp, SourceLocation UnlockLoc,
149
                            bool FullyRemove, ThreadSafetyHandler &Handler,
150
                            StringRef DiagKind) const = 0;
151
152
  // Return true if LKind >= LK, where exclusive > shared
153
1.85k
  bool isAtLeast(LockKind LK) const {
154
1.85k
    return  (LKind == LK_Exclusive) || 
(LK == LK_Shared)375
;
155
1.85k
  }
156
};
157
158
using FactID = unsigned short;
159
160
/// FactManager manages the memory for all facts that are created during
161
/// the analysis of a single routine.
162
class FactManager {
163
private:
164
  std::vector<std::unique_ptr<const FactEntry>> Facts;
165
166
public:
167
4.68k
  FactID newFact(std::unique_ptr<FactEntry> Entry) {
168
4.68k
    Facts.push_back(std::move(Entry));
169
4.68k
    return static_cast<unsigned short>(Facts.size() - 1);
170
4.68k
  }
171
172
29.4k
  const FactEntry &operator[](FactID F) const { return *Facts[F]; }
173
};
174
175
/// A FactSet is the set of facts that are known to be true at a
176
/// particular program point.  FactSets must be small, because they are
177
/// frequently copied, and are thus implemented as a set of indices into a
178
/// table maintained by a FactManager.  A typical FactSet only holds 1 or 2
179
/// locks, so we can get away with doing a linear search for lookup.  Note
180
/// that a hashtable or map is inappropriate in this case, because lookups
181
/// may involve partial pattern matches, rather than exact matches.
182
class FactSet {
183
private:
184
  using FactVec = SmallVector<FactID, 4>;
185
186
  FactVec FactIDs;
187
188
public:
189
  using iterator = FactVec::iterator;
190
  using const_iterator = FactVec::const_iterator;
191
192
5.75k
  iterator begin() { return FactIDs.begin(); }
193
17.4k
  const_iterator begin() const { return FactIDs.begin(); }
194
195
8.39k
  iterator end() { return FactIDs.end(); }
196
31.8k
  const_iterator end() const { return FactIDs.end(); }
197
198
0
  bool isEmpty() const { return FactIDs.size() == 0; }
199
200
  // Return true if the set contains only negative facts
201
58
  bool isEmpty(FactManager &FactMan) const {
202
58
    for (const auto FID : *this) {
203
34
      if (!FactMan[FID].negative())
204
24
        return false;
205
34
    }
206
34
    return true;
207
58
  }
208
209
0
  void addLockByID(FactID ID) { FactIDs.push_back(ID); }
210
211
4.68k
  FactID addLock(FactManager &FM, std::unique_ptr<FactEntry> Entry) {
212
4.68k
    FactID F = FM.newFact(std::move(Entry));
213
4.68k
    FactIDs.push_back(F);
214
4.68k
    return F;
215
4.68k
  }
216
217
2.73k
  bool removeLock(FactManager& FM, const CapabilityExpr &CapE) {
218
2.73k
    unsigned n = FactIDs.size();
219
2.73k
    if (n == 0)
220
0
      return false;
221
222
3.57k
    
for (unsigned i = 0; 2.73k
i < n-1;
++i842
) {
223
1.80k
      if (FM[FactIDs[i]].matches(CapE)) {
224
966
        FactIDs[i] = FactIDs[n-1];
225
966
        FactIDs.pop_back();
226
966
        return true;
227
966
      }
228
1.80k
    }
229
1.77k
    if (FM[FactIDs[n-1]].matches(CapE)) {
230
1.71k
      FactIDs.pop_back();
231
1.71k
      return true;
232
1.71k
    }
233
60
    return false;
234
1.77k
  }
235
236
2.63k
  iterator findLockIter(FactManager &FM, const CapabilityExpr &CapE) {
237
2.63k
    return std::find_if(begin(), end(), [&](FactID ID) {
238
1.91k
      return FM[ID].matches(CapE);
239
1.91k
    });
240
2.63k
  }
241
242
9.64k
  const FactEntry *findLock(FactManager &FM, const CapabilityExpr &CapE) const {
243
9.64k
    auto I = std::find_if(begin(), end(), [&](FactID ID) {
244
9.01k
      return FM[ID].matches(CapE);
245
9.01k
    });
246
9.64k
    return I != end() ? 
&FM[*I]4.04k
:
nullptr5.59k
;
247
9.64k
  }
248
249
  const FactEntry *findLockUniv(FactManager &FM,
250
3.01k
                                const CapabilityExpr &CapE) const {
251
3.01k
    auto I = std::find_if(begin(), end(), [&](FactID ID) -> bool {
252
3.01k
      return FM[ID].matchesUniv(CapE);
253
3.01k
    });
254
3.01k
    return I != end() ? 
&FM[*I]1.85k
:
nullptr1.16k
;
255
3.01k
  }
256
257
  const FactEntry *findPartialMatch(FactManager &FM,
258
1.16k
                                    const CapabilityExpr &CapE) const {
259
1.16k
    auto I = std::find_if(begin(), end(), [&](FactID ID) -> bool {
260
534
      return FM[ID].partiallyMatches(CapE);
261
534
    });
262
1.16k
    return I != end() ? 
&FM[*I]76
:
nullptr1.08k
;
263
1.16k
  }
264
265
490
  bool containsMutexDecl(FactManager &FM, const ValueDecl* Vd) const {
266
490
    auto I = std::find_if(begin(), end(), [&](FactID ID) -> bool {
267
193
      return FM[ID].valueDecl() == Vd;
268
193
    });
269
490
    return I != end();
270
490
  }
271
};
272
273
class ThreadSafetyAnalyzer;
274
275
} // namespace
276
277
namespace clang {
278
namespace threadSafety {
279
280
class BeforeSet {
281
private:
282
  using BeforeVect = SmallVector<const ValueDecl *, 4>;
283
284
  struct BeforeInfo {
285
    BeforeVect Vect;
286
    int Visited = 0;
287
288
841
    BeforeInfo() = default;
289
    BeforeInfo(BeforeInfo &&) = default;
290
  };
291
292
  using BeforeMap =
293
      llvm::DenseMap<const ValueDecl *, std::unique_ptr<BeforeInfo>>;
294
  using CycleMap = llvm::DenseMap<const ValueDecl *, bool>;
295
296
public:
297
35
  BeforeSet() = default;
298
299
  BeforeInfo* insertAttrExprs(const ValueDecl* Vd,
300
                              ThreadSafetyAnalyzer& Analyzer);
301
302
  BeforeInfo *getBeforeInfoForDecl(const ValueDecl *Vd,
303
                                   ThreadSafetyAnalyzer &Analyzer);
304
305
  void checkBeforeAfter(const ValueDecl* Vd,
306
                        const FactSet& FSet,
307
                        ThreadSafetyAnalyzer& Analyzer,
308
                        SourceLocation Loc, StringRef CapKind);
309
310
private:
311
  BeforeMap BMap;
312
  CycleMap CycMap;
313
};
314
315
} // namespace threadSafety
316
} // namespace clang
317
318
namespace {
319
320
class LocalVariableMap;
321
322
using LocalVarContext = llvm::ImmutableMap<const NamedDecl *, unsigned>;
323
324
/// A side (entry or exit) of a CFG node.
325
enum CFGBlockSide { CBS_Entry, CBS_Exit };
326
327
/// CFGBlockInfo is a struct which contains all the information that is
328
/// maintained for each block in the CFG.  See LocalVariableMap for more
329
/// information about the contexts.
330
struct CFGBlockInfo {
331
  // Lockset held at entry to block
332
  FactSet EntrySet;
333
334
  // Lockset held at exit from block
335
  FactSet ExitSet;
336
337
  // Context held at entry to block
338
  LocalVarContext EntryContext;
339
340
  // Context held at exit from block
341
  LocalVarContext ExitContext;
342
343
  // Location of first statement in block
344
  SourceLocation EntryLoc;
345
346
  // Location of last statement in block.
347
  SourceLocation ExitLoc;
348
349
  // Used to replay contexts later
350
  unsigned EntryIndex;
351
352
  // Is this block reachable?
353
  bool Reachable = false;
354
355
0
  const FactSet &getSet(CFGBlockSide Side) const {
356
0
    return Side == CBS_Entry ? EntrySet : ExitSet;
357
0
  }
358
359
0
  SourceLocation getLocation(CFGBlockSide Side) const {
360
0
    return Side == CBS_Entry ? EntryLoc : ExitLoc;
361
0
  }
362
363
private:
364
  CFGBlockInfo(LocalVarContext EmptyCtx)
365
2.40k
      : EntryContext(EmptyCtx), ExitContext(EmptyCtx) {}
366
367
public:
368
  static CFGBlockInfo getEmptyBlockInfo(LocalVariableMap &M);
369
};
370
371
// A LocalVariableMap maintains a map from local variables to their currently
372
// valid definitions.  It provides SSA-like functionality when traversing the
373
// CFG.  Like SSA, each definition or assignment to a variable is assigned a
374
// unique name (an integer), which acts as the SSA name for that definition.
375
// The total set of names is shared among all CFG basic blocks.
376
// Unlike SSA, we do not rewrite expressions to replace local variables declrefs
377
// with their SSA-names.  Instead, we compute a Context for each point in the
378
// code, which maps local variables to the appropriate SSA-name.  This map
379
// changes with each assignment.
380
//
381
// The map is computed in a single pass over the CFG.  Subsequent analyses can
382
// then query the map to find the appropriate Context for a statement, and use
383
// that Context to look up the definitions of variables.
384
class LocalVariableMap {
385
public:
386
  using Context = LocalVarContext;
387
388
  /// A VarDefinition consists of an expression, representing the value of the
389
  /// variable, along with the context in which that expression should be
390
  /// interpreted.  A reference VarDefinition does not itself contain this
391
  /// information, but instead contains a pointer to a previous VarDefinition.
392
  struct VarDefinition {
393
  public:
394
    friend class LocalVariableMap;
395
396
    // The original declaration for this variable.
397
    const NamedDecl *Dec;
398
399
    // The expression for this variable, OR
400
    const Expr *Exp = nullptr;
401
402
    // Reference to another VarDefinition
403
    unsigned Ref = 0;
404
405
    // The map with which Exp should be interpreted.
406
    Context Ctx;
407
408
64
    bool isReference() { return !Exp; }
409
410
  private:
411
    // Create ordinary variable definition
412
    VarDefinition(const NamedDecl *D, const Expr *E, Context C)
413
757
        : Dec(D), Exp(E), Ctx(C) {}
414
415
    // Create reference to previous definition
416
    VarDefinition(const NamedDecl *D, unsigned R, Context C)
417
2.58k
        : Dec(D), Ref(R), Ctx(C) {}
418
  };
419
420
private:
421
  Context::Factory ContextFactory;
422
  std::vector<VarDefinition> VarDefinitions;
423
  std::vector<unsigned> CtxIndices;
424
  std::vector<std::pair<const Stmt *, Context>> SavedContexts;
425
426
public:
427
2.52k
  LocalVariableMap() {
428
    // index 0 is a placeholder for undefined variables (aka phi-nodes).
429
2.52k
    VarDefinitions.push_back(VarDefinition(nullptr, 0u, getEmptyContext()));
430
2.52k
  }
431
432
  /// Look up a definition, within the given context.
433
0
  const VarDefinition* lookup(const NamedDecl *D, Context Ctx) {
434
0
    const unsigned *i = Ctx.lookup(D);
435
0
    if (!i)
436
0
      return nullptr;
437
0
    assert(*i < VarDefinitions.size());
438
0
    return &VarDefinitions[*i];
439
0
  }
440
441
  /// Look up the definition for D within the given context.  Returns
442
  /// NULL if the expression is not statically known.  If successful, also
443
  /// modifies Ctx to hold the context of the return Expr.
444
292
  const Expr* lookupExpr(const NamedDecl *D, Context &Ctx) {
445
292
    const unsigned *P = Ctx.lookup(D);
446
292
    if (!P)
447
104
      return nullptr;
448
449
188
    unsigned i = *P;
450
268
    while (i > 0) {
451
248
      if (VarDefinitions[i].Exp) {
452
168
        Ctx = VarDefinitions[i].Ctx;
453
168
        return VarDefinitions[i].Exp;
454
168
      }
455
80
      i = VarDefinitions[i].Ref;
456
80
    }
457
20
    return nullptr;
458
188
  }
459
460
5.07k
  Context getEmptyContext() { return ContextFactory.getEmptyMap(); }
461
462
  /// Return the next context after processing S.  This function is used by
463
  /// clients of the class to get the appropriate context when traversing the
464
  /// CFG.  It must be called for every assignment or DeclStmt.
465
2.60k
  Context getNextContext(unsigned &CtxIndex, const Stmt *S, Context C) {
466
2.60k
    if (SavedContexts[CtxIndex+1].first == S) {
467
776
      CtxIndex++;
468
776
      Context Result = SavedContexts[CtxIndex].second;
469
776
      return Result;
470
776
    }
471
1.83k
    return C;
472
2.60k
  }
473
474
0
  void dumpVarDefinitionName(unsigned i) {
475
0
    if (i == 0) {
476
0
      llvm::errs() << "Undefined";
477
0
      return;
478
0
    }
479
0
    const NamedDecl *Dec = VarDefinitions[i].Dec;
480
0
    if (!Dec) {
481
0
      llvm::errs() << "<<NULL>>";
482
0
      return;
483
0
    }
484
0
    Dec->printName(llvm::errs());
485
0
    llvm::errs() << "." << i << " " << ((const void*) Dec);
486
0
  }
487
488
  /// Dumps an ASCII representation of the variable map to llvm::errs()
489
0
  void dump() {
490
0
    for (unsigned i = 1, e = VarDefinitions.size(); i < e; ++i) {
491
0
      const Expr *Exp = VarDefinitions[i].Exp;
492
0
      unsigned Ref = VarDefinitions[i].Ref;
493
0
494
0
      dumpVarDefinitionName(i);
495
0
      llvm::errs() << " = ";
496
0
      if (Exp) Exp->dump();
497
0
      else {
498
0
        dumpVarDefinitionName(Ref);
499
0
        llvm::errs() << "\n";
500
0
      }
501
0
    }
502
0
  }
503
504
  /// Dumps an ASCII representation of a Context to llvm::errs()
505
0
  void dumpContext(Context C) {
506
0
    for (Context::iterator I = C.begin(), E = C.end(); I != E; ++I) {
507
0
      const NamedDecl *D = I.getKey();
508
0
      D->printName(llvm::errs());
509
0
      const unsigned *i = C.lookup(D);
510
0
      llvm::errs() << " -> ";
511
0
      dumpVarDefinitionName(*i);
512
0
      llvm::errs() << "\n";
513
0
    }
514
0
  }
515
516
  /// Builds the variable map.
517
  void traverseCFG(CFG *CFGraph, const PostOrderCFGView *SortedGraph,
518
                   std::vector<CFGBlockInfo> &BlockInfo);
519
520
protected:
521
  friend class VarMapBuilder;
522
523
  // Get the current context index
524
8.86k
  unsigned getContextIndex() { return SavedContexts.size()-1; }
525
526
  // Save the current context for later replay
527
12.0k
  void saveContext(const Stmt *S, Context C) {
528
12.0k
    SavedContexts.push_back(std::make_pair(S, C));
529
12.0k
  }
530
531
  // Adds a new definition to the given context, and returns a new context.
532
  // This method should be called when declaring a new variable.
533
614
  Context addDefinition(const NamedDecl *D, const Expr *Exp, Context Ctx) {
534
614
    assert(!Ctx.contains(D));
535
0
    unsigned newID = VarDefinitions.size();
536
614
    Context NewCtx = ContextFactory.add(Ctx, D, newID);
537
614
    VarDefinitions.push_back(VarDefinition(D, Exp, Ctx));
538
614
    return NewCtx;
539
614
  }
540
541
  // Add a new reference to an existing definition.
542
64
  Context addReference(const NamedDecl *D, unsigned i, Context Ctx) {
543
64
    unsigned newID = VarDefinitions.size();
544
64
    Context NewCtx = ContextFactory.add(Ctx, D, newID);
545
64
    VarDefinitions.push_back(VarDefinition(D, i, Ctx));
546
64
    return NewCtx;
547
64
  }
548
549
  // Updates a definition only if that definition is already in the map.
550
  // This method should be called when assigning to an existing variable.
551
143
  Context updateDefinition(const NamedDecl *D, Expr *Exp, Context Ctx) {
552
143
    if (Ctx.contains(D)) {
553
143
      unsigned newID = VarDefinitions.size();
554
143
      Context NewCtx = ContextFactory.remove(Ctx, D);
555
143
      NewCtx = ContextFactory.add(NewCtx, D, newID);
556
143
      VarDefinitions.push_back(VarDefinition(D, Exp, Ctx));
557
143
      return NewCtx;
558
143
    }
559
0
    return Ctx;
560
143
  }
561
562
  // Removes a definition from the context, but keeps the variable name
563
  // as a valid variable.  The index 0 is a placeholder for cleared definitions.
564
27
  Context clearDefinition(const NamedDecl *D, Context Ctx) {
565
27
    Context NewCtx = Ctx;
566
27
    if (NewCtx.contains(D)) {
567
27
      NewCtx = ContextFactory.remove(NewCtx, D);
568
27
      NewCtx = ContextFactory.add(NewCtx, D, 0);
569
27
    }
570
27
    return NewCtx;
571
27
  }
572
573
  // Remove a definition entirely from the context.
574
12
  Context removeDefinition(const NamedDecl *D, Context Ctx) {
575
12
    Context NewCtx = Ctx;
576
12
    if (NewCtx.contains(D)) {
577
12
      NewCtx = ContextFactory.remove(NewCtx, D);
578
12
    }
579
12
    return NewCtx;
580
12
  }
581
582
  Context intersectContexts(Context C1, Context C2);
583
  Context createReferenceContext(Context C);
584
  void intersectBackEdge(Context C1, Context C2);
585
};
586
587
} // namespace
588
589
// This has to be defined after LocalVariableMap.
590
2.40k
CFGBlockInfo CFGBlockInfo::getEmptyBlockInfo(LocalVariableMap &M) {
591
2.40k
  return CFGBlockInfo(M.getEmptyContext());
592
2.40k
}
593
594
namespace {
595
596
/// Visitor which builds a LocalVariableMap
597
class VarMapBuilder : public ConstStmtVisitor<VarMapBuilder> {
598
public:
599
  LocalVariableMap* VMap;
600
  LocalVariableMap::Context Ctx;
601
602
  VarMapBuilder(LocalVariableMap *VM, LocalVariableMap::Context C)
603
8.86k
      : VMap(VM), Ctx(C) {}
604
605
  void VisitDeclStmt(const DeclStmt *S);
606
  void VisitBinaryOperator(const BinaryOperator *BO);
607
};
608
609
} // namespace
610
611
// Add new local variables to the variable map
612
1.03k
void VarMapBuilder::VisitDeclStmt(const DeclStmt *S) {
613
1.03k
  bool modifiedCtx = false;
614
1.03k
  const DeclGroupRef DGrp = S->getDeclGroup();
615
1.03k
  for (const auto *D : DGrp) {
616
1.03k
    if (const auto *VD = dyn_cast_or_null<VarDecl>(D)) {
617
1.03k
      const Expr *E = VD->getInit();
618
619
      // Add local variables with trivial type to the variable map
620
1.03k
      QualType T = VD->getType();
621
1.03k
      if (T.isTrivialType(VD->getASTContext())) {
622
614
        Ctx = VMap->addDefinition(VD, E, Ctx);
623
614
        modifiedCtx = true;
624
614
      }
625
1.03k
    }
626
1.03k
  }
627
1.03k
  if (modifiedCtx)
628
614
    VMap->saveContext(S, Ctx);
629
1.03k
}
630
631
// Update local variable definitions in variable map
632
2.10k
void VarMapBuilder::VisitBinaryOperator(const BinaryOperator *BO) {
633
2.10k
  if (!BO->isAssignmentOp())
634
531
    return;
635
636
1.57k
  Expr *LHSExp = BO->getLHS()->IgnoreParenCasts();
637
638
  // Update the variable map and current context.
639
1.57k
  if (const auto *DRE = dyn_cast<DeclRefExpr>(LHSExp)) {
640
394
    const ValueDecl *VDec = DRE->getDecl();
641
394
    if (Ctx.lookup(VDec)) {
642
162
      if (BO->getOpcode() == BO_Assign)
643
143
        Ctx = VMap->updateDefinition(VDec, BO->getRHS(), Ctx);
644
19
      else
645
        // FIXME -- handle compound assignment operators
646
19
        Ctx = VMap->clearDefinition(VDec, Ctx);
647
162
      VMap->saveContext(BO, Ctx);
648
162
    }
649
394
  }
650
1.57k
}
651
652
// Computes the intersection of two contexts.  The intersection is the
653
// set of variables which have the same definition in both contexts;
654
// variables with different definitions are discarded.
655
LocalVariableMap::Context
656
664
LocalVariableMap::intersectContexts(Context C1, Context C2) {
657
664
  Context Result = C1;
658
664
  for (const auto &P : C1) {
659
308
    const NamedDecl *Dec = P.first;
660
308
    const unsigned *i2 = C2.lookup(Dec);
661
308
    if (!i2)             // variable doesn't exist on second path
662
12
      Result = removeDefinition(Dec, Result);
663
296
    else if (*i2 != P.second)  // variable exists, but has different definition
664
8
      Result = clearDefinition(Dec, Result);
665
308
  }
666
664
  return Result;
667
664
}
668
669
// For every variable in C, create a new variable that refers to the
670
// definition in C.  Return a new context that contains these new variables.
671
// (We use this for a naive implementation of SSA on loop back-edges.)
672
142
LocalVariableMap::Context LocalVariableMap::createReferenceContext(Context C) {
673
142
  Context Result = getEmptyContext();
674
142
  for (const auto &P : C)
675
64
    Result = addReference(P.first, P.second, Result);
676
142
  return Result;
677
142
}
678
679
// This routine also takes the intersection of C1 and C2, but it does so by
680
// altering the VarDefinitions.  C1 must be the result of an earlier call to
681
// createReferenceContext.
682
134
void LocalVariableMap::intersectBackEdge(Context C1, Context C2) {
683
134
  for (const auto &P : C1) {
684
64
    unsigned i1 = P.second;
685
64
    VarDefinition *VDef = &VarDefinitions[i1];
686
64
    assert(VDef->isReference());
687
688
0
    const unsigned *i2 = C2.lookup(P.first);
689
64
    if (!i2 || (*i2 != i1))
690
12
      VDef->Ref = 0;    // Mark this variable as undefined
691
64
  }
692
134
}
693
694
// Traverse the CFG in topological order, so all predecessors of a block
695
// (excluding back-edges) are visited before the block itself.  At
696
// each point in the code, we calculate a Context, which holds the set of
697
// variable definitions which are visible at that point in execution.
698
// Visible variables are mapped to their definitions using an array that
699
// contains all definitions.
700
//
701
// At join points in the CFG, the set is computed as the intersection of
702
// the incoming sets along each edge, E.g.
703
//
704
//                       { Context                 | VarDefinitions }
705
//   int x = 0;          { x -> x1                 | x1 = 0 }
706
//   int y = 0;          { x -> x1, y -> y1        | y1 = 0, x1 = 0 }
707
//   if (b) x = 1;       { x -> x2, y -> y1        | x2 = 1, y1 = 0, ... }
708
//   else   x = 2;       { x -> x3, y -> y1        | x3 = 2, x2 = 1, ... }
709
//   ...                 { y -> y1  (x is unknown) | x3 = 2, x2 = 1, ... }
710
//
711
// This is essentially a simpler and more naive version of the standard SSA
712
// algorithm.  Those definitions that remain in the intersection are from blocks
713
// that strictly dominate the current block.  We do not bother to insert proper
714
// phi nodes, because they are not used in our analysis; instead, wherever
715
// a phi node would be required, we simply remove that definition from the
716
// context (E.g. x above).
717
//
718
// The initial traversal does not capture back-edges, so those need to be
719
// handled on a separate pass.  Whenever the first pass encounters an
720
// incoming back edge, it duplicates the context, creating new definitions
721
// that refer back to the originals.  (These correspond to places where SSA
722
// might have to insert a phi node.)  On the second pass, these definitions are
723
// set to NULL if the variable has changed on the back-edge (i.e. a phi
724
// node was actually required.)  E.g.
725
//
726
//                       { Context           | VarDefinitions }
727
//   int x = 0, y = 0;   { x -> x1, y -> y1  | y1 = 0, x1 = 0 }
728
//   while (b)           { x -> x2, y -> y1  | [1st:] x2=x1; [2nd:] x2=NULL; }
729
//     x = x+1;          { x -> x3, y -> y1  | x3 = x2 + 1, ... }
730
//   ...                 { y -> y1           | x3 = 2, x2 = 1, ... }
731
void LocalVariableMap::traverseCFG(CFG *CFGraph,
732
                                   const PostOrderCFGView *SortedGraph,
733
2.40k
                                   std::vector<CFGBlockInfo> &BlockInfo) {
734
2.40k
  PostOrderCFGView::CFGBlockSet VisitedBlocks(CFGraph);
735
736
2.40k
  CtxIndices.resize(CFGraph->getNumBlockIDs());
737
738
8.86k
  for (const auto *CurrBlock : *SortedGraph) {
739
8.86k
    unsigned CurrBlockID = CurrBlock->getBlockID();
740
8.86k
    CFGBlockInfo *CurrBlockInfo = &BlockInfo[CurrBlockID];
741
742
8.86k
    VisitedBlocks.insert(CurrBlock);
743
744
    // Calculate the entry context for the current block
745
8.86k
    bool HasBackEdges = false;
746
8.86k
    bool CtxInit = true;
747
8.86k
    for (CFGBlock::const_pred_iterator PI = CurrBlock->pred_begin(),
748
16.1k
         PE  = CurrBlock->pred_end(); PI != PE; 
++PI7.25k
) {
749
      // If *PI -> CurrBlock is a back edge, skip it.
750
7.25k
      if (*PI == nullptr || !VisitedBlocks.alreadySet(*PI)) {
751
142
        HasBackEdges = true;
752
142
        continue;
753
142
      }
754
755
7.11k
      unsigned PrevBlockID = (*PI)->getBlockID();
756
7.11k
      CFGBlockInfo *PrevBlockInfo = &BlockInfo[PrevBlockID];
757
758
7.11k
      if (CtxInit) {
759
6.45k
        CurrBlockInfo->EntryContext = PrevBlockInfo->ExitContext;
760
6.45k
        CtxInit = false;
761
6.45k
      }
762
664
      else {
763
664
        CurrBlockInfo->EntryContext =
764
664
          intersectContexts(CurrBlockInfo->EntryContext,
765
664
                            PrevBlockInfo->ExitContext);
766
664
      }
767
7.11k
    }
768
769
    // Duplicate the context if we have back-edges, so we can call
770
    // intersectBackEdges later.
771
8.86k
    if (HasBackEdges)
772
142
      CurrBlockInfo->EntryContext =
773
142
        createReferenceContext(CurrBlockInfo->EntryContext);
774
775
    // Create a starting context index for the current block
776
8.86k
    saveContext(nullptr, CurrBlockInfo->EntryContext);
777
8.86k
    CurrBlockInfo->EntryIndex = getContextIndex();
778
779
    // Visit all the statements in the basic block.
780
8.86k
    VarMapBuilder VMapBuilder(this, CurrBlockInfo->EntryContext);
781
38.9k
    for (const auto &BI : *CurrBlock) {
782
38.9k
      switch (BI.getKind()) {
783
38.5k
        case CFGElement::Statement: {
784
38.5k
          CFGStmt CS = BI.castAs<CFGStmt>();
785
38.5k
          VMapBuilder.Visit(CS.getStmt());
786
38.5k
          break;
787
0
        }
788
384
        default:
789
384
          break;
790
38.9k
      }
791
38.9k
    }
792
8.86k
    CurrBlockInfo->ExitContext = VMapBuilder.Ctx;
793
794
    // Mark variables on back edges as "unknown" if they've been changed.
795
8.86k
    for (CFGBlock::const_succ_iterator SI = CurrBlock->succ_begin(),
796
16.1k
         SE  = CurrBlock->succ_end(); SI != SE; 
++SI7.25k
) {
797
      // if CurrBlock -> *SI is *not* a back edge
798
7.25k
      if (*SI == nullptr || 
!VisitedBlocks.alreadySet(*SI)7.25k
)
799
7.12k
        continue;
800
801
134
      CFGBlock *FirstLoopBlock = *SI;
802
134
      Context LoopBegin = BlockInfo[FirstLoopBlock->getBlockID()].EntryContext;
803
134
      Context LoopEnd   = CurrBlockInfo->ExitContext;
804
134
      intersectBackEdge(LoopBegin, LoopEnd);
805
134
    }
806
8.86k
  }
807
808
  // Put an extra entry at the end of the indexed context array
809
2.40k
  unsigned exitID = CFGraph->getExit().getBlockID();
810
2.40k
  saveContext(nullptr, BlockInfo[exitID].ExitContext);
811
2.40k
}
812
813
/// Find the appropriate source locations to use when producing diagnostics for
814
/// each block in the CFG.
815
static void findBlockLocations(CFG *CFGraph,
                               const PostOrderCFGView *SortedGraph,
                               std::vector<CFGBlockInfo> &BlockInfo) {
  // Walk blocks in post order so that, when a block is empty, the entry/exit
  // locations of its single predecessor or successor have (usually) already
  // been filled in and can be borrowed below.
  for (const auto *CurrBlock : *SortedGraph) {
    CFGBlockInfo *CurrBlockInfo = &BlockInfo[CurrBlock->getBlockID()];

    // Find the source location of the last statement in the block, if the
    // block is not empty.
    if (const Stmt *S = CurrBlock->getTerminatorStmt()) {
      // A terminator (if/while/...) stands in for both entry and exit.
      CurrBlockInfo->EntryLoc = CurrBlockInfo->ExitLoc = S->getBeginLoc();
    } else {
      // No terminator: scan backwards for the last statement element.
      for (CFGBlock::const_reverse_iterator BI = CurrBlock->rbegin(),
           BE = CurrBlock->rend(); BI != BE; ++BI) {
        // FIXME: Handle other CFGElement kinds.
        if (Optional<CFGStmt> CS = BI->getAs<CFGStmt>()) {
          CurrBlockInfo->ExitLoc = CS->getStmt()->getBeginLoc();
          break;
        }
      }
    }

    if (CurrBlockInfo->ExitLoc.isValid()) {
      // This block contains at least one statement. Find the source location
      // of the first statement in the block.
      for (const auto &BI : *CurrBlock) {
        // FIXME: Handle other CFGElement kinds.
        if (Optional<CFGStmt> CS = BI.getAs<CFGStmt>()) {
          CurrBlockInfo->EntryLoc = CS->getStmt()->getBeginLoc();
          break;
        }
      }
    } else if (CurrBlock->pred_size() == 1 && *CurrBlock->pred_begin() &&
               CurrBlock != &CFGraph->getExit()) {
      // The block is empty, and has a single predecessor. Use its exit
      // location.
      CurrBlockInfo->EntryLoc = CurrBlockInfo->ExitLoc =
          BlockInfo[(*CurrBlock->pred_begin())->getBlockID()].ExitLoc;
    } else if (CurrBlock->succ_size() == 1 && *CurrBlock->succ_begin()) {
      // The block is empty, and has a single successor. Use its entry
      // location.
      CurrBlockInfo->EntryLoc = CurrBlockInfo->ExitLoc =
          BlockInfo[(*CurrBlock->succ_begin())->getBlockID()].EntryLoc;
    }
  }
}
860
861
namespace {
862
863
/// A fact representing an ordinary (non-scoped) capability that is held.
class LockableFactEntry : public FactEntry {
public:
  LockableFactEntry(const CapabilityExpr &CE, LockKind LK, SourceLocation Loc,
                    SourceKind Src = Acquired)
      : FactEntry(CE, LK, Loc, Src) {}

  // Called when the fact is present on one branch of a join but not the
  // other; reports a "mutex held at end of scope" style warning unless the
  // fact was asserted, is a negative capability, or is the universal lock.
  void
  handleRemovalFromIntersection(const FactSet &FSet, FactManager &FactMan,
                                SourceLocation JoinLoc, LockErrorKind LEK,
                                ThreadSafetyHandler &Handler) const override {
    if (!asserted() && !negative() && !isUniversal()) {
      Handler.handleMutexHeldEndOfScope("mutex", toString(), loc(), JoinLoc,
                                        LEK);
    }
  }

  // Acquiring a capability that is already held is a double-lock error.
  void handleLock(FactSet &FSet, FactManager &FactMan, const FactEntry &entry,
                  ThreadSafetyHandler &Handler,
                  StringRef DiagKind) const override {
    Handler.handleDoubleLock(DiagKind, entry.toString(), loc(), entry.loc());
  }

  // Releasing removes the fact and, for a positive capability, records the
  // corresponding negative capability (!Cp) at the unlock location.
  void handleUnlock(FactSet &FSet, FactManager &FactMan,
                    const CapabilityExpr &Cp, SourceLocation UnlockLoc,
                    bool FullyRemove, ThreadSafetyHandler &Handler,
                    StringRef DiagKind) const override {
    FSet.removeLock(FactMan, Cp);
    if (!Cp.negative()) {
      FSet.addLock(FactMan, std::make_unique<LockableFactEntry>(
                                !Cp, LK_Exclusive, UnlockLoc));
    }
  }
};
896
897
/// A fact representing a scoped lockable object (e.g. a lock guard), which
/// manages one or more underlying capabilities for the duration of its scope.
class ScopedLockableFactEntry : public FactEntry {
private:
  enum UnderlyingCapabilityKind {
    UCK_Acquired,          ///< Any kind of acquired capability.
    UCK_ReleasedShared,    ///< Shared capability that was released.
    UCK_ReleasedExclusive, ///< Exclusive capability that was released.
  };

  // Packs the capability expression together with its 2-bit kind tag.
  using UnderlyingCapability =
      llvm::PointerIntPair<const til::SExpr *, 2, UnderlyingCapabilityKind>;

  // Capabilities managed by this scoped object.
  SmallVector<UnderlyingCapability, 4> UnderlyingMutexes;

public:
  ScopedLockableFactEntry(const CapabilityExpr &CE, SourceLocation Loc)
      : FactEntry(CE, LK_Exclusive, Loc, Acquired) {}

  // Record a capability acquired when the scoped object was constructed.
  void addLock(const CapabilityExpr &M) {
    UnderlyingMutexes.emplace_back(M.sexpr(), UCK_Acquired);
  }

  // Record an exclusive capability released at construction (adopt-release).
  void addExclusiveUnlock(const CapabilityExpr &M) {
    UnderlyingMutexes.emplace_back(M.sexpr(), UCK_ReleasedExclusive);
  }

  // Record a shared capability released at construction.
  void addSharedUnlock(const CapabilityExpr &M) {
    UnderlyingMutexes.emplace_back(M.sexpr(), UCK_ReleasedShared);
  }

  void
  handleRemovalFromIntersection(const FactSet &FSet, FactManager &FactMan,
                                SourceLocation JoinLoc, LockErrorKind LEK,
                                ThreadSafetyHandler &Handler) const override {
    for (const auto &UnderlyingMutex : UnderlyingMutexes) {
      const auto *Entry = FSet.findLock(
          FactMan, CapabilityExpr(UnderlyingMutex.getPointer(), false));
      if ((UnderlyingMutex.getInt() == UCK_Acquired && Entry) ||
          (UnderlyingMutex.getInt() != UCK_Acquired && !Entry)) {
        // If this scoped lock manages another mutex, and if the underlying
        // mutex is still/not held, then warn about the underlying mutex.
        Handler.handleMutexHeldEndOfScope(
            "mutex", sx::toString(UnderlyingMutex.getPointer()), loc(), JoinLoc,
            LEK);
      }
    }
  }

  // Relocking the scoped object re-applies each underlying operation:
  // acquired capabilities are re-locked, released ones are re-released.
  void handleLock(FactSet &FSet, FactManager &FactMan, const FactEntry &entry,
                  ThreadSafetyHandler &Handler,
                  StringRef DiagKind) const override {
    for (const auto &UnderlyingMutex : UnderlyingMutexes) {
      CapabilityExpr UnderCp(UnderlyingMutex.getPointer(), false);

      if (UnderlyingMutex.getInt() == UCK_Acquired)
        lock(FSet, FactMan, UnderCp, entry.kind(), entry.loc(), &Handler,
             DiagKind);
      else
        unlock(FSet, FactMan, UnderCp, entry.loc(), &Handler, DiagKind);
    }
  }

  // Unlocking (or destroying, when FullyRemove is true) the scoped object
  // inverts each underlying operation.
  void handleUnlock(FactSet &FSet, FactManager &FactMan,
                    const CapabilityExpr &Cp, SourceLocation UnlockLoc,
                    bool FullyRemove, ThreadSafetyHandler &Handler,
                    StringRef DiagKind) const override {
    assert(!Cp.negative() && "Managing object cannot be negative.");
    for (const auto &UnderlyingMutex : UnderlyingMutexes) {
      CapabilityExpr UnderCp(UnderlyingMutex.getPointer(), false);

      // Remove/lock the underlying mutex if it exists/is still unlocked; warn
      // on double unlocking/locking if we're not destroying the scoped object.
      ThreadSafetyHandler *TSHandler = FullyRemove ? nullptr : &Handler;
      if (UnderlyingMutex.getInt() == UCK_Acquired) {
        unlock(FSet, FactMan, UnderCp, UnlockLoc, TSHandler, DiagKind);
      } else {
        LockKind kind = UnderlyingMutex.getInt() == UCK_ReleasedShared
                            ? LK_Shared
                            : LK_Exclusive;
        lock(FSet, FactMan, UnderCp, kind, UnlockLoc, TSHandler, DiagKind);
      }
    }
    if (FullyRemove)
      FSet.removeLock(FactMan, Cp);
  }

private:
  // Add Cp to FSet, clearing any matching negative capability first; reports
  // a double-lock through Handler (when non-null) if Cp is already held.
  void lock(FactSet &FSet, FactManager &FactMan, const CapabilityExpr &Cp,
            LockKind kind, SourceLocation loc, ThreadSafetyHandler *Handler,
            StringRef DiagKind) const {
    if (const FactEntry *Fact = FSet.findLock(FactMan, Cp)) {
      if (Handler)
        Handler->handleDoubleLock(DiagKind, Cp.toString(), Fact->loc(), loc);
    } else {
      FSet.removeLock(FactMan, !Cp);
      FSet.addLock(FactMan,
                   std::make_unique<LockableFactEntry>(Cp, kind, loc, Managed));
    }
  }

  // Remove Cp from FSet and record its negative; reports an unmatched unlock
  // through Handler (when non-null) if Cp was not held.
  void unlock(FactSet &FSet, FactManager &FactMan, const CapabilityExpr &Cp,
              SourceLocation loc, ThreadSafetyHandler *Handler,
              StringRef DiagKind) const {
    if (FSet.findLock(FactMan, Cp)) {
      FSet.removeLock(FactMan, Cp);
      FSet.addLock(FactMan, std::make_unique<LockableFactEntry>(
                                !Cp, LK_Exclusive, loc));
    } else if (Handler) {
      SourceLocation PrevLoc;
      if (const FactEntry *Neg = FSet.findLock(FactMan, !Cp))
        PrevLoc = Neg->loc();
      Handler->handleUnmatchedUnlock(DiagKind, Cp.toString(), loc, PrevLoc);
    }
  }
};
1011
1012
/// Class which implements the core thread safety analysis routines.
1013
class ThreadSafetyAnalyzer {
  friend class BuildLockset;
  friend class threadSafety::BeforeSet;

  llvm::BumpPtrAllocator Bpa;              // Backing storage for the TIL arena.
  threadSafety::til::MemRegionRef Arena;   // Arena used by SxBuilder.
  threadSafety::SExprBuilder SxBuilder;    // Translates attr exprs to TIL.

  ThreadSafetyHandler &Handler;            // Diagnostic sink.
  const CXXMethodDecl *CurrentMethod;      // Method under analysis (scoping).
  LocalVariableMap LocalVarMap;            // SSA-like map of local variables.
  FactManager FactMan;                     // Owns all lock facts.
  std::vector<CFGBlockInfo> BlockInfo;     // Per-CFG-block analysis state.

  BeforeSet *GlobalBeforeSet;              // acquired_before/after ordering.

public:
  ThreadSafetyAnalyzer(ThreadSafetyHandler &H, BeforeSet* Bset)
      : Arena(&Bpa), SxBuilder(Arena), Handler(H), GlobalBeforeSet(Bset) {}

  // True if CapE names a capability accessible from the current method.
  bool inCurrentScope(const CapabilityExpr &CapE);

  void addLock(FactSet &FSet, std::unique_ptr<FactEntry> Entry,
               StringRef DiagKind, bool ReqAttr = false);
  void removeLock(FactSet &FSet, const CapabilityExpr &CapE,
                  SourceLocation UnlockLoc, bool FullyRemove, LockKind Kind,
                  StringRef DiagKind);

  // Collect the capabilities named by an attribute into Mtxs.
  template <typename AttrType>
  void getMutexIDs(CapExprSet &Mtxs, AttrType *Attr, const Expr *Exp,
                   const NamedDecl *D, VarDecl *SelfDecl = nullptr);

  // Variant used for trylock attributes on CFG branch edges.
  template <class AttrType>
  void getMutexIDs(CapExprSet &Mtxs, AttrType *Attr, const Expr *Exp,
                   const NamedDecl *D,
                   const CFGBlock *PredBlock, const CFGBlock *CurrBlock,
                   Expr *BrE, bool Neg);

  const CallExpr* getTrylockCallExpr(const Stmt *Cond, LocalVarContext C,
                                     bool &Negate);

  void getEdgeLockset(FactSet &Result, const FactSet &ExitSet,
                      const CFGBlock* PredBlock,
                      const CFGBlock *CurrBlock);

  bool join(const FactEntry &a, const FactEntry &b, bool CanModify);

  void intersectAndWarn(FactSet &EntrySet, const FactSet &ExitSet,
                        SourceLocation JoinLoc, LockErrorKind EntryLEK,
                        LockErrorKind ExitLEK);

  // Convenience overload: same error kind for entry and exit mismatches.
  void intersectAndWarn(FactSet &EntrySet, const FactSet &ExitSet,
                        SourceLocation JoinLoc, LockErrorKind LEK) {
    intersectAndWarn(EntrySet, ExitSet, JoinLoc, LEK, LEK);
  }

  void runAnalysis(AnalysisDeclContext &AC);
};
1071
1072
} // namespace
1073
1074
/// Process acquired_before and acquired_after attributes on Vd.
1075
BeforeSet::BeforeInfo* BeforeSet::insertAttrExprs(const ValueDecl* Vd,
    ThreadSafetyAnalyzer& Analyzer) {
  // Create a new entry for Vd.
  BeforeInfo *Info = nullptr;
  {
    // Keep InfoPtr in its own scope in case BMap is modified later and the
    // reference becomes invalid.
    std::unique_ptr<BeforeInfo> &InfoPtr = BMap[Vd];
    if (!InfoPtr)
      InfoPtr.reset(new BeforeInfo());
    Info = InfoPtr.get();
  }

  for (const auto *At : Vd->attrs()) {
    switch (At->getKind()) {
      case attr::AcquiredBefore: {
        const auto *A = cast<AcquiredBeforeAttr>(At);

        // Read exprs from the attribute, and add them to BeforeVect.
        for (const auto *Arg : A->args()) {
          CapabilityExpr Cp =
            Analyzer.SxBuilder.translateAttrExpr(Arg, nullptr);
          if (const ValueDecl *Cpvd = Cp.valueDecl()) {
            Info->Vect.push_back(Cpvd);
            // Recurse so the referenced mutex's own ordering attributes are
            // processed too (only on first sight).
            const auto It = BMap.find(Cpvd);
            if (It == BMap.end())
              insertAttrExprs(Cpvd, Analyzer);
          }
        }
        break;
      }
      case attr::AcquiredAfter: {
        const auto *A = cast<AcquiredAfterAttr>(At);

        // Read exprs from the attribute, and add them to BeforeVect.
        // acquired_after(M) on Vd is recorded as "M comes before Vd".
        for (const auto *Arg : A->args()) {
          CapabilityExpr Cp =
            Analyzer.SxBuilder.translateAttrExpr(Arg, nullptr);
          if (const ValueDecl *ArgVd = Cp.valueDecl()) {
            // Get entry for mutex listed in attribute
            BeforeInfo *ArgInfo = getBeforeInfoForDecl(ArgVd, Analyzer);
            ArgInfo->Vect.push_back(Vd);
          }
        }
        break;
      }
      default:
        break;
    }
  }

  return Info;
}
1128
1129
BeforeSet::BeforeInfo *
BeforeSet::getBeforeInfoForDecl(const ValueDecl *Vd,
                                ThreadSafetyAnalyzer &Analyzer) {
  // Return the cached before-set entry for Vd, building it from Vd's
  // acquired_before/acquired_after attributes on first request.
  auto Iter = BMap.find(Vd);
  BeforeInfo *Result = (Iter == BMap.end())
                           ? insertAttrExprs(Vd, Analyzer)
                           : Iter->second.get();
  assert(Result && "BMap contained nullptr?");
  return Result;
}
1141
1142
/// Warn if any mutexes in FSet are in the acquired_before set of StartVd,
/// and diagnose cycles in the acquired_before/acquired_after graph.
1143
void BeforeSet::checkBeforeAfter(const ValueDecl* StartVd,
                                 const FactSet& FSet,
                                 ThreadSafetyAnalyzer& Analyzer,
                                 SourceLocation Loc, StringRef CapKind) {
  // Nodes pushed onto the DFS stack; their Visited flags are reset at the end.
  SmallVector<BeforeInfo*, 8> InfoVect;

  // Do a depth-first traversal of Vd.
  // Return true if there are cycles.
  // Visited states: 0 = unvisited, 1 = on the current DFS path (seeing it
  // again means a cycle), 2 = fully processed.
  std::function<bool (const ValueDecl*)> traverse = [&](const ValueDecl* Vd) {
    if (!Vd)
      return false;

    BeforeSet::BeforeInfo *Info = getBeforeInfoForDecl(Vd, Analyzer);

    if (Info->Visited == 1)
      return true;

    if (Info->Visited == 2)
      return false;

    if (Info->Vect.empty())
      return false;

    InfoVect.push_back(Info);
    Info->Visited = 1;
    for (const auto *Vdb : Info->Vect) {
      // Exclude mutexes in our immediate before set.
      if (FSet.containsMutexDecl(Analyzer.FactMan, Vdb)) {
        StringRef L1 = StartVd->getName();
        StringRef L2 = Vdb->getName();
        Analyzer.Handler.handleLockAcquiredBefore(CapKind, L1, L2, Loc);
      }
      // Transitively search other before sets, and warn on cycles.
      if (traverse(Vdb)) {
        // Report each cycle only once per declaration (tracked in CycMap).
        if (CycMap.find(Vd) == CycMap.end()) {
          CycMap.insert(std::make_pair(Vd, true));
          StringRef L1 = Vd->getName();
          Analyzer.Handler.handleBeforeAfterCycle(L1, Vd->getLocation());
        }
      }
    }
    Info->Visited = 2;
    return false;
  };

  traverse(StartVd);

  // Restore Visited flags so later queries start from a clean state.
  for (auto *Info : InfoVect)
    Info->Visited = 0;
}
1193
1194
/// Gets the value decl pointer from DeclRefExprs or MemberExprs.
1195
18.9k
/// Gets the value decl pointer from DeclRefExprs or MemberExprs, looking
/// through implicit casts; returns null for any other expression form.
static const ValueDecl *getValueDecl(const Expr *Exp) {
  if (const auto *Cast = dyn_cast<ImplicitCastExpr>(Exp))
    return getValueDecl(Cast->getSubExpr());

  if (const auto *Ref = dyn_cast<DeclRefExpr>(Exp))
    return Ref->getDecl();

  if (const auto *Member = dyn_cast<MemberExpr>(Exp))
    return Member->getMemberDecl();

  return nullptr;
}
1207
1208
namespace {
1209
1210
// Detection trait: value is true iff Ty has a member function args()
// (i.e. the attribute carries a list of argument expressions). Used to
// select between the two ClassifyDiagnostic overloads below.
template <typename Ty>
class has_arg_iterator_range {
  using yes = char[1];
  using no = char[2];

  // Preferred overload: viable only when I->args() is well-formed (SFINAE).
  template <typename Inner>
  static yes& test(Inner *I, decltype(I->args()) * = nullptr);

  // Fallback overload for all other types.
  template <typename>
  static no& test(...);

public:
  static const bool value = sizeof(test<Ty>(nullptr)) == sizeof(yes);
};
1224
1225
} // namespace
1226
1227
4.19k
// The capability attribute itself names the kind of capability (e.g.
// "mutex", "role") used in diagnostic messages.
static StringRef ClassifyDiagnostic(const CapabilityAttr *A) {
  return A->getName();
}
1230
1231
4.80k
static StringRef ClassifyDiagnostic(QualType VDT) {
  // We need to look at the declaration of the type of the value to determine
  // which it is. The type should either be a record or a typedef, or a pointer
  // or reference thereof.
  if (const auto *RT = VDT->getAs<RecordType>()) {
    if (const auto *RD = RT->getDecl())
      if (const auto *CA = RD->getAttr<CapabilityAttr>())
        return ClassifyDiagnostic(CA);
  } else if (const auto *TT = VDT->getAs<TypedefType>()) {
    if (const auto *TD = TT->getDecl())
      if (const auto *CA = TD->getAttr<CapabilityAttr>())
        return ClassifyDiagnostic(CA);
  } else if (VDT->isPointerType() || VDT->isReferenceType())
    // Pointer/reference to a capability type: classify the pointee.
    return ClassifyDiagnostic(VDT->getPointeeType());

  // No CapabilityAttr found anywhere; fall back to the generic name.
  return "mutex";
}
1248
1249
4.35k
static StringRef ClassifyDiagnostic(const ValueDecl *VD) {
  assert(VD && "No ValueDecl passed");

  // The ValueDecl is the declaration of a mutex or role (hopefully).
  // Classification is driven entirely by its declared type.
  return ClassifyDiagnostic(VD->getType());
}
1255
1256
// Overload for attributes with a single argument expression (no args()
// range), e.g. GuardedBy/PtGuardedBy: classify via the referenced decl.
template <typename AttrTy>
static std::enable_if_t<!has_arg_iterator_range<AttrTy>::value, StringRef>
ClassifyDiagnostic(const AttrTy *A) {
  if (const ValueDecl *VD = getValueDecl(A->getArg()))
    return ClassifyDiagnostic(VD);
  return "mutex";
}
ThreadSafety.cpp:std::__1::enable_if<!(has_arg_iterator_range<clang::PtGuardedByAttr>::value), llvm::StringRef>::type ClassifyDiagnostic<clang::PtGuardedByAttr>(clang::PtGuardedByAttr const*)
Line
Count
Source
1258
388
ClassifyDiagnostic(const AttrTy *A) {
1259
388
  if (const ValueDecl *VD = getValueDecl(A->getArg()))
1260
388
    return ClassifyDiagnostic(VD);
1261
0
  return "mutex";
1262
388
}
ThreadSafety.cpp:std::__1::enable_if<!(has_arg_iterator_range<clang::GuardedByAttr>::value), llvm::StringRef>::type ClassifyDiagnostic<clang::GuardedByAttr>(clang::GuardedByAttr const*)
Line
Count
Source
1258
2.03k
ClassifyDiagnostic(const AttrTy *A) {
1259
2.03k
  if (const ValueDecl *VD = getValueDecl(A->getArg()))
1260
1.84k
    return ClassifyDiagnostic(VD);
1261
184
  return "mutex";
1262
2.03k
}
1263
1264
// Overload for attributes carrying an args() range (acquire/release/require
// etc.): classify via the first argument that resolves to a decl.
template <typename AttrTy>
static std::enable_if_t<has_arg_iterator_range<AttrTy>::value, StringRef>
ClassifyDiagnostic(const AttrTy *A) {
  for (const auto *Arg : A->args()) {
    if (const ValueDecl *VD = getValueDecl(Arg))
      return ClassifyDiagnostic(VD);
  }
  return "mutex";
}
ThreadSafety.cpp:std::__1::enable_if<has_arg_iterator_range<clang::RequiresCapabilityAttr>::value, llvm::StringRef>::type ClassifyDiagnostic<clang::RequiresCapabilityAttr>(clang::RequiresCapabilityAttr const*)
Line
Count
Source
1266
1.10k
ClassifyDiagnostic(const AttrTy *A) {
1267
1.13k
  for (const auto *Arg : A->args()) {
1268
1.13k
    if (const ValueDecl *VD = getValueDecl(Arg))
1269
848
      return ClassifyDiagnostic(VD);
1270
1.13k
  }
1271
254
  return "mutex";
1272
1.10k
}
ThreadSafety.cpp:std::__1::enable_if<has_arg_iterator_range<clang::ReleaseCapabilityAttr>::value, llvm::StringRef>::type ClassifyDiagnostic<clang::ReleaseCapabilityAttr>(clang::ReleaseCapabilityAttr const*)
Line
Count
Source
1266
2.10k
ClassifyDiagnostic(const AttrTy *A) {
1267
2.10k
  for (const auto *Arg : A->args()) {
1268
459
    if (const ValueDecl *VD = getValueDecl(Arg))
1269
415
      return ClassifyDiagnostic(VD);
1270
459
  }
1271
1.68k
  return "mutex";
1272
2.10k
}
ThreadSafety.cpp:std::__1::enable_if<has_arg_iterator_range<clang::AcquireCapabilityAttr>::value, llvm::StringRef>::type ClassifyDiagnostic<clang::AcquireCapabilityAttr>(clang::AcquireCapabilityAttr const*)
Line
Count
Source
1266
1.92k
ClassifyDiagnostic(const AttrTy *A) {
1267
1.92k
  for (const auto *Arg : A->args()) {
1268
622
    if (const ValueDecl *VD = getValueDecl(Arg))
1269
566
      return ClassifyDiagnostic(VD);
1270
622
  }
1271
1.36k
  return "mutex";
1272
1.92k
}
ThreadSafety.cpp:std::__1::enable_if<has_arg_iterator_range<clang::TryAcquireCapabilityAttr>::value, llvm::StringRef>::type ClassifyDiagnostic<clang::TryAcquireCapabilityAttr>(clang::TryAcquireCapabilityAttr const*)
Line
Count
Source
1266
168
ClassifyDiagnostic(const AttrTy *A) {
1267
168
  for (const auto *Arg : A->args()) {
1268
52
    if (const ValueDecl *VD = getValueDecl(Arg))
1269
48
      return ClassifyDiagnostic(VD);
1270
52
  }
1271
120
  return "mutex";
1272
168
}
ThreadSafety.cpp:std::__1::enable_if<has_arg_iterator_range<clang::ExclusiveTrylockFunctionAttr>::value, llvm::StringRef>::type ClassifyDiagnostic<clang::ExclusiveTrylockFunctionAttr>(clang::ExclusiveTrylockFunctionAttr const*)
Line
Count
Source
1266
156
ClassifyDiagnostic(const AttrTy *A) {
1267
156
  for (const auto *Arg : A->args()) {
1268
44
    if (const ValueDecl *VD = getValueDecl(Arg))
1269
40
      return ClassifyDiagnostic(VD);
1270
44
  }
1271
116
  return "mutex";
1272
156
}
ThreadSafety.cpp:std::__1::enable_if<has_arg_iterator_range<clang::SharedTrylockFunctionAttr>::value, llvm::StringRef>::type ClassifyDiagnostic<clang::SharedTrylockFunctionAttr>(clang::SharedTrylockFunctionAttr const*)
Line
Count
Source
1266
12
ClassifyDiagnostic(const AttrTy *A) {
1267
12
  for (const auto *Arg : A->args()) {
1268
8
    if (const ValueDecl *VD = getValueDecl(Arg))
1269
8
      return ClassifyDiagnostic(VD);
1270
8
  }
1271
4
  return "mutex";
1272
12
}
ThreadSafety.cpp:std::__1::enable_if<has_arg_iterator_range<clang::AssertExclusiveLockAttr>::value, llvm::StringRef>::type ClassifyDiagnostic<clang::AssertExclusiveLockAttr>(clang::AssertExclusiveLockAttr const*)
Line
Count
Source
1266
40
ClassifyDiagnostic(const AttrTy *A) {
1267
40
  for (const auto *Arg : A->args()) {
1268
10
    if (const ValueDecl *VD = getValueDecl(Arg))
1269
10
      return ClassifyDiagnostic(VD);
1270
10
  }
1271
30
  return "mutex";
1272
40
}
ThreadSafety.cpp:std::__1::enable_if<has_arg_iterator_range<clang::AssertSharedLockAttr>::value, llvm::StringRef>::type ClassifyDiagnostic<clang::AssertSharedLockAttr>(clang::AssertSharedLockAttr const*)
Line
Count
Source
1266
16
ClassifyDiagnostic(const AttrTy *A) {
1267
16
  for (const auto *Arg : A->args()) {
1268
8
    if (const ValueDecl *VD = getValueDecl(Arg))
1269
8
      return ClassifyDiagnostic(VD);
1270
8
  }
1271
8
  return "mutex";
1272
16
}
ThreadSafety.cpp:std::__1::enable_if<has_arg_iterator_range<clang::AssertCapabilityAttr>::value, llvm::StringRef>::type ClassifyDiagnostic<clang::AssertCapabilityAttr>(clang::AssertCapabilityAttr const*)
Line
Count
Source
1266
56
ClassifyDiagnostic(const AttrTy *A) {
1267
56
  for (const auto *Arg : A->args()) {
1268
18
    if (const ValueDecl *VD = getValueDecl(Arg))
1269
18
      return ClassifyDiagnostic(VD);
1270
18
  }
1271
38
  return "mutex";
1272
56
}
ThreadSafety.cpp:std::__1::enable_if<has_arg_iterator_range<clang::LocksExcludedAttr>::value, llvm::StringRef>::type ClassifyDiagnostic<clang::LocksExcludedAttr>(clang::LocksExcludedAttr const*)
Line
Count
Source
1266
174
ClassifyDiagnostic(const AttrTy *A) {
1267
174
  for (const auto *Arg : A->args()) {
1268
174
    if (const ValueDecl *VD = getValueDecl(Arg))
1269
158
      return ClassifyDiagnostic(VD);
1270
174
  }
1271
16
  return "mutex";
1272
174
}
1273
1274
2.16k
// Determine whether the capability named by CapE is accessible from the
// function currently being analyzed; used to decide whether a missing
// negative capability can actually be annotated by the user.
bool ThreadSafetyAnalyzer::inCurrentScope(const CapabilityExpr &CapE) {
  const threadSafety::til::SExpr *SExp = CapE.sexpr();
  assert(SExp && "Null expressions should be ignored");

  if (const auto *LP = dyn_cast<til::LiteralPtr>(SExp)) {
    const ValueDecl *VD = LP->clangDecl();
    // Variables defined in a function are always inaccessible.
    if (!VD->isDefinedOutsideFunctionOrMethod())
      return false;
    // For now we consider static class members to be inaccessible.
    if (isa<CXXRecordDecl>(VD->getDeclContext()))
      return false;
    // Global variables are always in scope.
    return true;
  }

  // Members are in scope from methods of the same class.
  if (const auto *P = dyn_cast<til::Project>(SExp)) {
    if (!CurrentMethod)
      return false;
    const ValueDecl *VD = P->clangDecl();
    return VD->getDeclContext() == CurrentMethod->getDeclContext();
  }

  // Anything else (calls, complex expressions) is treated as out of scope.
  return false;
}
1300
1301
/// Add a new lock to the lockset, warning if the lock is already there.
1302
/// \param ReqAttr -- true if this is part of an initial Requires attribute.
1303
void ThreadSafetyAnalyzer::addLock(FactSet &FSet,
                                   std::unique_ptr<FactEntry> Entry,
                                   StringRef DiagKind, bool ReqAttr) {
  if (Entry->shouldIgnore())
    return;

  if (!ReqAttr && !Entry->negative()) {
    // look for the negative capability, and remove it from the fact set.
    CapabilityExpr NegC = !*Entry;
    const FactEntry *Nen = FSet.findLock(FactMan, NegC);
    if (Nen) {
      FSet.removeLock(FactMan, NegC);
    }
    else {
      // The negative capability was not held: warn, but only when the
      // capability is in scope here (so an annotation is possible) and the
      // acquisition was not a mere assertion.
      if (inCurrentScope(*Entry) && !Entry->asserted())
        Handler.handleNegativeNotHeld(DiagKind, Entry->toString(),
                                      NegC.toString(), Entry->loc());
    }
  }

  // Check before/after constraints
  if (Handler.issueBetaWarnings() &&
      !Entry->asserted() && !Entry->declared()) {
    GlobalBeforeSet->checkBeforeAfter(Entry->valueDecl(), FSet, *this,
                                      Entry->loc(), DiagKind);
  }

  // FIXME: Don't always warn when we have support for reentrant locks.
  if (const FactEntry *Cp = FSet.findLock(FactMan, *Entry)) {
    // Already held: delegate to the existing fact (double-lock diagnostics),
    // unless the new acquisition is just an assertion.
    if (!Entry->asserted())
      Cp->handleLock(FSet, FactMan, *Entry, Handler, DiagKind);
  } else {
    FSet.addLock(FactMan, std::move(Entry));
  }
}
1338
1339
/// Remove a lock from the lockset, warning if the lock is not there.
1340
/// \param UnlockLoc The source location of the unlock (only used in error msg)
1341
void ThreadSafetyAnalyzer::removeLock(FactSet &FSet, const CapabilityExpr &Cp,
                                      SourceLocation UnlockLoc,
                                      bool FullyRemove, LockKind ReceivedKind,
                                      StringRef DiagKind) {
  if (Cp.shouldIgnore())
    return;

  const FactEntry *LDat = FSet.findLock(FactMan, Cp);
  if (!LDat) {
    // Not held: report an unmatched unlock, pointing at the prior release
    // site if the negative capability records one.
    SourceLocation PrevLoc;
    if (const FactEntry *Neg = FSet.findLock(FactMan, !Cp))
      PrevLoc = Neg->loc();
    Handler.handleUnmatchedUnlock(DiagKind, Cp.toString(), UnlockLoc, PrevLoc);
    return;
  }

  // Generic lock removal doesn't care about lock kind mismatches, but
  // otherwise diagnose when the lock kinds are mismatched.
  if (ReceivedKind != LK_Generic && LDat->kind() != ReceivedKind) {
    Handler.handleIncorrectUnlockKind(DiagKind, Cp.toString(), LDat->kind(),
                                      ReceivedKind, LDat->loc(), UnlockLoc);
  }

  // The fact itself knows how to remove itself (plain vs. scoped lockable).
  LDat->handleUnlock(FSet, FactMan, Cp, UnlockLoc, FullyRemove, Handler,
                     DiagKind);
}
1367
1368
/// Extract the list of mutexIDs from the attribute on an expression,
1369
/// and push them onto Mtxs, discarding any duplicates.
1370
template <typename AttrType>
void ThreadSafetyAnalyzer::getMutexIDs(CapExprSet &Mtxs, AttrType *Attr,
                                       const Expr *Exp, const NamedDecl *D,
                                       VarDecl *SelfDecl) {
  if (Attr->args_size() == 0) {
    // The mutex held is the "this" object.
    CapabilityExpr Cp = SxBuilder.translateAttrExpr(nullptr, D, Exp, SelfDecl);
    if (Cp.isInvalid()) {
       warnInvalidLock(Handler, nullptr, D, Exp, ClassifyDiagnostic(Attr));
       return;
    }
    //else
    if (!Cp.shouldIgnore())
      Mtxs.push_back_nodup(Cp);
    return;
  }

  // One capability per attribute argument; push_back_nodup discards
  // duplicates.
  for (const auto *Arg : Attr->args()) {
    CapabilityExpr Cp = SxBuilder.translateAttrExpr(Arg, D, Exp, SelfDecl);
    if (Cp.isInvalid()) {
       warnInvalidLock(Handler, nullptr, D, Exp, ClassifyDiagnostic(Attr));
       continue;
    }
    //else
    if (!Cp.shouldIgnore())
      Mtxs.push_back_nodup(Cp);
  }
}
ThreadSafety.cpp:void (anonymous namespace)::ThreadSafetyAnalyzer::getMutexIDs<clang::RequiresCapabilityAttr const>((anonymous namespace)::CapExprSet&, clang::RequiresCapabilityAttr const*, clang::Expr const*, clang::NamedDecl const*, clang::VarDecl*)
Line
Count
Source
1373
430
                                       VarDecl *SelfDecl) {
1374
430
  if (Attr->args_size() == 0) {
1375
    // The mutex held is the "this" object.
1376
0
    CapabilityExpr Cp = SxBuilder.translateAttrExpr(nullptr, D, Exp, SelfDecl);
1377
0
    if (Cp.isInvalid()) {
1378
0
       warnInvalidLock(Handler, nullptr, D, Exp, ClassifyDiagnostic(Attr));
1379
0
       return;
1380
0
    }
1381
    //else
1382
0
    if (!Cp.shouldIgnore())
1383
0
      Mtxs.push_back_nodup(Cp);
1384
0
    return;
1385
0
  }
1386
1387
482
  
for (const auto *Arg : Attr->args())430
{
1388
482
    CapabilityExpr Cp = SxBuilder.translateAttrExpr(Arg, D, Exp, SelfDecl);
1389
482
    if (Cp.isInvalid()) {
1390
0
       warnInvalidLock(Handler, nullptr, D, Exp, ClassifyDiagnostic(Attr));
1391
0
       continue;
1392
0
    }
1393
    //else
1394
482
    if (!Cp.shouldIgnore())
1395
482
      Mtxs.push_back_nodup(Cp);
1396
482
  }
1397
430
}
ThreadSafety.cpp:void (anonymous namespace)::ThreadSafetyAnalyzer::getMutexIDs<clang::ReleaseCapabilityAttr const>((anonymous namespace)::CapExprSet&, clang::ReleaseCapabilityAttr const*, clang::Expr const*, clang::NamedDecl const*, clang::VarDecl*)
Line
Count
Source
1373
2.18k
                                       VarDecl *SelfDecl) {
1374
2.18k
  if (Attr->args_size() == 0) {
1375
    // The mutex held is the "this" object.
1376
1.64k
    CapabilityExpr Cp = SxBuilder.translateAttrExpr(nullptr, D, Exp, SelfDecl);
1377
1.64k
    if (Cp.isInvalid()) {
1378
0
       warnInvalidLock(Handler, nullptr, D, Exp, ClassifyDiagnostic(Attr));
1379
0
       return;
1380
0
    }
1381
    //else
1382
1.64k
    if (!Cp.shouldIgnore())
1383
1.64k
      Mtxs.push_back_nodup(Cp);
1384
1.64k
    return;
1385
1.64k
  }
1386
1387
696
  
for (const auto *Arg : Attr->args())536
{
1388
696
    CapabilityExpr Cp = SxBuilder.translateAttrExpr(Arg, D, Exp, SelfDecl);
1389
696
    if (Cp.isInvalid()) {
1390
0
       warnInvalidLock(Handler, nullptr, D, Exp, ClassifyDiagnostic(Attr));
1391
0
       continue;
1392
0
    }
1393
    //else
1394
696
    if (!Cp.shouldIgnore())
1395
692
      Mtxs.push_back_nodup(Cp);
1396
696
  }
1397
536
}
ThreadSafety.cpp:void (anonymous namespace)::ThreadSafetyAnalyzer::getMutexIDs<clang::AcquireCapabilityAttr const>((anonymous namespace)::CapExprSet&, clang::AcquireCapabilityAttr const*, clang::Expr const*, clang::NamedDecl const*, clang::VarDecl*)
Line
Count
Source
1373
1.92k
                                       VarDecl *SelfDecl) {
1374
1.92k
  if (Attr->args_size() == 0) {
1375
    // The mutex held is the "this" object.
1376
1.30k
    CapabilityExpr Cp = SxBuilder.translateAttrExpr(nullptr, D, Exp, SelfDecl);
1377
1.30k
    if (Cp.isInvalid()) {
1378
0
       warnInvalidLock(Handler, nullptr, D, Exp, ClassifyDiagnostic(Attr));
1379
0
       return;
1380
0
    }
1381
    //else
1382
1.30k
    if (!Cp.shouldIgnore())
1383
1.30k
      Mtxs.push_back_nodup(Cp);
1384
1.30k
    return;
1385
1.30k
  }
1386
1387
754
  
for (const auto *Arg : Attr->args())622
{
1388
754
    CapabilityExpr Cp = SxBuilder.translateAttrExpr(Arg, D, Exp, SelfDecl);
1389
754
    if (Cp.isInvalid()) {
1390
0
       warnInvalidLock(Handler, nullptr, D, Exp, ClassifyDiagnostic(Attr));
1391
0
       continue;
1392
0
    }
1393
    //else
1394
754
    if (!Cp.shouldIgnore())
1395
746
      Mtxs.push_back_nodup(Cp);
1396
754
  }
1397
622
}
ThreadSafety.cpp:void (anonymous namespace)::ThreadSafetyAnalyzer::getMutexIDs<clang::TryAcquireCapabilityAttr const>((anonymous namespace)::CapExprSet&, clang::TryAcquireCapabilityAttr const*, clang::Expr const*, clang::NamedDecl const*, clang::VarDecl*)
Line
Count
Source
1373
84
                                       VarDecl *SelfDecl) {
1374
84
  if (Attr->args_size() == 0) {
1375
    // The mutex held is the "this" object.
1376
58
    CapabilityExpr Cp = SxBuilder.translateAttrExpr(nullptr, D, Exp, SelfDecl);
1377
58
    if (Cp.isInvalid()) {
1378
0
       warnInvalidLock(Handler, nullptr, D, Exp, ClassifyDiagnostic(Attr));
1379
0
       return;
1380
0
    }
1381
    //else
1382
58
    if (!Cp.shouldIgnore())
1383
58
      Mtxs.push_back_nodup(Cp);
1384
58
    return;
1385
58
  }
1386
1387
26
  for (const auto *Arg : Attr->args()) {
1388
26
    CapabilityExpr Cp = SxBuilder.translateAttrExpr(Arg, D, Exp, SelfDecl);
1389
26
    if (Cp.isInvalid()) {
1390
0
       warnInvalidLock(Handler, nullptr, D, Exp, ClassifyDiagnostic(Attr));
1391
0
       continue;
1392
0
    }
1393
    //else
1394
26
    if (!Cp.shouldIgnore())
1395
26
      Mtxs.push_back_nodup(Cp);
1396
26
  }
1397
26
}
ThreadSafety.cpp:void (anonymous namespace)::ThreadSafetyAnalyzer::getMutexIDs<clang::ExclusiveTrylockFunctionAttr const>((anonymous namespace)::CapExprSet&, clang::ExclusiveTrylockFunctionAttr const*, clang::Expr const*, clang::NamedDecl const*, clang::VarDecl*)
Line
Count
Source
1373
78
                                       VarDecl *SelfDecl) {
1374
78
  if (Attr->args_size() == 0) {
1375
    // The mutex held is the "this" object.
1376
56
    CapabilityExpr Cp = SxBuilder.translateAttrExpr(nullptr, D, Exp, SelfDecl);
1377
56
    if (Cp.isInvalid()) {
1378
0
       warnInvalidLock(Handler, nullptr, D, Exp, ClassifyDiagnostic(Attr));
1379
0
       return;
1380
0
    }
1381
    //else
1382
56
    if (!Cp.shouldIgnore())
1383
56
      Mtxs.push_back_nodup(Cp);
1384
56
    return;
1385
56
  }
1386
1387
22
  for (const auto *Arg : Attr->args()) {
1388
22
    CapabilityExpr Cp = SxBuilder.translateAttrExpr(Arg, D, Exp, SelfDecl);
1389
22
    if (Cp.isInvalid()) {
1390
0
       warnInvalidLock(Handler, nullptr, D, Exp, ClassifyDiagnostic(Attr));
1391
0
       continue;
1392
0
    }
1393
    //else
1394
22
    if (!Cp.shouldIgnore())
1395
22
      Mtxs.push_back_nodup(Cp);
1396
22
  }
1397
22
}
ThreadSafety.cpp:void (anonymous namespace)::ThreadSafetyAnalyzer::getMutexIDs<clang::SharedTrylockFunctionAttr const>((anonymous namespace)::CapExprSet&, clang::SharedTrylockFunctionAttr const*, clang::Expr const*, clang::NamedDecl const*, clang::VarDecl*)
Line
Count
Source
1373
6
                                       VarDecl *SelfDecl) {
1374
6
  if (Attr->args_size() == 0) {
1375
    // The mutex held is the "this" object.
1376
2
    CapabilityExpr Cp = SxBuilder.translateAttrExpr(nullptr, D, Exp, SelfDecl);
1377
2
    if (Cp.isInvalid()) {
1378
0
       warnInvalidLock(Handler, nullptr, D, Exp, ClassifyDiagnostic(Attr));
1379
0
       return;
1380
0
    }
1381
    //else
1382
2
    if (!Cp.shouldIgnore())
1383
2
      Mtxs.push_back_nodup(Cp);
1384
2
    return;
1385
2
  }
1386
1387
4
  for (const auto *Arg : Attr->args()) {
1388
4
    CapabilityExpr Cp = SxBuilder.translateAttrExpr(Arg, D, Exp, SelfDecl);
1389
4
    if (Cp.isInvalid()) {
1390
0
       warnInvalidLock(Handler, nullptr, D, Exp, ClassifyDiagnostic(Attr));
1391
0
       continue;
1392
0
    }
1393
    //else
1394
4
    if (!Cp.shouldIgnore())
1395
4
      Mtxs.push_back_nodup(Cp);
1396
4
  }
1397
4
}
ThreadSafety.cpp:void (anonymous namespace)::ThreadSafetyAnalyzer::getMutexIDs<clang::AssertExclusiveLockAttr const>((anonymous namespace)::CapExprSet&, clang::AssertExclusiveLockAttr const*, clang::Expr const*, clang::NamedDecl const*, clang::VarDecl*)
Line
Count
Source
1373
38
                                       VarDecl *SelfDecl) {
1374
38
  if (Attr->args_size() == 0) {
1375
    // The mutex held is the "this" object.
1376
30
    CapabilityExpr Cp = SxBuilder.translateAttrExpr(nullptr, D, Exp, SelfDecl);
1377
30
    if (Cp.isInvalid()) {
1378
0
       warnInvalidLock(Handler, nullptr, D, Exp, ClassifyDiagnostic(Attr));
1379
0
       return;
1380
0
    }
1381
    //else
1382
30
    if (!Cp.shouldIgnore())
1383
30
      Mtxs.push_back_nodup(Cp);
1384
30
    return;
1385
30
  }
1386
1387
10
  
for (const auto *Arg : Attr->args())8
{
1388
10
    CapabilityExpr Cp = SxBuilder.translateAttrExpr(Arg, D, Exp, SelfDecl);
1389
10
    if (Cp.isInvalid()) {
1390
0
       warnInvalidLock(Handler, nullptr, D, Exp, ClassifyDiagnostic(Attr));
1391
0
       continue;
1392
0
    }
1393
    //else
1394
10
    if (!Cp.shouldIgnore())
1395
10
      Mtxs.push_back_nodup(Cp);
1396
10
  }
1397
8
}
ThreadSafety.cpp:void (anonymous namespace)::ThreadSafetyAnalyzer::getMutexIDs<clang::AssertSharedLockAttr const>((anonymous namespace)::CapExprSet&, clang::AssertSharedLockAttr const*, clang::Expr const*, clang::NamedDecl const*, clang::VarDecl*)
Line
Count
Source
1373
14
                                       VarDecl *SelfDecl) {
1374
14
  if (Attr->args_size() == 0) {
1375
    // The mutex held is the "this" object.
1376
8
    CapabilityExpr Cp = SxBuilder.translateAttrExpr(nullptr, D, Exp, SelfDecl);
1377
8
    if (Cp.isInvalid()) {
1378
0
       warnInvalidLock(Handler, nullptr, D, Exp, ClassifyDiagnostic(Attr));
1379
0
       return;
1380
0
    }
1381
    //else
1382
8
    if (!Cp.shouldIgnore())
1383
8
      Mtxs.push_back_nodup(Cp);
1384
8
    return;
1385
8
  }
1386
1387
8
  
for (const auto *Arg : Attr->args())6
{
1388
8
    CapabilityExpr Cp = SxBuilder.translateAttrExpr(Arg, D, Exp, SelfDecl);
1389
8
    if (Cp.isInvalid()) {
1390
0
       warnInvalidLock(Handler, nullptr, D, Exp, ClassifyDiagnostic(Attr));
1391
0
       continue;
1392
0
    }
1393
    //else
1394
8
    if (!Cp.shouldIgnore())
1395
8
      Mtxs.push_back_nodup(Cp);
1396
8
  }
1397
6
}
ThreadSafety.cpp:void (anonymous namespace)::ThreadSafetyAnalyzer::getMutexIDs<clang::AssertCapabilityAttr const>((anonymous namespace)::CapExprSet&, clang::AssertCapabilityAttr const*, clang::Expr const*, clang::NamedDecl const*, clang::VarDecl*)
Line
Count
Source
1373
52
                                       VarDecl *SelfDecl) {
1374
52
  if (Attr->args_size() == 0) {
1375
    // The mutex held is the "this" object.
1376
38
    CapabilityExpr Cp = SxBuilder.translateAttrExpr(nullptr, D, Exp, SelfDecl);
1377
38
    if (Cp.isInvalid()) {
1378
0
       warnInvalidLock(Handler, nullptr, D, Exp, ClassifyDiagnostic(Attr));
1379
0
       return;
1380
0
    }
1381
    //else
1382
38
    if (!Cp.shouldIgnore())
1383
38
      Mtxs.push_back_nodup(Cp);
1384
38
    return;
1385
38
  }
1386
1387
18
  
for (const auto *Arg : Attr->args())14
{
1388
18
    CapabilityExpr Cp = SxBuilder.translateAttrExpr(Arg, D, Exp, SelfDecl);
1389
18
    if (Cp.isInvalid()) {
1390
0
       warnInvalidLock(Handler, nullptr, D, Exp, ClassifyDiagnostic(Attr));
1391
0
       continue;
1392
0
    }
1393
    //else
1394
18
    if (!Cp.shouldIgnore())
1395
18
      Mtxs.push_back_nodup(Cp);
1396
18
  }
1397
14
}
ThreadSafety.cpp:void (anonymous namespace)::ThreadSafetyAnalyzer::getMutexIDs<clang::LocksExcludedAttr const>((anonymous namespace)::CapExprSet&, clang::LocksExcludedAttr const*, clang::Expr const*, clang::NamedDecl const*, clang::VarDecl*)
Line
Count
Source
1373
48
                                       VarDecl *SelfDecl) {
1374
48
  if (Attr->args_size() == 0) {
1375
    // The mutex held is the "this" object.
1376
0
    CapabilityExpr Cp = SxBuilder.translateAttrExpr(nullptr, D, Exp, SelfDecl);
1377
0
    if (Cp.isInvalid()) {
1378
0
       warnInvalidLock(Handler, nullptr, D, Exp, ClassifyDiagnostic(Attr));
1379
0
       return;
1380
0
    }
1381
    //else
1382
0
    if (!Cp.shouldIgnore())
1383
0
      Mtxs.push_back_nodup(Cp);
1384
0
    return;
1385
0
  }
1386
1387
48
  for (const auto *Arg : Attr->args()) {
1388
48
    CapabilityExpr Cp = SxBuilder.translateAttrExpr(Arg, D, Exp, SelfDecl);
1389
48
    if (Cp.isInvalid()) {
1390
0
       warnInvalidLock(Handler, nullptr, D, Exp, ClassifyDiagnostic(Attr));
1391
0
       continue;
1392
0
    }
1393
    //else
1394
48
    if (!Cp.shouldIgnore())
1395
48
      Mtxs.push_back_nodup(Cp);
1396
48
  }
1397
48
}
1398
1399
/// Extract the list of mutexIDs from a trylock attribute.  If the
1400
/// trylock applies to the given edge, then push them onto Mtxs, discarding
1401
/// any duplicates.
1402
template <class AttrType>
1403
void ThreadSafetyAnalyzer::getMutexIDs(CapExprSet &Mtxs, AttrType *Attr,
1404
                                       const Expr *Exp, const NamedDecl *D,
1405
                                       const CFGBlock *PredBlock,
1406
                                       const CFGBlock *CurrBlock,
1407
336
                                       Expr *BrE, bool Neg) {
1408
  // Find out which branch has the lock
1409
336
  bool branch = false;
1410
336
  if (const auto *BLE = dyn_cast_or_null<CXXBoolLiteralExpr>(BrE))
1411
272
    branch = BLE->getValue();
1412
64
  else if (const auto *ILE = dyn_cast_or_null<IntegerLiteral>(BrE))
1413
64
    branch = ILE->getValue().getBoolValue();
1414
1415
336
  int branchnum = branch ? 0 : 
10
;
1416
336
  if (Neg)
1417
144
    branchnum = !branchnum;
1418
1419
  // If we've taken the trylock branch, then add the lock
1420
336
  int i = 0;
1421
336
  for (CFGBlock::const_succ_iterator SI = PredBlock->succ_begin(),
1422
1.00k
       SE = PredBlock->succ_end(); SI != SE && 
i < 2672
;
++SI, ++i672
) {
1423
672
    if (*SI == CurrBlock && 
i == branchnum336
)
1424
168
      getMutexIDs(Mtxs, Attr, Exp, D);
1425
672
  }
1426
336
}
ThreadSafety.cpp:void (anonymous namespace)::ThreadSafetyAnalyzer::getMutexIDs<clang::TryAcquireCapabilityAttr const>((anonymous namespace)::CapExprSet&, clang::TryAcquireCapabilityAttr const*, clang::Expr const*, clang::NamedDecl const*, clang::CFGBlock const*, clang::CFGBlock const*, clang::Expr*, bool)
Line
Count
Source
1407
168
                                       Expr *BrE, bool Neg) {
1408
  // Find out which branch has the lock
1409
168
  bool branch = false;
1410
168
  if (const auto *BLE = dyn_cast_or_null<CXXBoolLiteralExpr>(BrE))
1411
136
    branch = BLE->getValue();
1412
32
  else if (const auto *ILE = dyn_cast_or_null<IntegerLiteral>(BrE))
1413
32
    branch = ILE->getValue().getBoolValue();
1414
1415
168
  int branchnum = branch ? 0 : 
10
;
1416
168
  if (Neg)
1417
72
    branchnum = !branchnum;
1418
1419
  // If we've taken the trylock branch, then add the lock
1420
168
  int i = 0;
1421
168
  for (CFGBlock::const_succ_iterator SI = PredBlock->succ_begin(),
1422
504
       SE = PredBlock->succ_end(); SI != SE && 
i < 2336
;
++SI, ++i336
) {
1423
336
    if (*SI == CurrBlock && 
i == branchnum168
)
1424
84
      getMutexIDs(Mtxs, Attr, Exp, D);
1425
336
  }
1426
168
}
ThreadSafety.cpp:void (anonymous namespace)::ThreadSafetyAnalyzer::getMutexIDs<clang::ExclusiveTrylockFunctionAttr const>((anonymous namespace)::CapExprSet&, clang::ExclusiveTrylockFunctionAttr const*, clang::Expr const*, clang::NamedDecl const*, clang::CFGBlock const*, clang::CFGBlock const*, clang::Expr*, bool)
Line
Count
Source
1407
156
                                       Expr *BrE, bool Neg) {
1408
  // Find out which branch has the lock
1409
156
  bool branch = false;
1410
156
  if (const auto *BLE = dyn_cast_or_null<CXXBoolLiteralExpr>(BrE))
1411
124
    branch = BLE->getValue();
1412
32
  else if (const auto *ILE = dyn_cast_or_null<IntegerLiteral>(BrE))
1413
32
    branch = ILE->getValue().getBoolValue();
1414
1415
156
  int branchnum = branch ? 0 : 
10
;
1416
156
  if (Neg)
1417
68
    branchnum = !branchnum;
1418
1419
  // If we've taken the trylock branch, then add the lock
1420
156
  int i = 0;
1421
156
  for (CFGBlock::const_succ_iterator SI = PredBlock->succ_begin(),
1422
468
       SE = PredBlock->succ_end(); SI != SE && 
i < 2312
;
++SI, ++i312
) {
1423
312
    if (*SI == CurrBlock && 
i == branchnum156
)
1424
78
      getMutexIDs(Mtxs, Attr, Exp, D);
1425
312
  }
1426
156
}
ThreadSafety.cpp:void (anonymous namespace)::ThreadSafetyAnalyzer::getMutexIDs<clang::SharedTrylockFunctionAttr const>((anonymous namespace)::CapExprSet&, clang::SharedTrylockFunctionAttr const*, clang::Expr const*, clang::NamedDecl const*, clang::CFGBlock const*, clang::CFGBlock const*, clang::Expr*, bool)
Line
Count
Source
1407
12
                                       Expr *BrE, bool Neg) {
1408
  // Find out which branch has the lock
1409
12
  bool branch = false;
1410
12
  if (const auto *BLE = dyn_cast_or_null<CXXBoolLiteralExpr>(BrE))
1411
12
    branch = BLE->getValue();
1412
0
  else if (const auto *ILE = dyn_cast_or_null<IntegerLiteral>(BrE))
1413
0
    branch = ILE->getValue().getBoolValue();
1414
1415
12
  int branchnum = branch ? 0 : 
10
;
1416
12
  if (Neg)
1417
4
    branchnum = !branchnum;
1418
1419
  // If we've taken the trylock branch, then add the lock
1420
12
  int i = 0;
1421
12
  for (CFGBlock::const_succ_iterator SI = PredBlock->succ_begin(),
1422
36
       SE = PredBlock->succ_end(); SI != SE && 
i < 224
;
++SI, ++i24
) {
1423
24
    if (*SI == CurrBlock && 
i == branchnum12
)
1424
6
      getMutexIDs(Mtxs, Attr, Exp, D);
1425
24
  }
1426
12
}
1427
1428
608
static bool getStaticBooleanValue(Expr *E, bool &TCond) {
1429
608
  if (isa<CXXNullPtrLiteralExpr>(E) || 
isa<GNUNullExpr>(E)600
) {
1430
8
    TCond = false;
1431
8
    return true;
1432
600
  } else if (const auto *BLE = dyn_cast<CXXBoolLiteralExpr>(E)) {
1433
16
    TCond = BLE->getValue();
1434
16
    return true;
1435
584
  } else if (const auto *ILE = dyn_cast<IntegerLiteral>(E)) {
1436
416
    TCond = ILE->getValue().getBoolValue();
1437
416
    return true;
1438
416
  } else 
if (auto *168
CE168
= dyn_cast<ImplicitCastExpr>(E))
1439
136
    return getStaticBooleanValue(CE->getSubExpr(), TCond);
1440
32
  return false;
1441
608
}
1442
1443
// If Cond can be traced back to a function call, return the call expression.
1444
// The negate variable should be called with false, and will be set to true
1445
// if the function call is negated, e.g. if (!mu.tryLock(...))
1446
const CallExpr* ThreadSafetyAnalyzer::getTrylockCallExpr(const Stmt *Cond,
1447
                                                         LocalVarContext C,
1448
3.48k
                                                         bool &Negate) {
1449
3.48k
  if (!Cond)
1450
124
    return nullptr;
1451
1452
3.35k
  if (const auto *CallExp = dyn_cast<CallExpr>(Cond)) {
1453
782
    if (CallExp->getBuiltinCallee() == Builtin::BI__builtin_expect)
1454
16
      return getTrylockCallExpr(CallExp->getArg(0), C, Negate);
1455
766
    return CallExp;
1456
782
  }
1457
2.57k
  else if (const auto *PE = dyn_cast<ParenExpr>(Cond))
1458
32
    return getTrylockCallExpr(PE->getSubExpr(), C, Negate);
1459
2.54k
  else if (const auto *CE = dyn_cast<ImplicitCastExpr>(Cond))
1460
996
    return getTrylockCallExpr(CE->getSubExpr(), C, Negate);
1461
1.54k
  else if (const auto *FE = dyn_cast<FullExpr>(Cond))
1462
8
    return getTrylockCallExpr(FE->getSubExpr(), C, Negate);
1463
1.54k
  else if (const auto *DRE = dyn_cast<DeclRefExpr>(Cond)) {
1464
292
    const Expr *E = LocalVarMap.lookupExpr(DRE->getDecl(), C);
1465
292
    return getTrylockCallExpr(E, C, Negate);
1466
292
  }
1467
1.24k
  else if (const auto *UOP = dyn_cast<UnaryOperator>(Cond)) {
1468
160
    if (UOP->getOpcode() == UO_LNot) {
1469
128
      Negate = !Negate;
1470
128
      return getTrylockCallExpr(UOP->getSubExpr(), C, Negate);
1471
128
    }
1472
32
    return nullptr;
1473
160
  }
1474
1.08k
  else if (const auto *BOP = dyn_cast<BinaryOperator>(Cond)) {
1475
592
    if (BOP->getOpcode() == BO_EQ || 
BOP->getOpcode() == BO_NE216
) {
1476
400
      if (BOP->getOpcode() == BO_NE)
1477
24
        Negate = !Negate;
1478
1479
400
      bool TCond = false;
1480
400
      if (getStaticBooleanValue(BOP->getRHS(), TCond)) {
1481
376
        if (!TCond) 
Negate = !Negate208
;
1482
376
        return getTrylockCallExpr(BOP->getLHS(), C, Negate);
1483
376
      }
1484
24
      TCond = false;
1485
24
      if (getStaticBooleanValue(BOP->getLHS(), TCond)) {
1486
16
        if (!TCond) Negate = !Negate;
1487
16
        return getTrylockCallExpr(BOP->getRHS(), C, Negate);
1488
16
      }
1489
8
      return nullptr;
1490
24
    }
1491
192
    if (BOP->getOpcode() == BO_LAnd) {
1492
      // LHS must have been evaluated in a different block.
1493
72
      return getTrylockCallExpr(BOP->getRHS(), C, Negate);
1494
72
    }
1495
120
    if (BOP->getOpcode() == BO_LOr)
1496
24
      return getTrylockCallExpr(BOP->getRHS(), C, Negate);
1497
96
    return nullptr;
1498
496
  } else if (const auto *COP = dyn_cast<ConditionalOperator>(Cond)) {
1499
24
    bool TCond, FCond;
1500
24
    if (getStaticBooleanValue(COP->getTrueExpr(), TCond) &&
1501
24
        getStaticBooleanValue(COP->getFalseExpr(), FCond)) {
1502
24
      if (TCond && 
!FCond8
)
1503
8
        return getTrylockCallExpr(COP->getCond(), C, Negate);
1504
16
      if (!TCond && FCond) {
1505
16
        Negate = !Negate;
1506
16
        return getTrylockCallExpr(COP->getCond(), C, Negate);
1507
16
      }
1508
16
    }
1509
24
  }
1510
472
  return nullptr;
1511
3.35k
}
1512
1513
/// Find the lockset that holds on the edge between PredBlock
1514
/// and CurrBlock.  The edge set is the exit set of PredBlock (passed
1515
/// as the ExitSet parameter) plus any trylocks, which are conditionally held.
1516
void ThreadSafetyAnalyzer::getEdgeLockset(FactSet& Result,
1517
                                          const FactSet &ExitSet,
1518
                                          const CFGBlock *PredBlock,
1519
6.98k
                                          const CFGBlock *CurrBlock) {
1520
6.98k
  Result = ExitSet;
1521
1522
6.98k
  const Stmt *Cond = PredBlock->getTerminatorCondition();
1523
  // We don't acquire try-locks on ?: branches, only when its result is used.
1524
6.98k
  if (!Cond || 
isa<ConditionalOperator>(PredBlock->getTerminatorStmt())1.57k
)
1525
5.49k
    return;
1526
1527
1.49k
  bool Negate = false;
1528
1.49k
  const CFGBlockInfo *PredBlockInfo = &BlockInfo[PredBlock->getBlockID()];
1529
1.49k
  const LocalVarContext &LVarCtx = PredBlockInfo->ExitContext;
1530
1.49k
  StringRef CapDiagKind = "mutex";
1531
1532
1.49k
  const auto *Exp = getTrylockCallExpr(Cond, LVarCtx, Negate);
1533
1.49k
  if (!Exp)
1534
732
    return;
1535
1536
766
  auto *FunDecl = dyn_cast_or_null<NamedDecl>(Exp->getCalleeDecl());
1537
766
  if(!FunDecl || !FunDecl->hasAttrs())
1538
446
    return;
1539
1540
320
  CapExprSet ExclusiveLocksToAdd;
1541
320
  CapExprSet SharedLocksToAdd;
1542
1543
  // If the condition is a call to a Trylock function, then grab the attributes
1544
336
  for (const auto *Attr : FunDecl->attrs()) {
1545
336
    switch (Attr->getKind()) {
1546
168
      case attr::TryAcquireCapability: {
1547
168
        auto *A = cast<TryAcquireCapabilityAttr>(Attr);
1548
168
        getMutexIDs(A->isShared() ? 
SharedLocksToAdd12
:
ExclusiveLocksToAdd156
, A,
1549
168
                    Exp, FunDecl, PredBlock, CurrBlock, A->getSuccessValue(),
1550
168
                    Negate);
1551
168
        CapDiagKind = ClassifyDiagnostic(A);
1552
168
        break;
1553
0
      };
1554
156
      case attr::ExclusiveTrylockFunction: {
1555
156
        const auto *A = cast<ExclusiveTrylockFunctionAttr>(Attr);
1556
156
        getMutexIDs(ExclusiveLocksToAdd, A, Exp, FunDecl,
1557
156
                    PredBlock, CurrBlock, A->getSuccessValue(), Negate);
1558
156
        CapDiagKind = ClassifyDiagnostic(A);
1559
156
        break;
1560
0
      }
1561
12
      case attr::SharedTrylockFunction: {
1562
12
        const auto *A = cast<SharedTrylockFunctionAttr>(Attr);
1563
12
        getMutexIDs(SharedLocksToAdd, A, Exp, FunDecl,
1564
12
                    PredBlock, CurrBlock, A->getSuccessValue(), Negate);
1565
12
        CapDiagKind = ClassifyDiagnostic(A);
1566
12
        break;
1567
0
      }
1568
0
      default:
1569
0
        break;
1570
336
    }
1571
336
  }
1572
1573
  // Add and remove locks.
1574
320
  SourceLocation Loc = Exp->getExprLoc();
1575
320
  for (const auto &ExclusiveLockToAdd : ExclusiveLocksToAdd)
1576
156
    addLock(Result, std::make_unique<LockableFactEntry>(ExclusiveLockToAdd,
1577
156
                                                         LK_Exclusive, Loc),
1578
156
            CapDiagKind);
1579
320
  for (const auto &SharedLockToAdd : SharedLocksToAdd)
1580
12
    addLock(Result, std::make_unique<LockableFactEntry>(SharedLockToAdd,
1581
12
                                                         LK_Shared, Loc),
1582
12
            CapDiagKind);
1583
320
}
1584
1585
namespace {
1586
1587
/// We use this class to visit different types of expressions in
1588
/// CFGBlocks, and build up the lockset.
1589
/// An expression may cause us to add or remove locks from the lockset, or else
1590
/// output error messages related to missing locks.
1591
/// FIXME: In future, we may be able to not inherit from a visitor.
1592
class BuildLockset : public ConstStmtVisitor<BuildLockset> {
1593
  friend class ThreadSafetyAnalyzer;
1594
1595
  ThreadSafetyAnalyzer *Analyzer;
1596
  FactSet FSet;
1597
  LocalVariableMap::Context LVarCtx;
1598
  unsigned CtxIndex;
1599
1600
  // helper functions
1601
  void warnIfMutexNotHeld(const NamedDecl *D, const Expr *Exp, AccessKind AK,
1602
                          Expr *MutexExp, ProtectedOperationKind POK,
1603
                          StringRef DiagKind, SourceLocation Loc);
1604
  void warnIfMutexHeld(const NamedDecl *D, const Expr *Exp, Expr *MutexExp,
1605
                       StringRef DiagKind);
1606
1607
  void checkAccess(const Expr *Exp, AccessKind AK,
1608
                   ProtectedOperationKind POK = POK_VarAccess);
1609
  void checkPtAccess(const Expr *Exp, AccessKind AK,
1610
                     ProtectedOperationKind POK = POK_VarAccess);
1611
1612
  void handleCall(const Expr *Exp, const NamedDecl *D, VarDecl *VD = nullptr);
1613
  void examineArguments(const FunctionDecl *FD,
1614
                        CallExpr::const_arg_iterator ArgBegin,
1615
                        CallExpr::const_arg_iterator ArgEnd,
1616
                        bool SkipFirstParam = false);
1617
1618
public:
1619
  BuildLockset(ThreadSafetyAnalyzer *Anlzr, CFGBlockInfo &Info)
1620
      : ConstStmtVisitor<BuildLockset>(), Analyzer(Anlzr), FSet(Info.EntrySet),
1621
8.70k
        LVarCtx(Info.EntryContext), CtxIndex(Info.EntryIndex) {}
1622
1623
  void VisitUnaryOperator(const UnaryOperator *UO);
1624
  void VisitBinaryOperator(const BinaryOperator *BO);
1625
  void VisitCastExpr(const CastExpr *CE);
1626
  void VisitCallExpr(const CallExpr *Exp);
1627
  void VisitCXXConstructExpr(const CXXConstructExpr *Exp);
1628
  void VisitDeclStmt(const DeclStmt *S);
1629
};
1630
1631
} // namespace
1632
1633
/// Warn if the LSet does not contain a lock sufficient to protect access
1634
/// of at least the passed in AccessKind.
1635
void BuildLockset::warnIfMutexNotHeld(const NamedDecl *D, const Expr *Exp,
1636
                                      AccessKind AK, Expr *MutexExp,
1637
                                      ProtectedOperationKind POK,
1638
3.10k
                                      StringRef DiagKind, SourceLocation Loc) {
1639
3.10k
  LockKind LK = getLockKindFromAccessKind(AK);
1640
1641
3.10k
  CapabilityExpr Cp = Analyzer->SxBuilder.translateAttrExpr(MutexExp, D, Exp);
1642
3.10k
  if (Cp.isInvalid()) {
1643
0
    warnInvalidLock(Analyzer->Handler, MutexExp, D, Exp, DiagKind);
1644
0
    return;
1645
3.10k
  } else if (Cp.shouldIgnore()) {
1646
20
    return;
1647
20
  }
1648
1649
3.08k
  if (Cp.negative()) {
1650
    // Negative capabilities act like locks excluded
1651
70
    const FactEntry *LDat = FSet.findLock(Analyzer->FactMan, !Cp);
1652
70
    if (LDat) {
1653
10
      Analyzer->Handler.handleFunExcludesLock(
1654
10
          DiagKind, D->getNameAsString(), (!Cp).toString(), Loc);
1655
10
      return;
1656
10
    }
1657
1658
    // If this does not refer to a negative capability in the same class,
1659
    // then stop here.
1660
60
    if (!Analyzer->inCurrentScope(Cp))
1661
18
      return;
1662
1663
    // Otherwise the negative requirement must be propagated to the caller.
1664
42
    LDat = FSet.findLock(Analyzer->FactMan, Cp);
1665
42
    if (!LDat) {
1666
14
      Analyzer->Handler.handleNegativeNotHeld(D, Cp.toString(), Loc);
1667
14
    }
1668
42
    return;
1669
60
  }
1670
1671
3.01k
  const FactEntry *LDat = FSet.findLockUniv(Analyzer->FactMan, Cp);
1672
3.01k
  bool NoError = true;
1673
3.01k
  if (!LDat) {
1674
    // No exact match found.  Look for a partial match.
1675
1.16k
    LDat = FSet.findPartialMatch(Analyzer->FactMan, Cp);
1676
1.16k
    if (LDat) {
1677
      // Warn that there's no precise match.
1678
76
      std::string PartMatchStr = LDat->toString();
1679
76
      StringRef   PartMatchName(PartMatchStr);
1680
76
      Analyzer->Handler.handleMutexNotHeld(DiagKind, D, POK, Cp.toString(),
1681
76
                                           LK, Loc, &PartMatchName);
1682
1.08k
    } else {
1683
      // Warn that there's no match at all.
1684
1.08k
      Analyzer->Handler.handleMutexNotHeld(DiagKind, D, POK, Cp.toString(),
1685
1.08k
                                           LK, Loc);
1686
1.08k
    }
1687
1.16k
    NoError = false;
1688
1.16k
  }
1689
  // Make sure the mutex we found is the right kind.
1690
3.01k
  if (NoError && 
LDat1.85k
&&
!LDat->isAtLeast(LK)1.85k
) {
1691
60
    Analyzer->Handler.handleMutexNotHeld(DiagKind, D, POK, Cp.toString(),
1692
60
                                         LK, Loc);
1693
60
  }
1694
3.01k
}
1695
1696
/// Warn if the LSet contains the given lock.
1697
void BuildLockset::warnIfMutexHeld(const NamedDecl *D, const Expr *Exp,
1698
174
                                   Expr *MutexExp, StringRef DiagKind) {
1699
174
  CapabilityExpr Cp = Analyzer->SxBuilder.translateAttrExpr(MutexExp, D, Exp);
1700
174
  if (Cp.isInvalid()) {
1701
0
    warnInvalidLock(Analyzer->Handler, MutexExp, D, Exp, DiagKind);
1702
0
    return;
1703
174
  } else if (Cp.shouldIgnore()) {
1704
0
    return;
1705
0
  }
1706
1707
174
  const FactEntry *LDat = FSet.findLock(Analyzer->FactMan, Cp);
1708
174
  if (LDat) {
1709
67
    Analyzer->Handler.handleFunExcludesLock(
1710
67
        DiagKind, D->getNameAsString(), Cp.toString(), Exp->getExprLoc());
1711
67
  }
1712
174
}
1713
1714
/// Checks guarded_by and pt_guarded_by attributes.
/// Whenever we identify an access (read or write) to a DeclRefExpr that is
/// marked with guarded_by, we must ensure the appropriate mutexes are held.
/// Similarly, we check if the access is to an expression that dereferences
/// a pointer marked with pt_guarded_by.
void BuildLockset::checkAccess(const Expr *Exp, AccessKind AK,
                               ProtectedOperationKind POK) {
  Exp = Exp->IgnoreImplicit()->IgnoreParenCasts();

  SourceLocation Loc = Exp->getExprLoc();

  // Local variables of reference type cannot be re-assigned;
  // map them to their initializer.
  while (const auto *DRE = dyn_cast<DeclRefExpr>(Exp)) {
    const auto *VD = dyn_cast<VarDecl>(DRE->getDecl()->getCanonicalDecl());
    if (VD && VD->isLocalVarDecl() && VD->getType()->isReferenceType()) {
      if (const auto *E = VD->getInit()) {
        // Guard against self-initialization. e.g., int &i = i;
        if (E == Exp)
          break;
        Exp = E;
        // The initializer may itself be (or contain) another reference
        // variable; keep unwrapping.
        continue;
      }
    }
    break;
  }

  if (const auto *UO = dyn_cast<UnaryOperator>(Exp)) {
    // For dereferences: the pointee is protected by pt_guarded_by, so hand
    // off to checkPtAccess on the pointer operand.
    if (UO->getOpcode() == UO_Deref)
      checkPtAccess(UO->getSubExpr(), AK, POK);
    return;
  }

  if (const auto *AE = dyn_cast<ArraySubscriptExpr>(Exp)) {
    // An array subscript is a dereference of the base pointer.
    checkPtAccess(AE->getLHS(), AK, POK);
    return;
  }

  if (const auto *ME = dyn_cast<MemberExpr>(Exp)) {
    // Check the object expression; note this deliberately does NOT return,
    // so the member access itself is still checked against its own
    // guarded_by attributes below.
    if (ME->isArrow())
      checkPtAccess(ME->getBase(), AK, POK);
    else
      checkAccess(ME->getBase(), AK, POK);
  }

  const ValueDecl *D = getValueDecl(Exp);
  if (!D || !D->hasAttrs())
    return;

  // guarded_var: warn only if no capabilities at all are held here.
  if (D->hasAttr<GuardedVarAttr>() && FSet.isEmpty(Analyzer->FactMan)) {
    Analyzer->Handler.handleNoMutexHeld("mutex", D, POK, AK, Loc);
  }

  // guarded_by: each named capability must be held for this access.
  for (const auto *I : D->specific_attrs<GuardedByAttr>())
    warnIfMutexNotHeld(D, Exp, AK, I->getArg(), POK,
                       ClassifyDiagnostic(I), Loc);
}
1772
1773
/// Checks pt_guarded_by and pt_guarded_var attributes.
1774
/// POK is the same  operationKind that was passed to checkAccess.
1775
void BuildLockset::checkPtAccess(const Expr *Exp, AccessKind AK,
1776
4.93k
                                 ProtectedOperationKind POK) {
1777
6.12k
  while (true) {
1778
6.12k
    if (const auto *PE = dyn_cast<ParenExpr>(Exp)) {
1779
4
      Exp = PE->getSubExpr();
1780
4
      continue;
1781
4
    }
1782
6.12k
    if (const auto *CE = dyn_cast<CastExpr>(Exp)) {
1783
1.25k
      if (CE->getCastKind() == CK_ArrayToPointerDecay) {
1784
        // If it's an actual array, and not a pointer, then it's elements
1785
        // are protected by GUARDED_BY, not PT_GUARDED_BY;
1786
60
        checkAccess(CE->getSubExpr(), AK, POK);
1787
60
        return;
1788
60
      }
1789
1.19k
      Exp = CE->getSubExpr();
1790
1.19k
      continue;
1791
1.25k
    }
1792
4.87k
    break;
1793
6.12k
  }
1794
1795
  // Pass by reference warnings are under a different flag.
1796
4.87k
  ProtectedOperationKind PtPOK = POK_VarDereference;
1797
4.87k
  if (POK == POK_PassByRef) 
PtPOK = POK_PtPassByRef130
;
1798
1799
4.87k
  const ValueDecl *D = getValueDecl(Exp);
1800
4.87k
  if (!D || 
!D->hasAttrs()1.16k
)
1801
4.42k
    return;
1802
1803
442
  if (D->hasAttr<PtGuardedVarAttr>() && 
FSet.isEmpty(Analyzer->FactMan)10
)
1804
9
    Analyzer->Handler.handleNoMutexHeld("mutex", D, PtPOK, AK,
1805
9
                                        Exp->getExprLoc());
1806
1807
442
  for (auto const *I : D->specific_attrs<PtGuardedByAttr>())
1808
388
    warnIfMutexNotHeld(D, Exp, AK, I->getArg(), PtPOK,
1809
388
                       ClassifyDiagnostic(I), Exp->getExprLoc());
1810
442
}
1811
1812
/// Process a function call, method call, constructor call,
/// or destructor call.  This involves looking at the attributes on the
/// corresponding function/method/constructor/destructor, issuing warnings,
/// and updating the locksets accordingly.
///
/// FIXME: For classes annotated with one of the guarded annotations, we need
/// to treat const method calls as reads and non-const method calls as writes,
/// and check that the appropriate locks are held. Non-const method calls with
/// the same signature as const method calls can be also treated as reads.
///
/// \param Exp The call (or fake constructor call) being processed.
/// \param D   The callee declaration whose attributes drive the update.
/// \param VD  When non-null, the variable being initialized by a constructor
///            call; used to detect scoped-lockable construction.
void BuildLockset::handleCall(const Expr *Exp, const NamedDecl *D,
                              VarDecl *VD) {
  SourceLocation Loc = Exp->getExprLoc();
  CapExprSet ExclusiveLocksToAdd, SharedLocksToAdd;
  CapExprSet ExclusiveLocksToRemove, SharedLocksToRemove, GenericLocksToRemove;
  CapExprSet ScopedReqsAndExcludes;
  StringRef CapDiagKind = "mutex";

  // Figure out if we're constructing an object of scoped lockable class
  bool isScopedVar = false;
  if (VD) {
    if (const auto *CD = dyn_cast<const CXXConstructorDecl>(D)) {
      const CXXRecordDecl* PD = CD->getParent();
      if (PD && PD->hasAttr<ScopedLockableAttr>())
        isScopedVar = true;
    }
  }

  // First pass: collect the capability sets implied by each thread-safety
  // attribute on the callee. The fact set is not modified yet, except for
  // assert-style attributes which add facts immediately.
  for(const Attr *At : D->attrs()) {
    switch (At->getKind()) {
      // When we encounter a lock function, we need to add the lock to our
      // lockset.
      case attr::AcquireCapability: {
        const auto *A = cast<AcquireCapabilityAttr>(At);
        Analyzer->getMutexIDs(A->isShared() ? SharedLocksToAdd
                                            : ExclusiveLocksToAdd,
                              A, Exp, D, VD);

        CapDiagKind = ClassifyDiagnostic(A);
        break;
      }

      // An assert will add a lock to the lockset, but will not generate
      // a warning if it is already there, and will not generate a warning
      // if it is not removed.
      case attr::AssertExclusiveLock: {
        const auto *A = cast<AssertExclusiveLockAttr>(At);

        CapExprSet AssertLocks;
        Analyzer->getMutexIDs(AssertLocks, A, Exp, D, VD);
        for (const auto &AssertLock : AssertLocks)
          Analyzer->addLock(
              FSet,
              std::make_unique<LockableFactEntry>(AssertLock, LK_Exclusive, Loc,
                                                  FactEntry::Asserted),
              ClassifyDiagnostic(A));
        break;
      }
      case attr::AssertSharedLock: {
        const auto *A = cast<AssertSharedLockAttr>(At);

        CapExprSet AssertLocks;
        Analyzer->getMutexIDs(AssertLocks, A, Exp, D, VD);
        for (const auto &AssertLock : AssertLocks)
          Analyzer->addLock(
              FSet,
              std::make_unique<LockableFactEntry>(AssertLock, LK_Shared, Loc,
                                                  FactEntry::Asserted),
              ClassifyDiagnostic(A));
        break;
      }

      // Generic form of the assert attributes; shared vs. exclusive is
      // carried on the attribute itself.
      case attr::AssertCapability: {
        const auto *A = cast<AssertCapabilityAttr>(At);
        CapExprSet AssertLocks;
        Analyzer->getMutexIDs(AssertLocks, A, Exp, D, VD);
        for (const auto &AssertLock : AssertLocks)
          Analyzer->addLock(FSet,
                            std::make_unique<LockableFactEntry>(
                                AssertLock,
                                A->isShared() ? LK_Shared : LK_Exclusive, Loc,
                                FactEntry::Asserted),
                            ClassifyDiagnostic(A));
        break;
      }

      // When we encounter an unlock function, we need to remove unlocked
      // mutexes from the lockset, and flag a warning if they are not there.
      case attr::ReleaseCapability: {
        const auto *A = cast<ReleaseCapabilityAttr>(At);
        if (A->isGeneric())
          Analyzer->getMutexIDs(GenericLocksToRemove, A, Exp, D, VD);
        else if (A->isShared())
          Analyzer->getMutexIDs(SharedLocksToRemove, A, Exp, D, VD);
        else
          Analyzer->getMutexIDs(ExclusiveLocksToRemove, A, Exp, D, VD);

        CapDiagKind = ClassifyDiagnostic(A);
        break;
      }

      // The callee requires a capability to be held at the call site.
      case attr::RequiresCapability: {
        const auto *A = cast<RequiresCapabilityAttr>(At);
        for (auto *Arg : A->args()) {
          warnIfMutexNotHeld(D, Exp, A->isShared() ? AK_Read : AK_Written, Arg,
                             POK_FunctionCall, ClassifyDiagnostic(A),
                             Exp->getExprLoc());
          // use for adopting a lock
          if (isScopedVar)
            Analyzer->getMutexIDs(ScopedReqsAndExcludes, A, Exp, D, VD);
        }
        break;
      }

      // The callee must NOT be holding the listed capabilities.
      case attr::LocksExcluded: {
        const auto *A = cast<LocksExcludedAttr>(At);
        for (auto *Arg : A->args()) {
          warnIfMutexHeld(D, Exp, Arg, ClassifyDiagnostic(A));
          // use for deferring a lock
          if (isScopedVar)
            Analyzer->getMutexIDs(ScopedReqsAndExcludes, A, Exp, D, VD);
        }
        break;
      }

      // Ignore attributes unrelated to thread-safety
      default:
        break;
    }
  }

  // Remove locks first to allow lock upgrading/downgrading.
  // FIXME -- should only fully remove if the attribute refers to 'this'.
  bool Dtor = isa<CXXDestructorDecl>(D);
  for (const auto &M : ExclusiveLocksToRemove)
    Analyzer->removeLock(FSet, M, Loc, Dtor, LK_Exclusive, CapDiagKind);
  for (const auto &M : SharedLocksToRemove)
    Analyzer->removeLock(FSet, M, Loc, Dtor, LK_Shared, CapDiagKind);
  for (const auto &M : GenericLocksToRemove)
    Analyzer->removeLock(FSet, M, Loc, Dtor, LK_Generic, CapDiagKind);

  // Add locks.
  // Locks acquired by a scoped-lockable constructor are "managed": the
  // destructor is expected to release them.
  FactEntry::SourceKind Source =
      isScopedVar ? FactEntry::Managed : FactEntry::Acquired;
  for (const auto &M : ExclusiveLocksToAdd)
    Analyzer->addLock(
        FSet, std::make_unique<LockableFactEntry>(M, LK_Exclusive, Loc, Source),
        CapDiagKind);
  for (const auto &M : SharedLocksToAdd)
    Analyzer->addLock(
        FSet, std::make_unique<LockableFactEntry>(M, LK_Shared, Loc, Source),
        CapDiagKind);

  if (isScopedVar) {
    // Add the managing object as a dummy mutex, mapped to the underlying mutex.
    SourceLocation MLoc = VD->getLocation();
    DeclRefExpr DRE(VD->getASTContext(), VD, false, VD->getType(), VK_LValue,
                    VD->getLocation());
    // FIXME: does this store a pointer to DRE?
    CapabilityExpr Scp = Analyzer->SxBuilder.translateAttrExpr(&DRE, nullptr);

    // The scoped entry records everything the guard object manages:
    // capabilities it acquired, adopted (requires), deferred (excludes),
    // and released at construction time.
    auto ScopedEntry = std::make_unique<ScopedLockableFactEntry>(Scp, MLoc);
    for (const auto &M : ExclusiveLocksToAdd)
      ScopedEntry->addLock(M);
    for (const auto &M : SharedLocksToAdd)
      ScopedEntry->addLock(M);
    for (const auto &M : ScopedReqsAndExcludes)
      ScopedEntry->addLock(M);
    for (const auto &M : ExclusiveLocksToRemove)
      ScopedEntry->addExclusiveUnlock(M);
    for (const auto &M : SharedLocksToRemove)
      ScopedEntry->addSharedUnlock(M);
    Analyzer->addLock(FSet, std::move(ScopedEntry), CapDiagKind);
  }
}
1987
1988
/// For unary operations which read and write a variable, we need to
1989
/// check whether we hold any required mutexes. Reads are checked in
1990
/// VisitCastExpr.
1991
840
void BuildLockset::VisitUnaryOperator(const UnaryOperator *UO) {
1992
840
  switch (UO->getOpcode()) {
1993
4
    case UO_PostDec:
1994
12
    case UO_PostInc:
1995
16
    case UO_PreDec:
1996
60
    case UO_PreInc:
1997
60
      checkAccess(UO->getSubExpr(), AK_Written);
1998
60
      break;
1999
780
    default:
2000
780
      break;
2001
840
  }
2002
840
}
2003
2004
/// For binary operations which assign to a variable (writes), we need to check
2005
/// whether we hold any required mutexes.
2006
/// FIXME: Deal with non-primitive types.
2007
2.10k
void BuildLockset::VisitBinaryOperator(const BinaryOperator *BO) {
2008
2.10k
  if (!BO->isAssignmentOp())
2009
531
    return;
2010
2011
  // adjust the context
2012
1.57k
  LVarCtx = Analyzer->LocalVarMap.getNextContext(CtxIndex, BO, LVarCtx);
2013
2014
1.57k
  checkAccess(BO->getLHS(), AK_Written);
2015
1.57k
}
2016
2017
/// Whenever we do an LValue to Rvalue cast, we are reading a variable and
2018
/// need to ensure we hold any required mutexes.
2019
/// FIXME: Deal with non-primitive types.
2020
5.99k
void BuildLockset::VisitCastExpr(const CastExpr *CE) {
2021
5.99k
  if (CE->getCastKind() != CK_LValueToRValue)
2022
3.09k
    return;
2023
2.89k
  checkAccess(CE->getSubExpr(), AK_Read);
2024
2.89k
}
2025
2026
void BuildLockset::examineArguments(const FunctionDecl *FD,
2027
                                    CallExpr::const_arg_iterator ArgBegin,
2028
                                    CallExpr::const_arg_iterator ArgEnd,
2029
6.40k
                                    bool SkipFirstParam) {
2030
  // Currently we can't do anything if we don't know the function declaration.
2031
6.40k
  if (!FD)
2032
12
    return;
2033
2034
  // NO_THREAD_SAFETY_ANALYSIS does double duty here.  Normally it
2035
  // only turns off checking within the body of a function, but we also
2036
  // use it to turn off checking in arguments to the function.  This
2037
  // could result in some false negatives, but the alternative is to
2038
  // create yet another attribute.
2039
6.39k
  if (FD->hasAttr<NoThreadSafetyAnalysisAttr>())
2040
40
    return;
2041
2042
6.35k
  const ArrayRef<ParmVarDecl *> Params = FD->parameters();
2043
6.35k
  auto Param = Params.begin();
2044
6.35k
  if (SkipFirstParam)
2045
12
    ++Param;
2046
2047
  // There can be default arguments, so we stop when one iterator is at end().
2048
8.47k
  for (auto Arg = ArgBegin; Param != Params.end() && 
Arg != ArgEnd2.12k
;
2049
6.35k
       
++Param, ++Arg2.12k
) {
2050
2.12k
    QualType Qt = (*Param)->getType();
2051
2.12k
    if (Qt->isReferenceType())
2052
295
      checkAccess(*Arg, AK_Read, POK_PassByRef);
2053
2.12k
  }
2054
6.35k
}
2055
2056
5.75k
void BuildLockset::VisitCallExpr(const CallExpr *Exp) {
2057
5.75k
  if (const auto *CE = dyn_cast<CXXMemberCallExpr>(Exp)) {
2058
4.08k
    const auto *ME = dyn_cast<MemberExpr>(CE->getCallee());
2059
    // ME can be null when calling a method pointer
2060
4.08k
    const CXXMethodDecl *MD = CE->getMethodDecl();
2061
2062
4.08k
    if (ME && 
MD4.08k
) {
2063
4.08k
      if (ME->isArrow()) {
2064
        // Should perhaps be AK_Written if !MD->isConst().
2065
774
        checkPtAccess(CE->getImplicitObjectArgument(), AK_Read);
2066
3.30k
      } else {
2067
        // Should perhaps be AK_Written if !MD->isConst().
2068
3.30k
        checkAccess(CE->getImplicitObjectArgument(), AK_Read);
2069
3.30k
      }
2070
4.08k
    }
2071
2072
4.08k
    examineArguments(CE->getDirectCallee(), CE->arg_begin(), CE->arg_end());
2073
4.08k
  } else 
if (const auto *1.66k
OE1.66k
= dyn_cast<CXXOperatorCallExpr>(Exp)) {
2074
448
    auto OEop = OE->getOperator();
2075
448
    switch (OEop) {
2076
32
      case OO_Equal: {
2077
32
        const Expr *Target = OE->getArg(0);
2078
32
        const Expr *Source = OE->getArg(1);
2079
32
        checkAccess(Target, AK_Written);
2080
32
        checkAccess(Source, AK_Read);
2081
32
        break;
2082
0
      }
2083
116
      case OO_Star:
2084
276
      case OO_Arrow:
2085
360
      case OO_Subscript:
2086
360
        if (!(OEop == OO_Star && 
OE->getNumArgs() > 1116
)) {
2087
          // Grrr.  operator* can be multiplication...
2088
356
          checkPtAccess(OE->getArg(0), AK_Read);
2089
356
        }
2090
360
        LLVM_FALLTHROUGH;
2091
416
      default: {
2092
        // TODO: get rid of this, and rely on pass-by-ref instead.
2093
416
        const Expr *Obj = OE->getArg(0);
2094
416
        checkAccess(Obj, AK_Read);
2095
        // Check the remaining arguments. For method operators, the first
2096
        // argument is the implicit self argument, and doesn't appear in the
2097
        // FunctionDecl, but for non-methods it does.
2098
416
        const FunctionDecl *FD = OE->getDirectCallee();
2099
416
        examineArguments(FD, std::next(OE->arg_begin()), OE->arg_end(),
2100
416
                         /*SkipFirstParam*/ !isa<CXXMethodDecl>(FD));
2101
416
        break;
2102
360
      }
2103
448
    }
2104
1.21k
  } else {
2105
1.21k
    examineArguments(Exp->getDirectCallee(), Exp->arg_begin(), Exp->arg_end());
2106
1.21k
  }
2107
2108
5.75k
  auto *D = dyn_cast_or_null<NamedDecl>(Exp->getCalleeDecl());
2109
5.75k
  if(!D || !D->hasAttrs())
2110
1.45k
    return;
2111
4.29k
  handleCall(Exp, D);
2112
4.29k
}
2113
2114
717
void BuildLockset::VisitCXXConstructExpr(const CXXConstructExpr *Exp) {
2115
717
  const CXXConstructorDecl *D = Exp->getConstructor();
2116
717
  if (D && D->isCopyConstructor()) {
2117
31
    const Expr* Source = Exp->getArg(0);
2118
31
    checkAccess(Source, AK_Read);
2119
686
  } else {
2120
686
    examineArguments(D, Exp->arg_begin(), Exp->arg_end());
2121
686
  }
2122
717
}
2123
2124
/// Pick the constructor that models returning an object of type \p RD by
/// value. A move constructor is preferred over a copy constructor; if there
/// is more than one of the same kind we arbitrarily take the first declared
/// one rather than trying to guess which is more appropriate.
static CXXConstructorDecl *
findConstructorForByValueReturn(const CXXRecordDecl *RD) {
  CXXConstructorDecl *FirstCopy = nullptr;
  for (auto *Candidate : RD->ctors()) {
    if (Candidate->isDeleted())
      continue;
    if (Candidate->isMoveConstructor())
      return Candidate; // Best possible match; stop searching.
    if (Candidate->isCopyConstructor() && !FirstCopy)
      FirstCopy = Candidate;
  }
  // May be null if the class has neither usable constructor.
  return FirstCopy;
}
2141
2142
/// Synthesize a CXXConstructExpr that invokes \p CD on \p Args, used to model
/// a constructor call that does not appear explicitly in the AST.
static Expr *buildFakeCtorCall(CXXConstructorDecl *CD, ArrayRef<Expr *> Args,
                               SourceLocation Loc) {
  ASTContext &Ctx = CD->getASTContext();
  QualType RecordTy = Ctx.getRecordType(CD->getParent());
  return CXXConstructExpr::Create(
      Ctx, RecordTy, Loc, CD, /*Elidable=*/true, Args,
      /*HadMultipleCandidates=*/false, /*ListInitialization=*/false,
      /*StdInitListInitialization=*/false, /*ZeroInitialization=*/false,
      CXXConstructExpr::CK_Complete, SourceRange(Loc, Loc));
}
2150
2151
1.03k
/// Process variable declarations. A declaration whose initializer constructs
/// a scoped-lockable object (directly, or via a by-value return from a
/// function) may acquire or release capabilities; route it to handleCall().
void BuildLockset::VisitDeclStmt(const DeclStmt *S) {
  // adjust the context
  LVarCtx = Analyzer->LocalVarMap.getNextContext(CtxIndex, S, LVarCtx);

  for (auto *D : S->getDeclGroup()) {
    if (auto *VD = dyn_cast_or_null<VarDecl>(D)) {
      Expr *E = VD->getInit();
      if (!E)
        continue;
      E = E->IgnoreParens();

      // handle constructors that involve temporaries
      // Peel off wrapper nodes (cleanups, no-op/conversion casts, bound
      // temporaries) to expose the underlying constructor or call.
      if (auto *EWC = dyn_cast<ExprWithCleanups>(E))
        E = EWC->getSubExpr()->IgnoreParens();
      if (auto *CE = dyn_cast<CastExpr>(E))
        if (CE->getCastKind() == CK_NoOp ||
            CE->getCastKind() == CK_ConstructorConversion ||
            CE->getCastKind() == CK_UserDefinedConversion)
          E = CE->getSubExpr()->IgnoreParens();
      if (auto *BTE = dyn_cast<CXXBindTemporaryExpr>(E))
        E = BTE->getSubExpr()->IgnoreParens();

      if (const auto *CE = dyn_cast<CXXConstructExpr>(E)) {
        // Direct construction: process the constructor's attributes.
        const auto *CtorD = dyn_cast_or_null<NamedDecl>(CE->getConstructor());
        if (!CtorD || !CtorD->hasAttrs())
          continue;
        handleCall(E, CtorD, VD);
      } else if (isa<CallExpr>(E) && E->isPRValue()) {
        // If the object is initialized by a function call that returns a
        // scoped lockable by value, use the attributes on the copy or move
        // constructor to figure out what effect that should have on the
        // lockset.
        // FIXME: Is this really the best way to handle this situation?
        auto *RD = E->getType()->getAsCXXRecordDecl();
        if (!RD || !RD->hasAttr<ScopedLockableAttr>())
          continue;
        CXXConstructorDecl *CtorD = findConstructorForByValueReturn(RD);
        if (!CtorD || !CtorD->hasAttrs())
          continue;
        // Model the initialization as an explicit constructor call.
        handleCall(buildFakeCtorCall(CtorD, {E}, E->getBeginLoc()), CtorD, VD);
      }
    }
  }
}
2195
2196
/// Given two facts merging on a join point, possibly warn and decide whether to
/// keep or replace.
///
/// \param CanModify Whether we can replace \p A by \p B.
/// \return  false if we should keep \p A, true if we should take \p B.
bool ThreadSafetyAnalyzer::join(const FactEntry &A, const FactEntry &B,
                                bool CanModify) {
  if (A.kind() != B.kind()) {
    // The two branches hold the capability in different modes
    // (shared vs. exclusive).
    // For managed capabilities, the destructor should unlock in the right mode
    // anyway. For asserted capabilities no unlocking is needed.
    if ((A.managed() || A.asserted()) && (B.managed() || B.asserted())) {
      // The shared capability subsumes the exclusive capability, if possible.
      bool ShouldTakeB = B.kind() == LK_Shared;
      if (CanModify || !ShouldTakeB)
        return ShouldTakeB;
    }
    // Otherwise the mode mismatch is a real problem: report it.
    Handler.handleExclusiveAndShared("mutex", B.toString(), B.loc(), A.loc());
    // Take the exclusive capability to reduce further warnings.
    return CanModify && B.kind() == LK_Exclusive;
  } else {
    // Same mode on both sides.
    // The non-asserted capability is the one we want to track.
    return CanModify && A.asserted() && !B.asserted();
  }
}
2220
2221
/// Compute the intersection of two locksets and issue warnings for any
/// locks in the symmetric difference.
///
/// This function is used at a merge point in the CFG when comparing the lockset
/// of each branch being merged. For example, given the following sequence:
/// A; if () then B; else C; D; we need to check that the lockset after B and C
/// are the same. In the event of a difference, we use the intersection of these
/// two locksets at the start of D.
///
/// \param EntrySet A lockset for entry into a (possibly new) block.
/// \param ExitSet The lockset on exiting a preceding block.
/// \param JoinLoc The location of the join point for error reporting
/// \param EntryLEK The warning if a mutex is missing from \p EntrySet.
/// \param ExitLEK The warning if a mutex is missing from \p ExitSet.
void ThreadSafetyAnalyzer::intersectAndWarn(FactSet &EntrySet,
                                            const FactSet &ExitSet,
                                            SourceLocation JoinLoc,
                                            LockErrorKind EntryLEK,
                                            LockErrorKind ExitLEK) {
  // Snapshot EntrySet before the first pass mutates it, so the second pass
  // iterates over the original entry facts.
  FactSet EntrySetOrig = EntrySet;

  // Find locks in ExitSet that conflict or are not in EntrySet, and warn.
  for (const auto &Fact : ExitSet) {
    const FactEntry &ExitFact = FactMan[Fact];

    FactSet::iterator EntryIt = EntrySet.findLockIter(FactMan, ExitFact);
    if (EntryIt != EntrySet.end()) {
      // Held on both paths: join() decides which fact to keep and warns on
      // a shared/exclusive mode conflict.
      if (join(FactMan[*EntryIt], ExitFact,
               EntryLEK != LEK_LockedSomeLoopIterations))
        *EntryIt = Fact;
    } else if (!ExitFact.managed()) {
      // Held only on the exiting path; warn unless a scope object manages it.
      ExitFact.handleRemovalFromIntersection(ExitSet, FactMan, JoinLoc,
                                             EntryLEK, Handler);
    }
  }

  // Find locks in EntrySet that are not in ExitSet, and remove them.
  for (const auto &Fact : EntrySetOrig) {
    const FactEntry *EntryFact = &FactMan[Fact];
    const FactEntry *ExitFact = ExitSet.findLock(FactMan, *EntryFact);

    if (!ExitFact) {
      if (!EntryFact->managed() || ExitLEK == LEK_LockedSomeLoopIterations)
        EntryFact->handleRemovalFromIntersection(EntrySetOrig, FactMan, JoinLoc,
                                                 ExitLEK, Handler);
      // Only drop the fact when merging predecessors; loop checks keep it.
      if (ExitLEK == LEK_LockedSomePredecessors)
        EntrySet.removeLock(FactMan, *EntryFact);
    }
  }
}
2271
2272
// Return true if block B never continues to its successors.
2273
7.01k
static bool neverReturns(const CFGBlock *B) {
2274
7.01k
  if (B->hasNoReturnElement())
2275
24
    return true;
2276
6.99k
  if (B->empty())
2277
2.38k
    return false;
2278
2279
4.60k
  CFGElement Last = B->back();
2280
4.60k
  if (Optional<CFGStmt> S = Last.getAs<CFGStmt>()) {
2281
4.27k
    if (isa<CXXThrowExpr>(S->getStmt()))
2282
4
      return true;
2283
4.27k
  }
2284
4.60k
  return false;
2285
4.60k
}
2286
2287
/// Check a function's CFG for thread-safety violations.
///
/// We traverse the blocks in the CFG, compute the set of mutexes that are held
/// at the end of each block, and issue warnings for thread safety violations.
/// Each block in the CFG is traversed exactly once.
///
/// \param AC  The analysis context for the function being checked; supplies
///            the declaration, the CFG, and the ASTContext used below.
void ThreadSafetyAnalyzer::runAnalysis(AnalysisDeclContext &AC) {
  // TODO: this whole function needs be rewritten as a visitor for CFGWalker.
  // For now, we just use the walker to set things up.
  threadSafety::CFGWalker walker;
  if (!walker.init(AC))
    return;

  // AC.dumpCFG(true);
  // threadSafety::printSCFG(walker);

  CFG *CFGraph = walker.getGraph();
  const NamedDecl *D = walker.getDecl();
  const auto *CurrentFunction = dyn_cast<FunctionDecl>(D);
  CurrentMethod = dyn_cast<CXXMethodDecl>(D);

  // Respect the opt-out attribute: skip the whole analysis for this function.
  if (D->hasAttr<NoThreadSafetyAnalysisAttr>())
    return;

  // FIXME: Do something a bit more intelligent inside constructor and
  // destructor code.  Constructors and destructors must assume unique access
  // to 'this', so checks on member variable access is disabled, but we should
  // still enable checks on other objects.
  if (isa<CXXConstructorDecl>(D))
    return;  // Don't check inside constructors.
  if (isa<CXXDestructorDecl>(D))
    return;  // Don't check inside destructors.

  Handler.enterFunction(CurrentFunction);

  // One CFGBlockInfo per CFG block, indexed by block ID.
  BlockInfo.resize(CFGraph->getNumBlockIDs(),
    CFGBlockInfo::getEmptyBlockInfo(LocalVarMap));

  // We need to explore the CFG via a "topological" ordering.
  // That way, we will be guaranteed to have information about required
  // predecessor locksets when exploring a new block.
  const PostOrderCFGView *SortedGraph = walker.getSortedGraph();
  PostOrderCFGView::CFGBlockSet VisitedBlocks(CFGraph);

  // Mark entry block as reachable
  BlockInfo[CFGraph->getEntry().getBlockID()].Reachable = true;

  // Compute SSA names for local variables
  LocalVarMap.traverseCFG(CFGraph, SortedGraph, BlockInfo);

  // Fill in source locations for all CFGBlocks.
  findBlockLocations(CFGraph, SortedGraph, BlockInfo);

  // Capabilities declared on the function itself (via attributes); these
  // adjust the expected exit set computed at the bottom of this function.
  CapExprSet ExclusiveLocksAcquired;
  CapExprSet SharedLocksAcquired;
  CapExprSet LocksReleased;

  // Add locks from exclusive_locks_required and shared_locks_required
  // to initial lockset. Also turn off checking for lock and unlock functions.
  // FIXME: is there a more intelligent way to check lock/unlock functions?
  if (!SortedGraph->empty() && D->hasAttrs()) {
    const CFGBlock *FirstBlock = *SortedGraph->begin();
    FactSet &InitialLockset = BlockInfo[FirstBlock->getBlockID()].EntrySet;

    CapExprSet ExclusiveLocksToAdd;
    CapExprSet SharedLocksToAdd;
    StringRef CapDiagKind = "mutex";

    SourceLocation Loc = D->getLocation();
    for (const auto *Attr : D->attrs()) {
      Loc = Attr->getLocation();
      if (const auto *A = dyn_cast<RequiresCapabilityAttr>(Attr)) {
        // REQUIRES(...) capabilities are assumed held on entry.
        getMutexIDs(A->isShared() ? SharedLocksToAdd : ExclusiveLocksToAdd, A,
                    nullptr, D);
        CapDiagKind = ClassifyDiagnostic(A);
      } else if (const auto *A = dyn_cast<ReleaseCapabilityAttr>(Attr)) {
        // UNLOCK_FUNCTION() is used to hide the underlying lock implementation.
        // We must ignore such methods.
        if (A->args_size() == 0)
          return;
        // A release function holds the capability on entry and releases it.
        getMutexIDs(A->isShared() ? SharedLocksToAdd : ExclusiveLocksToAdd, A,
                    nullptr, D);
        getMutexIDs(LocksReleased, A, nullptr, D);
        CapDiagKind = ClassifyDiagnostic(A);
      } else if (const auto *A = dyn_cast<AcquireCapabilityAttr>(Attr)) {
        if (A->args_size() == 0)
          return;
        // An acquire function is expected to hold the capability on exit.
        getMutexIDs(A->isShared() ? SharedLocksAcquired
                                  : ExclusiveLocksAcquired,
                    A, nullptr, D);
        CapDiagKind = ClassifyDiagnostic(A);
      } else if (isa<ExclusiveTrylockFunctionAttr>(Attr)) {
        // Don't try to check trylock functions for now.
        return;
      } else if (isa<SharedTrylockFunctionAttr>(Attr)) {
        // Don't try to check trylock functions for now.
        return;
      } else if (isa<TryAcquireCapabilityAttr>(Attr)) {
        // Don't try to check trylock functions for now.
        return;
      }
    }

    // Seed the entry block's lockset with the declared capabilities.
    // FIXME -- Loc can be wrong here.
    for (const auto &Mu : ExclusiveLocksToAdd) {
      auto Entry = std::make_unique<LockableFactEntry>(Mu, LK_Exclusive, Loc,
                                                       FactEntry::Declared);
      addLock(InitialLockset, std::move(Entry), CapDiagKind, true);
    }
    for (const auto &Mu : SharedLocksToAdd) {
      auto Entry = std::make_unique<LockableFactEntry>(Mu, LK_Shared, Loc,
                                                       FactEntry::Declared);
      addLock(InitialLockset, std::move(Entry), CapDiagKind, true);
    }
  }

  // Main dataflow loop: visit blocks in topological order, propagating
  // locksets along CFG edges.
  for (const auto *CurrBlock : *SortedGraph) {
    unsigned CurrBlockID = CurrBlock->getBlockID();
    CFGBlockInfo *CurrBlockInfo = &BlockInfo[CurrBlockID];

    // Use the default initial lockset in case there are no predecessors.
    VisitedBlocks.insert(CurrBlock);

    // Iterate through the predecessor blocks and warn if the lockset for all
    // predecessors is not the same. We take the entry lockset of the current
    // block to be the intersection of all previous locksets.
    // FIXME: By keeping the intersection, we may output more errors in future
    // for a lock which is not in the intersection, but was in the union. We
    // may want to also keep the union in future. As an example, let's say
    // the intersection contains Mutex L, and the union contains L and M.
    // Later we unlock M. At this point, we would output an error because we
    // never locked M; although the real error is probably that we forgot to
    // lock M on all code paths. Conversely, let's say that later we lock M.
    // In this case, we should compare against the intersection instead of the
    // union because the real error is probably that we forgot to unlock M on
    // all code paths.
    bool LocksetInitialized = false;
    for (CFGBlock::const_pred_iterator PI = CurrBlock->pred_begin(),
         PE  = CurrBlock->pred_end(); PI != PE; ++PI) {
      // if *PI -> CurrBlock is a back edge
      if (*PI == nullptr || !VisitedBlocks.alreadySet(*PI))
        continue;

      unsigned PrevBlockID = (*PI)->getBlockID();
      CFGBlockInfo *PrevBlockInfo = &BlockInfo[PrevBlockID];

      // Ignore edges from blocks that can't return.
      if (neverReturns(*PI) || !PrevBlockInfo->Reachable)
        continue;

      // Okay, we can reach this block from the entry.
      CurrBlockInfo->Reachable = true;

      FactSet PrevLockset;
      getEdgeLockset(PrevLockset, PrevBlockInfo->ExitSet, *PI, CurrBlock);

      if (!LocksetInitialized) {
        // First reachable predecessor: take its lockset verbatim.
        CurrBlockInfo->EntrySet = PrevLockset;
        LocksetInitialized = true;
      } else {
        // Surprisingly 'continue' doesn't always produce back edges, because
        // the CFG has empty "transition" blocks where they meet with the end
        // of the regular loop body. We still want to diagnose them as loop.
        intersectAndWarn(
            CurrBlockInfo->EntrySet, PrevLockset, CurrBlockInfo->EntryLoc,
            isa_and_nonnull<ContinueStmt>((*PI)->getTerminatorStmt())
                ? LEK_LockedSomeLoopIterations
                : LEK_LockedSomePredecessors);
      }
    }

    // Skip rest of block if it's not reachable.
    if (!CurrBlockInfo->Reachable)
      continue;

    BuildLockset LocksetBuilder(this, *CurrBlockInfo);

    // Visit all the statements in the basic block.
    for (const auto &BI : *CurrBlock) {
      switch (BI.getKind()) {
        case CFGElement::Statement: {
          CFGStmt CS = BI.castAs<CFGStmt>();
          LocksetBuilder.Visit(CS.getStmt());
          break;
        }
        // Ignore BaseDtor, MemberDtor, and TemporaryDtor for now.
        case CFGElement::AutomaticObjectDtor: {
          // Model an automatic-storage destructor as a call on the variable,
          // so scoped-lock destructors release their capabilities.
          CFGAutomaticObjDtor AD = BI.castAs<CFGAutomaticObjDtor>();
          const auto *DD = AD.getDestructorDecl(AC.getASTContext());
          if (!DD->hasAttrs())
            break;

          // Create a dummy expression,
          auto *VD = const_cast<VarDecl *>(AD.getVarDecl());
          DeclRefExpr DRE(VD->getASTContext(), VD, false,
                          VD->getType().getNonReferenceType(), VK_LValue,
                          AD.getTriggerStmt()->getEndLoc());
          LocksetBuilder.handleCall(&DRE, DD);
          break;
        }
        default:
          break;
      }
    }
    CurrBlockInfo->ExitSet = LocksetBuilder.FSet;

    // For every back edge from CurrBlock (the end of the loop) to another block
    // (FirstLoopBlock) we need to check that the Lockset of Block is equal to
    // the one held at the beginning of FirstLoopBlock. We can look up the
    // Lockset held at the beginning of FirstLoopBlock in the EntryLockSets map.
    for (CFGBlock::const_succ_iterator SI = CurrBlock->succ_begin(),
         SE  = CurrBlock->succ_end(); SI != SE; ++SI) {
      // if CurrBlock -> *SI is *not* a back edge
      if (*SI == nullptr || !VisitedBlocks.alreadySet(*SI))
        continue;

      CFGBlock *FirstLoopBlock = *SI;
      CFGBlockInfo *PreLoop = &BlockInfo[FirstLoopBlock->getBlockID()];
      CFGBlockInfo *LoopEnd = &BlockInfo[CurrBlockID];
      intersectAndWarn(PreLoop->EntrySet, LoopEnd->ExitSet, PreLoop->EntryLoc,
                       LEK_LockedSomeLoopIterations);
    }
  }

  CFGBlockInfo *Initial = &BlockInfo[CFGraph->getEntry().getBlockID()];
  CFGBlockInfo *Final   = &BlockInfo[CFGraph->getExit().getBlockID()];

  // Skip the final check if the exit block is unreachable.
  if (!Final->Reachable)
    return;

  // By default, we expect all locks held on entry to be held on exit.
  FactSet ExpectedExitSet = Initial->EntrySet;

  // Adjust the expected exit set by adding or removing locks, as declared
  // by *-LOCK_FUNCTION and UNLOCK_FUNCTION.  The intersect below will then
  // issue the appropriate warning.
  // FIXME: the location here is not quite right.
  for (const auto &Lock : ExclusiveLocksAcquired)
    ExpectedExitSet.addLock(FactMan, std::make_unique<LockableFactEntry>(
                                         Lock, LK_Exclusive, D->getLocation()));
  for (const auto &Lock : SharedLocksAcquired)
    ExpectedExitSet.addLock(FactMan, std::make_unique<LockableFactEntry>(
                                         Lock, LK_Shared, D->getLocation()));
  for (const auto &Lock : LocksReleased)
    ExpectedExitSet.removeLock(FactMan, Lock);

  // FIXME: Should we call this function for all blocks which exit the function?
  intersectAndWarn(ExpectedExitSet, Final->ExitSet, Final->ExitLoc,
                   LEK_LockedAtEndOfFunction, LEK_NotLockedAtEndOfFunction);

  Handler.leaveFunction(CurrentFunction);
}
2539
2540
/// Check a function's CFG for thread-safety violations.
2541
///
2542
/// We traverse the blocks in the CFG, compute the set of mutexes that are held
2543
/// at the end of each block, and issue warnings for thread safety violations.
2544
/// Each block in the CFG is traversed exactly once.
2545
void threadSafety::runThreadSafetyAnalysis(AnalysisDeclContext &AC,
2546
                                           ThreadSafetyHandler &Handler,
2547
2.52k
                                           BeforeSet **BSet) {
2548
2.52k
  if (!*BSet)
2549
35
    *BSet = new BeforeSet;
2550
2.52k
  ThreadSafetyAnalyzer Analyzer(Handler, *BSet);
2551
2.52k
  Analyzer.runAnalysis(AC);
2552
2.52k
}
2553
2554
91.1k
/// Release the BeforeSet cache allocated by runThreadSafetyAnalysis.
/// Deleting a null cache is a harmless no-op.
void threadSafety::threadSafetyCleanup(BeforeSet *Cache) {
  delete Cache;
}
2555
2556
/// Helper function that returns a LockKind required for the given level
2557
/// of access.
2558
3.14k
LockKind threadSafety::getLockKindFromAccessKind(AccessKind AK) {
2559
3.14k
  switch (AK) {
2560
1.26k
    case AK_Read :
2561
1.26k
      return LK_Shared;
2562
1.88k
    case AK_Written :
2563
1.88k
      return LK_Exclusive;
2564
3.14k
  }
2565
0
  llvm_unreachable("Unknown AccessKind");
2566
0
}