//===- ThreadSafety.cpp ----------------------------------------*- C++ --*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// An intra-procedural analysis for thread safety (e.g. deadlocks and race
// conditions), based on an annotation system.
//
// See http://clang.llvm.org/docs/LanguageExtensions.html#threadsafety for more
// information.
//
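// For example, the analysis will flag the unguarded write to Balance in
// badWithdraw() below. This is only a sketch: GUARDED_BY and
// EXCLUSIVE_LOCKS_REQUIRED are assumed to be the usual macro wrappers around
// the guarded_by and exclusive_locks_required attributes, and Mutex is a
// user-defined lockable class; none of these are defined in this file.
//
//   class Account {
//     Mutex Mu;
//     int Balance GUARDED_BY(Mu);
//   public:
//     void deposit(int Amount) EXCLUSIVE_LOCKS_REQUIRED(Mu) {
//       Balance += Amount;   // ok: callers must hold Mu exclusively
//     }
//     void badWithdraw(int Amount) {
//       Balance -= Amount;   // warning: writing Balance requires holding Mu
//     }
//   };
//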
//===----------------------------------------------------------------------===//

#include "clang/Analysis/Analyses/ThreadSafety.h"
#include "clang/Analysis/AnalysisContext.h"
#include "clang/Analysis/CFG.h"
#include "clang/Analysis/CFGStmtMap.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/StmtCXX.h"
#include "clang/AST/StmtVisitor.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/SourceLocation.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/FoldingSet.h"
#include "llvm/ADT/ImmutableMap.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include <algorithm>
#include <vector>

using namespace clang;
using namespace thread_safety;

// Key method definition
ThreadSafetyHandler::~ThreadSafetyHandler() {}

// Helper functions
static Expr *getParent(Expr *Exp) {
  if (MemberExpr *ME = dyn_cast<MemberExpr>(Exp))
    return ME->getBase();
  if (CXXMemberCallExpr *CE = dyn_cast<CXXMemberCallExpr>(Exp))
    return CE->getImplicitObjectArgument();
  return 0;
}

namespace {
/// \brief Implements a set of CFGBlocks using a BitVector.
///
/// This class contains a minimal interface, primarily dictated by the SetType
/// template parameter of the llvm::po_iterator template, as used with external
/// storage. We also use this set to keep track of which CFGBlocks we visit
/// during the analysis.
class CFGBlockSet {
  llvm::BitVector VisitedBlockIDs;

public:
  // po_iterator requires this iterator, but the only interface needed is the
  // value_type typedef.
  struct iterator {
    typedef const CFGBlock *value_type;
  };

  CFGBlockSet() {}
  CFGBlockSet(const CFG *G) : VisitedBlockIDs(G->getNumBlockIDs(), false) {}

  /// \brief Set the bit associated with a particular CFGBlock.
  /// This is the important method for the SetType template parameter.
  bool insert(const CFGBlock *Block) {
    // Note that insert() is called by po_iterator, which doesn't check to make
    // sure that Block is non-null. Moreover, the CFGBlock iterator will
    // occasionally hand out null pointers for pruned edges, so we catch those
    // here.
    if (Block == 0)
      return false; // if an edge is trivially false.
    if (VisitedBlockIDs.test(Block->getBlockID()))
      return false;
    VisitedBlockIDs.set(Block->getBlockID());
    return true;
  }

  /// \brief Check if the bit for a CFGBlock has already been set.
  /// This method is for tracking visited blocks in the main thread safety loop.
  /// Block must not be null.
  bool alreadySet(const CFGBlock *Block) {
    return VisitedBlockIDs.test(Block->getBlockID());
  }
};

/// \brief A helper class used to iterate through CFGBlocks in topological
/// order.
class TopologicallySortedCFG {
  typedef llvm::po_iterator<const CFG*, CFGBlockSet, true> po_iterator;

  std::vector<const CFGBlock*> Blocks;

public:
  typedef std::vector<const CFGBlock*>::reverse_iterator iterator;

  TopologicallySortedCFG(const CFG *CFGraph) {
    Blocks.reserve(CFGraph->getNumBlockIDs());
    CFGBlockSet BSet(CFGraph);

    for (po_iterator I = po_iterator::begin(CFGraph, BSet),
         E = po_iterator::end(CFGraph, BSet); I != E; ++I) {
      Blocks.push_back(*I);
    }
  }

  iterator begin() {
    return Blocks.rbegin();
  }

  iterator end() {
    return Blocks.rend();
  }

  bool empty() {
    return begin() == end();
  }
};

/// \brief A MutexID object uniquely identifies a particular mutex, and
/// is built from an Expr* (i.e. a lock expression, such as the argument to a
/// lock function).
///
/// Thread-safety analysis works by comparing lock expressions. Within the
/// body of a function, an expression such as "x->foo->bar.mu" will resolve to
/// a particular mutex object at run-time. Subsequent occurrences of the same
/// expression (where "same" means syntactic equality) will refer to the same
/// run-time object if three conditions hold:
/// (1) Local variables in the expression, such as "x", have not changed.
/// (2) Values on the heap that affect the expression have not changed.
/// (3) The expression involves only pure function calls.
/// The current implementation assumes, but does not verify, that multiple uses
/// of the same lock expression satisfy these criteria.
///
/// Clang introduces an additional wrinkle, which is that it is difficult to
/// derive canonical expressions, or compare expressions directly for equality.
/// Thus, we identify a mutex not by an Expr, but by the set of named
/// declarations that are referenced by the Expr. In other words,
/// x->foo->bar.mu will be a four-element vector with the Decls for
/// mu, bar, foo, and x. The vector will uniquely identify the expression
/// for all practical purposes.
///
/// Note we will need to perform substitution on "this" and function parameter
/// names when constructing a lock expression.
///
/// For example:
/// class C { Mutex Mu; void lock() EXCLUSIVE_LOCK_FUNCTION(this->Mu); };
/// void myFunc(C *X) { ... X->lock() ... }
/// The original expression for the mutex acquired by myFunc is "this->Mu", but
/// "X" is substituted for "this" so we get "X->Mu".
///
/// For another example:
/// foo(MyList *L) EXCLUSIVE_LOCKS_REQUIRED(L->Mu) { ... }
/// MyList *MyL;
/// foo(MyL); // requires lock MyL->Mu to be held
class MutexID {
  SmallVector<NamedDecl*, 2> DeclSeq;

  /// Build a Decl sequence representing the lock from the given expression.
  /// Recursive function that bottoms out when the final DeclRefExpr is reached.
  void buildMutexID(Expr *Exp, Expr *Parent) {
    if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(Exp)) {
      NamedDecl *ND = cast<NamedDecl>(DRE->getDecl()->getCanonicalDecl());
      DeclSeq.push_back(ND);
    } else if (MemberExpr *ME = dyn_cast<MemberExpr>(Exp)) {
      NamedDecl *ND = ME->getMemberDecl();
      DeclSeq.push_back(ND);
      buildMutexID(ME->getBase(), Parent);
    } else if (isa<CXXThisExpr>(Exp)) {
      if (Parent)
        buildMutexID(Parent, 0);
      else
        return; // mutexID is still valid in this case
    } else if (CastExpr *CE = dyn_cast<CastExpr>(Exp))
      buildMutexID(CE->getSubExpr(), Parent);
    else
      DeclSeq.clear(); // invalid lock expression
  }

public:
  MutexID(Expr *LExpr, Expr *ParentExpr) {
    buildMutexID(LExpr, ParentExpr);
  }

  /// Returns false if we encountered part of a lock expression we could not
  /// parse.
  bool isValid() const {
    return !DeclSeq.empty();
  }

  bool operator==(const MutexID &other) const {
    return DeclSeq == other.DeclSeq;
  }

  bool operator!=(const MutexID &other) const {
    return !(*this == other);
  }

  // SmallVector overloads operator< to do lexicographic ordering. Note that
  // we use pointer equality (and <) to compare NamedDecls. This means the order
  // of MutexIDs in a lockset is nondeterministic. In order to output
  // diagnostics in a deterministic ordering, we must order all diagnostics to
  // output by SourceLocation when iterating through this lockset.
  bool operator<(const MutexID &other) const {
    return DeclSeq < other.DeclSeq;
  }

  /// \brief Returns the name of the first Decl in the list for a given MutexID;
  /// e.g. the lock expression foo.bar() has name "bar".
  /// The caret will point unambiguously to the lock expression, so using this
  /// name in diagnostics is a way to get simple, and consistent, mutex names.
  /// We do not want to output the entire expression text for security reasons.
  StringRef getName() const {
    assert(isValid());
    return DeclSeq.front()->getName();
  }

  void Profile(llvm::FoldingSetNodeID &ID) const {
    for (SmallVectorImpl<NamedDecl*>::const_iterator I = DeclSeq.begin(),
         E = DeclSeq.end(); I != E; ++I) {
      ID.AddPointer(*I);
    }
  }
};

/// \brief This is a helper class that stores info about the most recent
/// acquire of a Lock.
///
/// The main body of the analysis maps MutexIDs to LockDatas.
struct LockData {
  SourceLocation AcquireLoc;

  /// \brief LKind stores whether a lock is held shared or exclusively.
  /// Note that this analysis does not currently support either re-entrant
  /// locking or lock "upgrading" and "downgrading" between exclusive and
  /// shared.
  ///
  /// FIXME: add support for re-entrant locking and lock up/downgrading
  LockKind LKind;

  LockData(SourceLocation AcquireLoc, LockKind LKind)
    : AcquireLoc(AcquireLoc), LKind(LKind) {}

  bool operator==(const LockData &other) const {
    return AcquireLoc == other.AcquireLoc && LKind == other.LKind;
  }

  bool operator!=(const LockData &other) const {
    return !(*this == other);
  }

  void Profile(llvm::FoldingSetNodeID &ID) const {
    ID.AddInteger(AcquireLoc.getRawEncoding());
    ID.AddInteger(LKind);
  }
};

/// A Lockset maps each MutexID (defined above) to information about how it has
/// been locked.
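/// For example (a sketch of the intended semantics, not literal code in this
/// file): after visiting "mu1.Lock(); mu2.ReaderLock();", where Lock() and
/// ReaderLock() are hypothetical methods carrying the exclusive_lock_function
/// and shared_lock_function attributes, the lockset maps the MutexID for "mu1"
/// to LockData(<location of the Lock() call>, LK_Exclusive) and the MutexID
/// for "mu2" to LockData(<location of the ReaderLock() call>, LK_Shared).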
typedef llvm::ImmutableMap<MutexID, LockData> Lockset;

/// \brief We use this class to visit different types of expressions in
/// CFGBlocks, and build up the lockset.
/// An expression may cause us to add or remove locks from the lockset, or else
/// output error messages related to missing locks.
/// FIXME: In future, we may be able to not inherit from a visitor.
class BuildLockset : public StmtVisitor<BuildLockset> {
  ThreadSafetyHandler &Handler;
  Lockset LSet;
  Lockset::Factory &LocksetFactory;

  // Helper functions
  void removeLock(SourceLocation UnlockLoc, Expr *LockExp, Expr *Parent);
  void addLock(SourceLocation LockLoc, Expr *LockExp, Expr *Parent,
               LockKind LK);
  const ValueDecl *getValueDecl(Expr *Exp);
  void warnIfMutexNotHeld (const NamedDecl *D, Expr *Exp, AccessKind AK,
                           Expr *MutexExp, ProtectedOperationKind POK);
  void checkAccess(Expr *Exp, AccessKind AK);
  void checkDereference(Expr *Exp, AccessKind AK);

  template <class AttrType>
  void addLocksToSet(LockKind LK, Attr *Attr, CXXMemberCallExpr *Exp);

  /// \brief Returns true if the lockset contains a lock, regardless of whether
  /// the lock is held exclusively or shared.
  bool locksetContains(MutexID Lock) const {
    return LSet.lookup(Lock);
  }

  /// \brief Returns true if the lockset contains a lock with the passed in
  /// locktype.
  bool locksetContains(MutexID Lock, LockKind KindRequested) const {
    const LockData *LockHeld = LSet.lookup(Lock);
    return (LockHeld && KindRequested == LockHeld->LKind);
  }

  /// \brief Returns true if the lockset contains a lock with at least the
  /// passed in locktype. So for example, if we pass in LK_Shared, this function
  /// returns true if the lock is held LK_Shared or LK_Exclusive. If we pass in
  /// LK_Exclusive, this function returns true if the lock is held LK_Exclusive.
  bool locksetContainsAtLeast(MutexID Lock, LockKind KindRequested) const {
    switch (KindRequested) {
      case LK_Shared:
        return locksetContains(Lock);
      case LK_Exclusive:
        return locksetContains(Lock, KindRequested);
    }
    llvm_unreachable("Unknown LockKind");
  }

public:
  BuildLockset(ThreadSafetyHandler &Handler, Lockset LS, Lockset::Factory &F)
    : StmtVisitor<BuildLockset>(), Handler(Handler), LSet(LS),
      LocksetFactory(F) {}

  Lockset getLockset() {
    return LSet;
  }

  void VisitUnaryOperator(UnaryOperator *UO);
  void VisitBinaryOperator(BinaryOperator *BO);
  void VisitCastExpr(CastExpr *CE);
  void VisitCXXMemberCallExpr(CXXMemberCallExpr *Exp);
};

/// \brief Add a new lock to the lockset, warning if the lock is already there.
/// \param LockLoc The source location of the acquire
/// \param LockExp The lock expression corresponding to the lock to be added
void BuildLockset::addLock(SourceLocation LockLoc, Expr *LockExp, Expr *Parent,
                           LockKind LK) {
  // FIXME: deal with acquired before/after annotations
  MutexID Mutex(LockExp, Parent);
  if (!Mutex.isValid()) {
    Handler.handleInvalidLockExp(LockExp->getExprLoc());
    return;
  }

  LockData NewLock(LockLoc, LK);

  // FIXME: Don't always warn when we have support for reentrant locks.
  if (locksetContains(Mutex))
    Handler.handleDoubleLock(Mutex.getName(), LockLoc);
  LSet = LocksetFactory.add(LSet, Mutex, NewLock);
}

/// \brief Remove a lock from the lockset, warning if the lock is not there.
/// \param LockExp The lock expression corresponding to the lock to be removed
/// \param UnlockLoc The source location of the unlock (only used in error msg)
void BuildLockset::removeLock(SourceLocation UnlockLoc, Expr *LockExp,
                              Expr *Parent) {
  MutexID Mutex(LockExp, Parent);
  if (!Mutex.isValid()) {
    Handler.handleInvalidLockExp(LockExp->getExprLoc());
    return;
  }

  Lockset NewLSet = LocksetFactory.remove(LSet, Mutex);
  if(NewLSet == LSet)
    Handler.handleUnmatchedUnlock(Mutex.getName(), UnlockLoc);

  LSet = NewLSet;
}

/// \brief Gets the value decl pointer from DeclRefExprs or MemberExprs
const ValueDecl *BuildLockset::getValueDecl(Expr *Exp) {
  if (const DeclRefExpr *DR = dyn_cast<DeclRefExpr>(Exp))
    return DR->getDecl();

  if (const MemberExpr *ME = dyn_cast<MemberExpr>(Exp))
    return ME->getMemberDecl();

  return 0;
}

/// \brief Warn if the LSet does not contain a lock sufficient to protect access
/// of at least the passed in AccessType.
void BuildLockset::warnIfMutexNotHeld(const NamedDecl *D, Expr *Exp,
                                      AccessKind AK, Expr *MutexExp,
                                      ProtectedOperationKind POK) {
  LockKind LK = getLockKindFromAccessKind(AK);
  Expr *Parent = getParent(Exp);
  MutexID Mutex(MutexExp, Parent);
  if (!Mutex.isValid())
    Handler.handleInvalidLockExp(MutexExp->getExprLoc());
  else if (!locksetContainsAtLeast(Mutex, LK))
    Handler.handleMutexNotHeld(D, POK, Mutex.getName(), LK, Exp->getExprLoc());
}


/// \brief This method identifies variable dereferences and checks pt_guarded_by
/// and pt_guarded_var annotations. Note that we only check these annotations
/// at the time a pointer is dereferenced.
/// FIXME: We need to check for other types of pointer dereferences
/// (e.g. [], ->) and deal with them here.
/// \param Exp An expression that has been read or written.
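///
/// For example (a sketch; "p" and "mu" are hypothetical names, and
/// PT_GUARDED_BY is the usual macro wrapper for the pt_guarded_by attribute):
/// given "int *p PT_GUARDED_BY(mu);", a write through the pointer such as
/// "*p = 5;" is flagged unless mu is held exclusively, while reading "*p"
/// only requires mu to be held at least shared.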
void BuildLockset::checkDereference(Expr *Exp, AccessKind AK) {
  UnaryOperator *UO = dyn_cast<UnaryOperator>(Exp);
  if (!UO || UO->getOpcode() != clang::UO_Deref)
    return;
  Exp = UO->getSubExpr()->IgnoreParenCasts();

  const ValueDecl *D = getValueDecl(Exp);
  if(!D || !D->hasAttrs())
    return;

  if (D->getAttr<PtGuardedVarAttr>() && LSet.isEmpty())
    Handler.handleNoMutexHeld(D, POK_VarDereference, AK, Exp->getExprLoc());

  const AttrVec &ArgAttrs = D->getAttrs();
  for(unsigned i = 0, Size = ArgAttrs.size(); i < Size; ++i)
    if (PtGuardedByAttr *PGBAttr = dyn_cast<PtGuardedByAttr>(ArgAttrs[i]))
      warnIfMutexNotHeld(D, Exp, AK, PGBAttr->getArg(), POK_VarDereference);
}

/// \brief Checks guarded_by and guarded_var attributes.
/// Whenever we identify an access (read or write) of a DeclRefExpr or
/// MemberExpr, we need to check whether there are any guarded_by or
/// guarded_var attributes, and make sure we hold the appropriate mutexes.
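///
/// For example (a sketch; "Count" and "Mu" are hypothetical names, and
/// GUARDED_BY is the usual macro wrapper for the guarded_by attribute): given
/// "int Count GUARDED_BY(Mu);", the write "Count++;" requires Mu to be held
/// exclusively, while a read of Count requires it to be held at least shared.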
void BuildLockset::checkAccess(Expr *Exp, AccessKind AK) {
  const ValueDecl *D = getValueDecl(Exp);
  if(!D || !D->hasAttrs())
    return;

  if (D->getAttr<GuardedVarAttr>() && LSet.isEmpty())
    Handler.handleNoMutexHeld(D, POK_VarAccess, AK, Exp->getExprLoc());

  const AttrVec &ArgAttrs = D->getAttrs();
  for(unsigned i = 0, Size = ArgAttrs.size(); i < Size; ++i)
    if (GuardedByAttr *GBAttr = dyn_cast<GuardedByAttr>(ArgAttrs[i]))
      warnIfMutexNotHeld(D, Exp, AK, GBAttr->getArg(), POK_VarAccess);
}

/// \brief For unary operations which read and write a variable, we need to
/// check whether we hold any required mutexes. Reads are checked in
/// VisitCastExpr.
void BuildLockset::VisitUnaryOperator(UnaryOperator *UO) {
  switch (UO->getOpcode()) {
    case clang::UO_PostDec:
    case clang::UO_PostInc:
    case clang::UO_PreDec:
    case clang::UO_PreInc: {
      Expr *SubExp = UO->getSubExpr()->IgnoreParenCasts();
      checkAccess(SubExp, AK_Written);
      checkDereference(SubExp, AK_Written);
      break;
    }
    default:
      break;
  }
}

/// For binary operations which assign to a variable (writes), we need to check
/// whether we hold any required mutexes.
/// FIXME: Deal with non-primitive types.
void BuildLockset::VisitBinaryOperator(BinaryOperator *BO) {
  if (!BO->isAssignmentOp())
    return;
  Expr *LHSExp = BO->getLHS()->IgnoreParenCasts();
  checkAccess(LHSExp, AK_Written);
  checkDereference(LHSExp, AK_Written);
}

/// Whenever we do an LValue to Rvalue cast, we are reading a variable and
/// need to ensure we hold any required mutexes.
/// FIXME: Deal with non-primitive types.
void BuildLockset::VisitCastExpr(CastExpr *CE) {
  if (CE->getCastKind() != CK_LValueToRValue)
    return;
  Expr *SubExp = CE->getSubExpr()->IgnoreParenCasts();
  checkAccess(SubExp, AK_Read);
  checkDereference(SubExp, AK_Read);
}

/// \brief This function, parameterized by an attribute type, is used to add a
/// set of locks specified as attribute arguments to the lockset.
template <typename AttrType>
void BuildLockset::addLocksToSet(LockKind LK, Attr *Attr,
                                 CXXMemberCallExpr *Exp) {
  typedef typename AttrType::args_iterator iterator_type;
  SourceLocation ExpLocation = Exp->getExprLoc();
  Expr *Parent = Exp->getImplicitObjectArgument();
  AttrType *SpecificAttr = cast<AttrType>(Attr);

  if (SpecificAttr->args_size() == 0) {
    // The mutex held is the "this" object.
    addLock(ExpLocation, Parent, 0, LK);
    return;
  }

  for (iterator_type I = SpecificAttr->args_begin(),
       E = SpecificAttr->args_end(); I != E; ++I)
    addLock(ExpLocation, *I, Parent, LK);
}

/// \brief When visiting CXXMemberCallExprs we need to examine the attributes on
/// the method that is being called and add, remove or check locks in the
/// lockset accordingly.
///
/// FIXME: For classes annotated with one of the guarded annotations, we need
/// to treat const method calls as reads and non-const method calls as writes,
/// and check that the appropriate locks are held. Non-const method calls with
/// the same signature as const method calls can also be treated as reads.
///
/// FIXME: We need to also visit CallExprs to catch/check global functions.
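///
/// For example (a sketch; Mutex, Lock(), and Unlock() are hypothetical names
/// annotated with the exclusive_lock_function and unlock_function attributes):
/// visiting "mu.Lock()" adds the MutexID for "mu" to the lockset with kind
/// LK_Exclusive, and visiting "mu.Unlock()" removes it again, warning if it
/// was not held.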
void BuildLockset::VisitCXXMemberCallExpr(CXXMemberCallExpr *Exp) {
  NamedDecl *D = dyn_cast_or_null<NamedDecl>(Exp->getCalleeDecl());

  SourceLocation ExpLocation = Exp->getExprLoc();
  Expr *Parent = Exp->getImplicitObjectArgument();

  if(!D || !D->hasAttrs())
    return;

  AttrVec &ArgAttrs = D->getAttrs();
  for(unsigned i = 0; i < ArgAttrs.size(); ++i) {
    Attr *Attr = ArgAttrs[i];
    switch (Attr->getKind()) {
      // When we encounter an exclusive lock function, we need to add the lock
      // to our lockset with kind exclusive.
      case attr::ExclusiveLockFunction:
        addLocksToSet<ExclusiveLockFunctionAttr>(LK_Exclusive, Attr, Exp);
        break;

      // When we encounter a shared lock function, we need to add the lock
      // to our lockset with kind shared.
      case attr::SharedLockFunction:
        addLocksToSet<SharedLockFunctionAttr>(LK_Shared, Attr, Exp);
        break;

      // When we encounter an unlock function, we need to remove unlocked
      // mutexes from the lockset, and flag a warning if they are not there.
      case attr::UnlockFunction: {
        UnlockFunctionAttr *UFAttr = cast<UnlockFunctionAttr>(Attr);

        if (UFAttr->args_size() == 0) { // The lock held is the "this" object.
          removeLock(ExpLocation, Parent, 0);
          break;
        }

        for (UnlockFunctionAttr::args_iterator I = UFAttr->args_begin(),
             E = UFAttr->args_end(); I != E; ++I)
          removeLock(ExpLocation, *I, Parent);
        break;
      }

      case attr::ExclusiveLocksRequired: {
        // FIXME: Also use this attribute to add required locks to the initial
        // lockset when processing a CFG for a function annotated with this
        // attribute.
        ExclusiveLocksRequiredAttr *ELRAttr =
            cast<ExclusiveLocksRequiredAttr>(Attr);

        for (ExclusiveLocksRequiredAttr::args_iterator
             I = ELRAttr->args_begin(), E = ELRAttr->args_end(); I != E; ++I)
          warnIfMutexNotHeld(D, Exp, AK_Written, *I, POK_FunctionCall);
        break;
      }

      case attr::SharedLocksRequired: {
        // FIXME: Also use this attribute to add required locks to the initial
        // lockset when processing a CFG for a function annotated with this
        // attribute.
        SharedLocksRequiredAttr *SLRAttr = cast<SharedLocksRequiredAttr>(Attr);

        for (SharedLocksRequiredAttr::args_iterator I = SLRAttr->args_begin(),
             E = SLRAttr->args_end(); I != E; ++I)
          warnIfMutexNotHeld(D, Exp, AK_Read, *I, POK_FunctionCall);
        break;
      }

      case attr::LocksExcluded: {
        LocksExcludedAttr *LEAttr = cast<LocksExcludedAttr>(Attr);
        for (LocksExcludedAttr::args_iterator I = LEAttr->args_begin(),
             E = LEAttr->args_end(); I != E; ++I) {
          MutexID Mutex(*I, Parent);
          if (!Mutex.isValid())
            Handler.handleInvalidLockExp((*I)->getExprLoc());
          else if (locksetContains(Mutex))
            Handler.handleFunExcludesLock(D->getName(), Mutex.getName(),
                                          ExpLocation);
        }
        break;
      }

      case attr::LockReturned:
        // FIXME: Deal with this attribute.
        break;

      // Ignore other (non thread-safety) attributes
      default:
        break;
    }
  }
}

} // end anonymous namespace

/// \brief Compute the intersection of two locksets and issue warnings for any
/// locks in the symmetric difference.
///
/// This function is used at a merge point in the CFG when comparing the lockset
/// of each branch being merged. For example, given the following sequence:
/// A; if () then B; else C; D; we need to check that the locksets after B and
/// C are the same. In the event of a difference, we use the intersection of
/// these two locksets at the start of D.
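///
/// For example (a sketch with hypothetical mutexes M1 and M2): if the lockset
/// after B is {M1, M2} and the lockset after C is {M1}, then M2 is reported
/// via handleMutexHeldEndOfScope with the given LockErrorKind, and the entry
/// lockset of D becomes the intersection {M1}.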
static Lockset intersectAndWarn(ThreadSafetyHandler &Handler,
                                const Lockset LSet1, const Lockset LSet2,
                                Lockset::Factory &Fact, LockErrorKind LEK) {
  Lockset Intersection = LSet1;
  for (Lockset::iterator I = LSet2.begin(), E = LSet2.end(); I != E; ++I) {
    const MutexID &LSet2Mutex = I.getKey();
    const LockData &LSet2LockData = I.getData();
    if (const LockData *LD = LSet1.lookup(LSet2Mutex)) {
      if (LD->LKind != LSet2LockData.LKind) {
        Handler.handleExclusiveAndShared(LSet2Mutex.getName(),
                                         LSet2LockData.AcquireLoc,
                                         LD->AcquireLoc);
        if (LD->LKind != LK_Exclusive)
          Intersection = Fact.add(Intersection, LSet2Mutex, LSet2LockData);
      }
    } else {
      Handler.handleMutexHeldEndOfScope(LSet2Mutex.getName(),
                                        LSet2LockData.AcquireLoc, LEK);
    }
  }

  for (Lockset::iterator I = LSet1.begin(), E = LSet1.end(); I != E; ++I) {
    if (!LSet2.contains(I.getKey())) {
      const MutexID &Mutex = I.getKey();
      const LockData &MissingLock = I.getData();
      Handler.handleMutexHeldEndOfScope(Mutex.getName(),
                                        MissingLock.AcquireLoc, LEK);
      Intersection = Fact.remove(Intersection, Mutex);
    }
  }
  return Intersection;
}

/// \brief Returns the location of the first Stmt in a Block.
static SourceLocation getFirstStmtLocation(const CFGBlock *Block) {
  SourceLocation Loc;
  for (CFGBlock::const_iterator BI = Block->begin(), BE = Block->end();
       BI != BE; ++BI) {
    if (const CFGStmt *CfgStmt = dyn_cast<CFGStmt>(&(*BI))) {
      Loc = CfgStmt->getStmt()->getLocStart();
      if (Loc.isValid()) return Loc;
    }
  }
  if (const Stmt *S = Block->getTerminator().getStmt()) {
    Loc = S->getLocStart();
    if (Loc.isValid()) return Loc;
  }
  return Loc;
}

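/// \brief Adds a lock to the given lockset, reporting invalid lock expressions
/// through the handler. Used to seed a function's initial lockset from its
/// shared/exclusive locks_required attributes.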
static Lockset addLock(ThreadSafetyHandler &Handler,
                       Lockset::Factory &LocksetFactory,
                       Lockset &LSet, Expr *LockExp, LockKind LK,
                       SourceLocation Loc) {
  MutexID Mutex(LockExp, 0);
  if (!Mutex.isValid()) {
    Handler.handleInvalidLockExp(LockExp->getExprLoc());
    return LSet;
  }
  LockData NewLock(Loc, LK);
  return LocksetFactory.add(LSet, Mutex, NewLock);
}

namespace clang {
namespace thread_safety {
/// \brief Check a function's CFG for thread-safety violations.
///
/// We traverse the blocks in the CFG, compute the set of mutexes that are held
/// at the end of each block, and issue warnings for thread safety violations.
/// Each block in the CFG is traversed exactly once.
void runThreadSafetyAnalysis(AnalysisContext &AC,
                             ThreadSafetyHandler &Handler) {
  CFG *CFGraph = AC.getCFG();
  if (!CFGraph) return;
  const Decl *D = AC.getDecl();
  if (D && D->getAttr<NoThreadSafetyAnalysisAttr>()) return;

  Lockset::Factory LocksetFactory;

  // FIXME: Switch to SmallVector? Otherwise improve performance impact?
  std::vector<Lockset> EntryLocksets(CFGraph->getNumBlockIDs(),
                                     LocksetFactory.getEmptyMap());
  std::vector<Lockset> ExitLocksets(CFGraph->getNumBlockIDs(),
                                    LocksetFactory.getEmptyMap());

  // We need to explore the CFG via a "topological" ordering.
  // That way, we will be guaranteed to have information about required
  // predecessor locksets when exploring a new block.
  TopologicallySortedCFG SortedGraph(CFGraph);
  CFGBlockSet VisitedBlocks(CFGraph);

  if (!SortedGraph.empty() && D->hasAttrs()) {
    const CFGBlock *FirstBlock = *SortedGraph.begin();
    Lockset &InitialLockset = EntryLocksets[FirstBlock->getBlockID()];
    const AttrVec &ArgAttrs = D->getAttrs();
    for(unsigned i = 0; i < ArgAttrs.size(); ++i) {
      Attr *Attr = ArgAttrs[i];
      if (SharedLocksRequiredAttr *SLRAttr
            = dyn_cast<SharedLocksRequiredAttr>(Attr)) {
        for (SharedLocksRequiredAttr::args_iterator
             SLRIter = SLRAttr->args_begin(),
             SLREnd = SLRAttr->args_end(); SLRIter != SLREnd; ++SLRIter)
          InitialLockset = addLock(Handler, LocksetFactory, InitialLockset,
                                   *SLRIter, LK_Shared,
                                   getFirstStmtLocation(FirstBlock));
      } else if (ExclusiveLocksRequiredAttr *ELRAttr
                   = dyn_cast<ExclusiveLocksRequiredAttr>(Attr)) {
        for (ExclusiveLocksRequiredAttr::args_iterator
             ELRIter = ELRAttr->args_begin(),
             ELREnd = ELRAttr->args_end(); ELRIter != ELREnd; ++ELRIter)
          InitialLockset = addLock(Handler, LocksetFactory, InitialLockset,
                                   *ELRIter, LK_Exclusive,
                                   getFirstStmtLocation(FirstBlock));
      }
    }
  }

729
Caitlin Sadowski33208342011-09-09 16:11:56 +0000730 for (TopologicallySortedCFG::iterator I = SortedGraph.begin(),
731 E = SortedGraph.end(); I!= E; ++I) {
732 const CFGBlock *CurrBlock = *I;
733 int CurrBlockID = CurrBlock->getBlockID();
734
735 VisitedBlocks.insert(CurrBlock);
736
737 // Use the default initial lockset in case there are no predecessors.
738 Lockset &Entryset = EntryLocksets[CurrBlockID];
739 Lockset &Exitset = ExitLocksets[CurrBlockID];
740
741 // Iterate through the predecessor blocks and warn if the lockset for all
742 // predecessors is not the same. We take the entry lockset of the current
743 // block to be the intersection of all previous locksets.
744 // FIXME: By keeping the intersection, we may output more errors in future
745 // for a lock which is not in the intersection, but was in the union. We
746 // may want to also keep the union in future. As an example, let's say
747 // the intersection contains Mutex L, and the union contains L and M.
748 // Later we unlock M. At this point, we would output an error because we
749 // never locked M; although the real error is probably that we forgot to
750 // lock M on all code paths. Conversely, let's say that later we lock M.
751 // In this case, we should compare against the intersection instead of the
752 // union because the real error is probably that we forgot to unlock M on
753 // all code paths.
754 bool LocksetInitialized = false;
755 for (CFGBlock::const_pred_iterator PI = CurrBlock->pred_begin(),
756 PE = CurrBlock->pred_end(); PI != PE; ++PI) {
757
758 // if *PI -> CurrBlock is a back edge
759 if (*PI == 0 || !VisitedBlocks.alreadySet(*PI))
760 continue;
761
762 int PrevBlockID = (*PI)->getBlockID();
763 if (!LocksetInitialized) {
764 Entryset = ExitLocksets[PrevBlockID];
765 LocksetInitialized = true;
766 } else {
767 Entryset = intersectAndWarn(Handler, Entryset,
Caitlin Sadowskiaf9b7c52011-09-15 17:25:19 +0000768 ExitLocksets[PrevBlockID], LocksetFactory,
769 LEK_LockedSomePredecessors);
Caitlin Sadowski33208342011-09-09 16:11:56 +0000770 }
771 }
772
773 BuildLockset LocksetBuilder(Handler, Entryset, LocksetFactory);
774 for (CFGBlock::const_iterator BI = CurrBlock->begin(),
775 BE = CurrBlock->end(); BI != BE; ++BI) {
776 if (const CFGStmt *CfgStmt = dyn_cast<CFGStmt>(&*BI))
777 LocksetBuilder.Visit(const_cast<Stmt*>(CfgStmt->getStmt()));
778 }
779 Exitset = LocksetBuilder.getLockset();
780
781 // For every back edge from CurrBlock (the end of the loop) to another block
782 // (FirstLoopBlock) we need to check that the Lockset of Block is equal to
783 // the one held at the beginning of FirstLoopBlock. We can look up the
784 // Lockset held at the beginning of FirstLoopBlock in the EntryLockSets map.
785 for (CFGBlock::const_succ_iterator SI = CurrBlock->succ_begin(),
786 SE = CurrBlock->succ_end(); SI != SE; ++SI) {
787
788 // if CurrBlock -> *SI is *not* a back edge
789 if (*SI == 0 || !VisitedBlocks.alreadySet(*SI))
790 continue;
791
792 CFGBlock *FirstLoopBlock = *SI;
Caitlin Sadowski33208342011-09-09 16:11:56 +0000793 Lockset PreLoop = EntryLocksets[FirstLoopBlock->getBlockID()];
794 Lockset LoopEnd = ExitLocksets[CurrBlockID];
Caitlin Sadowskiaf9b7c52011-09-15 17:25:19 +0000795 intersectAndWarn(Handler, LoopEnd, PreLoop, LocksetFactory,
796 LEK_LockedSomeLoopIterations);
Caitlin Sadowski33208342011-09-09 16:11:56 +0000797 }
798 }
799
Caitlin Sadowskiaf9b7c52011-09-15 17:25:19 +0000800 Lockset InitialLockset = EntryLocksets[CFGraph->getEntry().getBlockID()];
Caitlin Sadowski33208342011-09-09 16:11:56 +0000801 Lockset FinalLockset = ExitLocksets[CFGraph->getExit().getBlockID()];
Caitlin Sadowskiaf9b7c52011-09-15 17:25:19 +0000802 intersectAndWarn(Handler, InitialLockset, FinalLockset, LocksetFactory,
803 LEK_LockedAtEndOfFunction);
Caitlin Sadowski33208342011-09-09 16:11:56 +0000804}

/// \brief Helper function that returns a LockKind required for the given level
/// of access.
LockKind getLockKindFromAccessKind(AccessKind AK) {
  switch (AK) {
    case AK_Read :
      return LK_Shared;
    case AK_Written :
      return LK_Exclusive;
  }
  llvm_unreachable("Unknown AccessKind");
}
}} // end namespace clang::thread_safety