Remove tabs and clean up whitespace.
git-svn-id: https://llvm.org/svn/llvm-project/cfe/trunk@81346 91177308-0d34-0410-b5e6-96231b3b80d8
diff --git a/lib/Analysis/AnalysisContext.cpp b/lib/Analysis/AnalysisContext.cpp
index da671d6..a4cb66b 100644
--- a/lib/Analysis/AnalysisContext.cpp
+++ b/lib/Analysis/AnalysisContext.cpp
@@ -45,18 +45,18 @@
const ImplicitParamDecl *AnalysisContext::getSelfDecl() const {
if (const ObjCMethodDecl *MD = dyn_cast<ObjCMethodDecl>(D))
return MD->getSelfDecl();
-
+
return NULL;
}
CFG *AnalysisContext::getCFG() {
- if (!cfg)
+ if (!cfg)
cfg = CFG::buildCFG(getBody(), &D->getASTContext());
return cfg;
}
ParentMap &AnalysisContext::getParentMap() {
- if (!PM)
+ if (!PM)
PM = new ParentMap(getBody());
return *PM;
}
@@ -66,12 +66,12 @@
CFG *c = getCFG();
if (!c)
return 0;
-
+
liveness = new LiveVariables(D->getASTContext(), *c);
liveness->runOnCFG(*c);
liveness->runOnAllBlocks(*c, 0, true);
}
-
+
return liveness;
}
@@ -79,7 +79,7 @@
AnalysisContext *&AC = Contexts[D];
if (!AC)
AC = new AnalysisContext(D);
-
+
return AC;
}
@@ -104,14 +104,14 @@
}
StackFrameContext*
-LocationContextManager::getStackFrame(AnalysisContext *ctx,
+LocationContextManager::getStackFrame(AnalysisContext *ctx,
const LocationContext *parent,
const Stmt *s) {
llvm::FoldingSetNodeID ID;
StackFrameContext::Profile(ID, ctx, parent, s);
void *InsertPos;
- StackFrameContext *f =
+ StackFrameContext *f =
cast_or_null<StackFrameContext>(Contexts.FindNodeOrInsertPos(ID, InsertPos));
if (!f) {
f = new StackFrameContext(ctx, parent, s);
@@ -126,7 +126,7 @@
llvm::FoldingSetNodeID ID;
ScopeContext::Profile(ID, ctx, parent, s);
void *InsertPos;
-
+
ScopeContext *scope =
cast_or_null<ScopeContext>(Contexts.FindNodeOrInsertPos(ID, InsertPos));
diff --git a/lib/Analysis/AnalysisManager.cpp b/lib/Analysis/AnalysisManager.cpp
index b73e86d..623db17 100644
--- a/lib/Analysis/AnalysisManager.cpp
+++ b/lib/Analysis/AnalysisManager.cpp
@@ -17,12 +17,12 @@
using namespace clang;
void AnalysisManager::DisplayFunction() {
-
+
if (DisplayedFunction)
return;
-
+
DisplayedFunction = true;
-
+
// FIXME: Is getCodeDecl() always a named decl?
if (isa<FunctionDecl>(getCodeDecl()) ||
isa<ObjCMethodDecl>(getCodeDecl())) {
diff --git a/lib/Analysis/BasicConstraintManager.cpp b/lib/Analysis/BasicConstraintManager.cpp
index cb89d30..d0b8289 100644
--- a/lib/Analysis/BasicConstraintManager.cpp
+++ b/lib/Analysis/BasicConstraintManager.cpp
@@ -7,7 +7,7 @@
//
//===----------------------------------------------------------------------===//
//
-// This file defines BasicConstraintManager, a class that tracks simple
+// This file defines BasicConstraintManager, a class that tracks simple
// equality and inequality constraints on symbolic values of GRState.
//
//===----------------------------------------------------------------------===//
@@ -27,22 +27,22 @@
typedef llvm::ImmutableMap<SymbolRef,GRState::IntSetTy> ConstNotEqTy;
typedef llvm::ImmutableMap<SymbolRef,const llvm::APSInt*> ConstEqTy;
-
+
static int ConstEqIndex = 0;
static int ConstNotEqIndex = 0;
namespace clang {
template<>
struct GRStateTrait<ConstNotEq> : public GRStatePartialTrait<ConstNotEqTy> {
- static inline void* GDMIndex() { return &ConstNotEqIndex; }
+ static inline void* GDMIndex() { return &ConstNotEqIndex; }
};
template<>
struct GRStateTrait<ConstEq> : public GRStatePartialTrait<ConstEqTy> {
- static inline void* GDMIndex() { return &ConstEqIndex; }
+ static inline void* GDMIndex() { return &ConstEqIndex; }
};
-}
-
+}
+
namespace {
// BasicConstraintManager only tracks equality and inequality constraints of
// constants and integer variables.
@@ -50,7 +50,7 @@
: public SimpleConstraintManager {
GRState::IntSetTy::Factory ISetFactory;
public:
- BasicConstraintManager(GRStateManager& statemgr)
+ BasicConstraintManager(GRStateManager& statemgr)
: ISetFactory(statemgr.getAllocator()) {}
const GRState* AssumeSymNE(const GRState* state, SymbolRef sym,
@@ -83,7 +83,7 @@
const GRState* RemoveDeadBindings(const GRState* state, SymbolReaper& SymReaper);
- void print(const GRState* state, llvm::raw_ostream& Out,
+ void print(const GRState* state, llvm::raw_ostream& Out,
const char* nl, const char *sep);
};
@@ -133,7 +133,7 @@
// These logic will be handled in another ConstraintManager.
const GRState *BasicConstraintManager::AssumeSymLT(const GRState *state,
SymbolRef sym,
- const llvm::APSInt& V) {
+ const llvm::APSInt& V) {
// Is 'V' the smallest possible value?
if (V == llvm::APSInt::getMinValue(V.getBitWidth(), V.isUnsigned())) {
// sym cannot be any value less than 'V'. This path is infeasible.
@@ -167,14 +167,14 @@
bool isFeasible = *X >= V;
return isFeasible ? state : NULL;
}
-
+
// Sym is not a constant, but it is worth looking to see if V is the
// maximum integer value.
if (V == llvm::APSInt::getMaxValue(V.getBitWidth(), V.isUnsigned())) {
// If we know that sym != V, then this condition is infeasible since
- // there is no other value greater than V.
+ // there is no other value greater than V.
bool isFeasible = !isNotEqual(state, sym, V);
-
+
// If the path is still feasible then as a consequence we know that
// 'sym == V' because we cannot have 'sym > V' (no larger values).
// Add this constraint.
@@ -193,20 +193,20 @@
bool isFeasible = *X <= V;
return isFeasible ? state : NULL;
}
-
+
// Sym is not a constant, but it is worth looking to see if V is the
// minimum integer value.
if (V == llvm::APSInt::getMinValue(V.getBitWidth(), V.isUnsigned())) {
// If we know that sym != V, then this condition is infeasible since
- // there is no other value less than V.
+ // there is no other value less than V.
bool isFeasible = !isNotEqual(state, sym, V);
-
+
// If the path is still feasible then as a consequence we know that
// 'sym == V' because we cannot have 'sym < V' (no smaller values).
// Add this constraint.
return isFeasible ? AddEQ(state, sym, V) : NULL;
}
-
+
return state;
}
@@ -222,10 +222,10 @@
// First, retrieve the NE-set associated with the given symbol.
ConstNotEqTy::data_type* T = state->get<ConstNotEq>(sym);
GRState::IntSetTy S = T ? *T : ISetFactory.GetEmptySet();
-
+
// Now add V to the NE set.
S = ISetFactory.Add(S, &V);
-
+
// Create a new state with the old binding replaced.
return state->set<ConstNotEq>(sym, S);
}
@@ -236,7 +236,7 @@
return T ? *T : NULL;
}
-bool BasicConstraintManager::isNotEqual(const GRState* state, SymbolRef sym,
+bool BasicConstraintManager::isNotEqual(const GRState* state, SymbolRef sym,
const llvm::APSInt& V) const {
// Retrieve the NE-set associated with the given symbol.
@@ -273,14 +273,14 @@
ConstNotEqTy::Factory& CNEFactory = state->get_context<ConstNotEq>();
for (ConstNotEqTy::iterator I = CNE.begin(), E = CNE.end(); I != E; ++I) {
- SymbolRef sym = I.getKey();
+ SymbolRef sym = I.getKey();
if (SymReaper.maybeDead(sym)) CNE = CNEFactory.Remove(CNE, sym);
}
-
+
return state->set<ConstNotEq>(CNE);
}
-void BasicConstraintManager::print(const GRState* state, llvm::raw_ostream& Out,
+void BasicConstraintManager::print(const GRState* state, llvm::raw_ostream& Out,
const char* nl, const char *sep) {
// Print equality constraints.
@@ -293,23 +293,23 @@
}
// Print != constraints.
-
+
ConstNotEqTy CNE = state->get<ConstNotEq>();
-
+
if (!CNE.isEmpty()) {
Out << nl << sep << "'!=' constraints:";
-
+
for (ConstNotEqTy::iterator I = CNE.begin(), EI = CNE.end(); I!=EI; ++I) {
Out << nl << " $" << I.getKey() << " : ";
bool isFirst = true;
-
- GRState::IntSetTy::iterator J = I.getData().begin(),
- EJ = I.getData().end();
-
- for ( ; J != EJ; ++J) {
+
+ GRState::IntSetTy::iterator J = I.getData().begin(),
+ EJ = I.getData().end();
+
+ for ( ; J != EJ; ++J) {
if (isFirst) isFirst = false;
else Out << ", ";
-
+
Out << (*J)->getSExtValue(); // Hack: should print to raw_ostream.
}
}
diff --git a/lib/Analysis/BasicObjCFoundationChecks.cpp b/lib/Analysis/BasicObjCFoundationChecks.cpp
index 8891099..9c20089 100644
--- a/lib/Analysis/BasicObjCFoundationChecks.cpp
+++ b/lib/Analysis/BasicObjCFoundationChecks.cpp
@@ -33,10 +33,10 @@
static const ObjCInterfaceType* GetReceiverType(const ObjCMessageExpr* ME) {
const Expr* Receiver = ME->getReceiver();
-
+
if (!Receiver)
return NULL;
-
+
if (const ObjCObjectPointerType *PT =
Receiver->getType()->getAsObjCObjectPointerType())
return PT->getInterfaceType();
@@ -56,75 +56,75 @@
public:
APIMisuse(const char* name) : BugType(name, "API Misuse (Apple)") {}
};
-
+
class VISIBILITY_HIDDEN BasicObjCFoundationChecks : public GRSimpleAPICheck {
APIMisuse *BT;
BugReporter& BR;
ASTContext &Ctx;
-
+
bool isNSString(const ObjCInterfaceType *T, const char* suffix);
bool AuditNSString(ExplodedNode* N, const ObjCMessageExpr* ME);
-
- void Warn(ExplodedNode* N, const Expr* E, const std::string& s);
+
+ void Warn(ExplodedNode* N, const Expr* E, const std::string& s);
void WarnNilArg(ExplodedNode* N, const Expr* E);
-
+
bool CheckNilArg(ExplodedNode* N, unsigned Arg);
public:
- BasicObjCFoundationChecks(ASTContext& ctx, BugReporter& br)
+ BasicObjCFoundationChecks(ASTContext& ctx, BugReporter& br)
: BT(0), BR(br), Ctx(ctx) {}
-
+
bool Audit(ExplodedNode* N, GRStateManager&);
-
-private:
- void WarnNilArg(ExplodedNode* N, const ObjCMessageExpr* ME, unsigned Arg) {
+
+private:
+ void WarnNilArg(ExplodedNode* N, const ObjCMessageExpr* ME, unsigned Arg) {
std::string sbuf;
llvm::raw_string_ostream os(sbuf);
os << "Argument to '" << GetReceiverNameType(ME) << "' method '"
<< ME->getSelector().getAsString() << "' cannot be nil.";
-
+
// Lazily create the BugType object for NilArg. This will be owned
// by the BugReporter object 'BR' once we call BR.EmitWarning.
if (!BT) BT = new APIMisuse("nil argument");
-
+
RangedBugReport *R = new RangedBugReport(*BT, os.str().c_str(), N);
R->addRange(ME->getArg(Arg)->getSourceRange());
BR.EmitReport(R);
}
};
-
+
} // end anonymous namespace
GRSimpleAPICheck*
clang::CreateBasicObjCFoundationChecks(ASTContext& Ctx, BugReporter& BR) {
- return new BasicObjCFoundationChecks(Ctx, BR);
+ return new BasicObjCFoundationChecks(Ctx, BR);
}
bool BasicObjCFoundationChecks::Audit(ExplodedNode* N,
GRStateManager&) {
-
+
const ObjCMessageExpr* ME =
cast<ObjCMessageExpr>(cast<PostStmt>(N->getLocation()).getStmt());
const ObjCInterfaceType *ReceiverType = GetReceiverType(ME);
-
+
if (!ReceiverType)
return false;
-
+
const char* name = ReceiverType->getDecl()->getIdentifier()->getName();
-
+
if (!name)
return false;
if (name[0] != 'N' || name[1] != 'S')
return false;
-
+
name += 2;
-
- // FIXME: Make all of this faster.
+
+ // FIXME: Make all of this faster.
if (isNSString(ReceiverType, name))
return AuditNSString(N, ME);
@@ -132,7 +132,7 @@
}
static inline bool isNil(SVal X) {
- return isa<loc::ConcreteInt>(X);
+ return isa<loc::ConcreteInt>(X);
}
//===----------------------------------------------------------------------===//
@@ -142,14 +142,14 @@
bool BasicObjCFoundationChecks::CheckNilArg(ExplodedNode* N, unsigned Arg) {
const ObjCMessageExpr* ME =
cast<ObjCMessageExpr>(cast<PostStmt>(N->getLocation()).getStmt());
-
+
const Expr * E = ME->getArg(Arg);
-
+
if (isNil(N->getState()->getSVal(E))) {
WarnNilArg(N, ME, Arg);
return true;
}
-
+
return false;
}
@@ -158,35 +158,35 @@
//===----------------------------------------------------------------------===//
bool BasicObjCFoundationChecks::isNSString(const ObjCInterfaceType *T,
- const char* suffix) {
+ const char* suffix) {
return !strcmp("String", suffix) || !strcmp("MutableString", suffix);
}
-bool BasicObjCFoundationChecks::AuditNSString(ExplodedNode* N,
+bool BasicObjCFoundationChecks::AuditNSString(ExplodedNode* N,
const ObjCMessageExpr* ME) {
-
+
Selector S = ME->getSelector();
-
+
if (S.isUnarySelector())
return false;
// FIXME: This is going to be really slow doing these checks with
// lexical comparisons.
-
+
std::string name = S.getAsString();
assert (!name.empty());
const char* cstr = &name[0];
unsigned len = name.size();
-
+
switch (len) {
default:
break;
- case 8:
+ case 8:
if (!strcmp(cstr, "compare:"))
return CheckNilArg(N, 0);
-
+
break;
-
+
case 15:
// FIXME: Checking for initWithFormat: will not work in most cases
// yet because [NSString alloc] returns id, not NSString*. We will
@@ -194,41 +194,41 @@
// to find these errors.
if (!strcmp(cstr, "initWithFormat:"))
return CheckNilArg(N, 0);
-
+
break;
-
+
case 16:
if (!strcmp(cstr, "compare:options:"))
return CheckNilArg(N, 0);
-
+
break;
-
+
case 22:
if (!strcmp(cstr, "compare:options:range:"))
return CheckNilArg(N, 0);
-
+
break;
-
+
case 23:
-
+
if (!strcmp(cstr, "caseInsensitiveCompare:"))
return CheckNilArg(N, 0);
-
+
break;
case 29:
if (!strcmp(cstr, "compare:options:range:locale:"))
return CheckNilArg(N, 0);
-
- break;
-
+
+ break;
+
case 37:
if (!strcmp(cstr, "componentsSeparatedByCharactersInSet:"))
return CheckNilArg(N, 0);
-
- break;
+
+ break;
}
-
+
return false;
}
@@ -240,7 +240,7 @@
class VISIBILITY_HIDDEN AuditCFNumberCreate : public GRSimpleAPICheck {
APIMisuse* BT;
-
+
// FIXME: Either this should be refactored into GRSimpleAPICheck, or
// it should always be passed with a call to Audit. The latter
// approach makes this class more stateless.
@@ -249,16 +249,16 @@
BugReporter& BR;
public:
- AuditCFNumberCreate(ASTContext& ctx, BugReporter& br)
+ AuditCFNumberCreate(ASTContext& ctx, BugReporter& br)
: BT(0), Ctx(ctx), II(&Ctx.Idents.get("CFNumberCreate")), BR(br){}
-
+
~AuditCFNumberCreate() {}
-
+
bool Audit(ExplodedNode* N, GRStateManager&);
-
+
private:
void AddError(const TypedRegion* R, const Expr* Ex, ExplodedNode *N,
- uint64_t SourceSize, uint64_t TargetSize, uint64_t NumberKind);
+ uint64_t SourceSize, uint64_t TargetSize, uint64_t NumberKind);
};
} // end anonymous namespace
@@ -289,7 +289,7 @@
public:
Optional() : IsKnown(false), Val(0) {}
Optional(const T& val) : IsKnown(true), Val(val) {}
-
+
bool isKnown() const { return IsKnown; }
const T& getValue() const {
@@ -305,12 +305,12 @@
static Optional<uint64_t> GetCFNumberSize(ASTContext& Ctx, uint64_t i) {
static unsigned char FixedSize[] = { 8, 16, 32, 64, 32, 64 };
-
+
if (i < kCFNumberCharType)
return FixedSize[i-1];
-
+
QualType T;
-
+
switch (i) {
case kCFNumberCharType: T = Ctx.CharTy; break;
case kCFNumberShortType: T = Ctx.ShortTy; break;
@@ -322,11 +322,11 @@
case kCFNumberCFIndexType:
case kCFNumberNSIntegerType:
case kCFNumberCGFloatType:
- // FIXME: We need a way to map from names to Type*.
+ // FIXME: We need a way to map from names to Type*.
default:
return Optional<uint64_t>();
}
-
+
return Ctx.getTypeSize(T);
}
@@ -350,72 +350,72 @@
"kCFNumberNSIntegerType",
"kCFNumberCGFloatType"
};
-
+
return i <= kCFNumberCGFloatType ? Names[i-1] : "Invalid CFNumberType";
}
#endif
-bool AuditCFNumberCreate::Audit(ExplodedNode* N,GRStateManager&){
+bool AuditCFNumberCreate::Audit(ExplodedNode* N,GRStateManager&){
const CallExpr* CE =
cast<CallExpr>(cast<PostStmt>(N->getLocation()).getStmt());
- const Expr* Callee = CE->getCallee();
- SVal CallV = N->getState()->getSVal(Callee);
+ const Expr* Callee = CE->getCallee();
+ SVal CallV = N->getState()->getSVal(Callee);
const FunctionDecl* FD = CallV.getAsFunctionDecl();
if (!FD || FD->getIdentifier() != II || CE->getNumArgs()!=3)
return false;
-
+
// Get the value of the "theType" argument.
SVal TheTypeVal = N->getState()->getSVal(CE->getArg(1));
-
+
// FIXME: We really should allow ranges of valid theType values, and
// bifurcate the state appropriately.
nonloc::ConcreteInt* V = dyn_cast<nonloc::ConcreteInt>(&TheTypeVal);
-
+
if (!V)
return false;
-
+
uint64_t NumberKind = V->getValue().getLimitedValue();
Optional<uint64_t> TargetSize = GetCFNumberSize(Ctx, NumberKind);
-
+
// FIXME: In some cases we can emit an error.
if (!TargetSize.isKnown())
return false;
-
+
// Look at the value of the integer being passed by reference. Essentially
// we want to catch cases where the value passed in is not equal to the
// size of the type being created.
SVal TheValueExpr = N->getState()->getSVal(CE->getArg(2));
-
+
// FIXME: Eventually we should handle arbitrary locations. We can do this
// by having an enhanced memory model that does low-level typing.
loc::MemRegionVal* LV = dyn_cast<loc::MemRegionVal>(&TheValueExpr);
if (!LV)
return false;
-
+
const TypedRegion* R = dyn_cast<TypedRegion>(LV->getBaseRegion());
if (!R)
return false;
QualType T = Ctx.getCanonicalType(R->getValueType(Ctx));
-
+
// FIXME: If the pointee isn't an integer type, should we flag a warning?
// People can do weird stuff with pointers.
-
- if (!T->isIntegerType())
+
+ if (!T->isIntegerType())
return false;
-
+
uint64_t SourceSize = Ctx.getTypeSize(T);
-
+
// CHECK: is SourceSize == TargetSize
-
+
if (SourceSize == TargetSize)
return false;
-
+
AddError(R, CE->getArg(2), N, SourceSize, TargetSize, NumberKind);
-
+
// FIXME: We can actually create an abstract "CFNumber" object that has
// the bits initialized to the provided values.
return SourceSize < TargetSize;
@@ -425,23 +425,23 @@
ExplodedNode *N,
uint64_t SourceSize, uint64_t TargetSize,
uint64_t NumberKind) {
-
+
std::string sbuf;
llvm::raw_string_ostream os(sbuf);
-
+
os << (SourceSize == 8 ? "An " : "A ")
<< SourceSize << " bit integer is used to initialize a CFNumber "
"object that represents "
<< (TargetSize == 8 ? "an " : "a ")
- << TargetSize << " bit integer. ";
+ << TargetSize << " bit integer. ";
if (SourceSize < TargetSize)
os << (TargetSize - SourceSize)
- << " bits of the CFNumber value will be garbage." ;
+ << " bits of the CFNumber value will be garbage." ;
else
os << (SourceSize - TargetSize)
<< " bits of the input integer will be lost.";
-
+
// Lazily create the BugType object. This will be owned
// by the BugReporter object 'BR' once we call BR.EmitWarning.
if (!BT) BT = new APIMisuse("Bad use of CFNumberCreate");
@@ -451,7 +451,7 @@
}
GRSimpleAPICheck*
-clang::CreateAuditCFNumberCreate(ASTContext& Ctx, BugReporter& BR) {
+clang::CreateAuditCFNumberCreate(ASTContext& Ctx, BugReporter& BR) {
return new AuditCFNumberCreate(Ctx, BR);
}
@@ -462,22 +462,22 @@
namespace {
class VISIBILITY_HIDDEN AuditCFRetainRelease : public GRSimpleAPICheck {
APIMisuse *BT;
-
+
// FIXME: Either this should be refactored into GRSimpleAPICheck, or
// it should always be passed with a call to Audit. The latter
// approach makes this class more stateless.
ASTContext& Ctx;
IdentifierInfo *Retain, *Release;
BugReporter& BR;
-
+
public:
- AuditCFRetainRelease(ASTContext& ctx, BugReporter& br)
+ AuditCFRetainRelease(ASTContext& ctx, BugReporter& br)
: BT(0), Ctx(ctx),
Retain(&Ctx.Idents.get("CFRetain")), Release(&Ctx.Idents.get("CFRelease")),
BR(br){}
-
+
~AuditCFRetainRelease() {}
-
+
bool Audit(ExplodedNode* N, GRStateManager&);
};
} // end anonymous namespace
@@ -485,23 +485,23 @@
bool AuditCFRetainRelease::Audit(ExplodedNode* N, GRStateManager&) {
const CallExpr* CE = cast<CallExpr>(cast<PostStmt>(N->getLocation()).getStmt());
-
+
// If the CallExpr doesn't have exactly 1 argument just give up checking.
if (CE->getNumArgs() != 1)
return false;
-
+
// Check if we called CFRetain/CFRelease.
const GRState* state = N->getState();
SVal X = state->getSVal(CE->getCallee());
const FunctionDecl* FD = X.getAsFunctionDecl();
-
+
if (!FD)
return false;
-
- const IdentifierInfo *FuncII = FD->getIdentifier();
+
+ const IdentifierInfo *FuncII = FD->getIdentifier();
if (!(FuncII == Retain || FuncII == Release))
return false;
-
+
// Finally, check if the argument is NULL.
// FIXME: We should be able to bifurcate the state here, as a successful
// check will result in the value not being NULL afterwards.
@@ -511,7 +511,7 @@
if (state->getStateManager().isEqual(state, CE->getArg(0), 0)) {
if (!BT)
BT = new APIMisuse("null passed to CFRetain/CFRelease");
-
+
const char *description = (FuncII == Retain)
? "Null pointer argument in call to CFRetain"
: "Null pointer argument in call to CFRelease";
@@ -524,10 +524,10 @@
return false;
}
-
-
+
+
GRSimpleAPICheck*
-clang::CreateAuditCFRetainRelease(ASTContext& Ctx, BugReporter& BR) {
+clang::CreateAuditCFRetainRelease(ASTContext& Ctx, BugReporter& BR) {
return new AuditCFRetainRelease(Ctx, BR);
}
@@ -541,8 +541,8 @@
Eng.AddCheck(CreateBasicObjCFoundationChecks(Ctx, BR),
Stmt::ObjCMessageExprClass);
- Eng.AddCheck(CreateAuditCFNumberCreate(Ctx, BR), Stmt::CallExprClass);
+ Eng.AddCheck(CreateAuditCFNumberCreate(Ctx, BR), Stmt::CallExprClass);
Eng.AddCheck(CreateAuditCFRetainRelease(Ctx, BR), Stmt::CallExprClass);
-
+
RegisterNSErrorChecks(BR, Eng, D);
}
diff --git a/lib/Analysis/BasicObjCFoundationChecks.h b/lib/Analysis/BasicObjCFoundationChecks.h
index 8aa9960..1271ae4 100644
--- a/lib/Analysis/BasicObjCFoundationChecks.h
+++ b/lib/Analysis/BasicObjCFoundationChecks.h
@@ -25,24 +25,24 @@
#define LLVM_CLANG_ANALYSIS_BASICOBJCFOUNDATIONCHECKS
namespace clang {
-
+
class GRSimpleAPICheck;
class ASTContext;
-class GRStateManager;
+class GRStateManager;
class BugReporter;
class GRExprEngine;
-
+
GRSimpleAPICheck *CreateBasicObjCFoundationChecks(ASTContext& Ctx,
BugReporter& BR);
-
+
GRSimpleAPICheck *CreateAuditCFNumberCreate(ASTContext& Ctx,
BugReporter& BR);
-
+
GRSimpleAPICheck *CreateAuditCFRetainRelease(ASTContext& Ctx,
BugReporter& BR);
-
+
void RegisterNSErrorChecks(BugReporter& BR, GRExprEngine &Eng, const Decl &D);
-
+
} // end clang namespace
#endif
diff --git a/lib/Analysis/BasicStore.cpp b/lib/Analysis/BasicStore.cpp
index 682feb5..388b2e9 100644
--- a/lib/Analysis/BasicStore.cpp
+++ b/lib/Analysis/BasicStore.cpp
@@ -20,10 +20,10 @@
using namespace clang;
-typedef llvm::ImmutableMap<const MemRegion*,SVal> BindingsTy;
+typedef llvm::ImmutableMap<const MemRegion*,SVal> BindingsTy;
namespace {
-
+
class VISIBILITY_HIDDEN BasicStoreSubRegionMap : public SubRegionMap {
public:
BasicStoreSubRegionMap() {}
@@ -32,13 +32,13 @@
return true; // Do nothing. No subregions.
}
};
-
+
class VISIBILITY_HIDDEN BasicStoreManager : public StoreManager {
BindingsTy::Factory VBFactory;
public:
BasicStoreManager(GRStateManager& mgr)
: StoreManager(mgr), VBFactory(mgr.getAllocator()) {}
-
+
~BasicStoreManager() {}
SubRegionMap *getSubRegionMap(const GRState *state) {
@@ -47,7 +47,7 @@
SValuator::CastResult Retrieve(const GRState *state, Loc loc,
QualType T = QualType());
-
+
const GRState *InvalidateRegion(const GRState *state, const MemRegion *R,
const Expr *E, unsigned Count);
@@ -57,8 +57,8 @@
Store scanForIvars(Stmt *B, const Decl* SelfDecl,
const MemRegion *SelfRegion, Store St);
-
- Store BindInternal(Store St, Loc loc, SVal V);
+
+ Store BindInternal(Store St, Loc loc, SVal V);
Store Remove(Store St, Loc loc);
Store getInitialStore(const LocationContext *InitLoc);
@@ -66,27 +66,27 @@
virtual Loc getLoc(const VarDecl* VD, const LocationContext *LC) {
return ValMgr.makeLoc(MRMgr.getVarRegion(VD, LC));
}
-
+
const GRState *BindCompoundLiteral(const GRState *state,
const CompoundLiteralExpr* cl,
SVal val) {
return state;
}
-
+
SVal getLValueVar(const GRState *state, const VarDecl *VD,
const LocationContext *LC);
SVal getLValueString(const GRState *state, const StringLiteral *S);
SVal getLValueCompoundLiteral(const GRState *state,
const CompoundLiteralExpr *CL);
SVal getLValueIvar(const GRState *state, const ObjCIvarDecl* D, SVal Base);
- SVal getLValueField(const GRState *state, SVal Base, const FieldDecl *D);
+ SVal getLValueField(const GRState *state, SVal Base, const FieldDecl *D);
SVal getLValueElement(const GRState *state, QualType elementType,
SVal Base, SVal Offset);
/// ArrayToPointer - Used by GRExprEngine::VistCast to handle implicit
/// conversions between arrays and pointers.
SVal ArrayToPointer(Loc Array) { return Array; }
-
+
/// RemoveDeadBindings - Scans a BasicStore of 'state' for dead values.
/// It updatees the GRState object in place with the values removed.
void RemoveDeadBindings(GRState &state, Stmt* Loc, SymbolReaper& SymReaper,
@@ -118,7 +118,7 @@
private:
ASTContext& getContext() { return StateMgr.getContext(); }
};
-
+
} // end anonymous namespace
@@ -131,7 +131,7 @@
return ValMgr.makeLoc(MRMgr.getVarRegion(VD, LC));
}
-SVal BasicStoreManager::getLValueString(const GRState *state,
+SVal BasicStoreManager::getLValueString(const GRState *state,
const StringLiteral* S) {
return ValMgr.makeLoc(MRMgr.getStringRegion(S));
}
@@ -144,7 +144,7 @@
SVal BasicStoreManager::getLValueIvar(const GRState *state,
const ObjCIvarDecl* D,
SVal Base) {
-
+
if (Base.isUnknownOrUndef())
return Base;
@@ -154,7 +154,7 @@
const MemRegion *BaseR = cast<loc::MemRegionVal>(BaseL).getRegion();
return ValMgr.makeLoc(MRMgr.getObjCIvarRegion(D, BaseR));
}
-
+
return UnknownVal();
}
@@ -163,10 +163,10 @@
if (Base.isUnknownOrUndef())
return Base;
-
- Loc BaseL = cast<Loc>(Base);
+
+ Loc BaseL = cast<Loc>(Base);
const MemRegion* BaseR = 0;
-
+
switch(BaseL.getSubKind()) {
case loc::GotoLabelKind:
return UndefinedVal();
@@ -174,7 +174,7 @@
case loc::MemRegionKind:
BaseR = cast<loc::MemRegionVal>(BaseL).getRegion();
break;
-
+
case loc::ConcreteIntKind:
// While these seem funny, this can happen through casts.
// FIXME: What we should return is the field offset. For example,
@@ -186,7 +186,7 @@
assert ("Unhandled Base.");
return Base;
}
-
+
return ValMgr.makeLoc(MRMgr.getFieldRegion(D, BaseR));
}
@@ -196,18 +196,18 @@
if (Base.isUnknownOrUndef())
return Base;
-
- Loc BaseL = cast<Loc>(Base);
+
+ Loc BaseL = cast<Loc>(Base);
const MemRegion* BaseR = 0;
-
+
switch(BaseL.getSubKind()) {
case loc::GotoLabelKind:
// Technically we can get here if people do funny things with casts.
return UndefinedVal();
-
+
case loc::MemRegionKind: {
const MemRegion *R = cast<loc::MemRegionVal>(BaseL).getRegion();
-
+
if (isa<ElementRegion>(R)) {
// int x;
// char* y = (char*) &x;
@@ -215,12 +215,12 @@
// y[0] = 'a';
return Base;
}
-
+
if (isa<TypedRegion>(R) || isa<SymbolicRegion>(R)) {
BaseR = R;
break;
}
-
+
break;
}
@@ -230,13 +230,13 @@
// add the field offset to the integer value. That way funny things
// like this work properly: &(((struct foo *) 0xa)->f)
return Base;
-
+
default:
assert ("Unhandled Base.");
return Base;
}
-
- if (BaseR) {
+
+ if (BaseR) {
return ValMgr.makeLoc(MRMgr.getElementRegion(elementType, UnknownVal(),
BaseR, getContext()));
}
@@ -246,38 +246,38 @@
static bool isHigherOrderRawPtr(QualType T, ASTContext &C) {
bool foundPointer = false;
- while (1) {
+ while (1) {
const PointerType *PT = T->getAs<PointerType>();
if (!PT) {
if (!foundPointer)
return false;
-
+
// intptr_t* or intptr_t**, etc?
if (T->isIntegerType() && C.getTypeSize(T) == C.getTypeSize(C.VoidPtrTy))
return true;
-
+
QualType X = C.getCanonicalType(T).getUnqualifiedType();
return X == C.VoidTy;
}
-
+
foundPointer = true;
T = PT->getPointeeType();
- }
+ }
}
-
+
SValuator::CastResult BasicStoreManager::Retrieve(const GRState *state,
Loc loc, QualType T) {
-
+
if (isa<UnknownVal>(loc))
return SValuator::CastResult(state, UnknownVal());
-
+
assert(!isa<UndefinedVal>(loc));
-
+
switch (loc.getSubKind()) {
case loc::MemRegionKind: {
const MemRegion* R = cast<loc::MemRegionVal>(loc).getRegion();
-
+
if (const ElementRegion *ER = dyn_cast<ElementRegion>(R)) {
// Just support void**, void***, intptr_t*, intptr_t**, etc., for now.
// This is needed to handle OSCompareAndSwapPtr() and friends.
@@ -286,45 +286,45 @@
if (!isHigherOrderRawPtr(T, Ctx))
return SValuator::CastResult(state, UnknownVal());
-
+
// FIXME: Should check for element 0.
// Otherwise, strip the element region.
R = ER->getSuperRegion();
}
-
+
if (!(isa<VarRegion>(R) || isa<ObjCIvarRegion>(R)))
return SValuator::CastResult(state, UnknownVal());
-
+
BindingsTy B = GetBindings(state->getStore());
BindingsTy::data_type *Val = B.lookup(R);
-
+
if (!Val)
break;
-
+
return CastRetrievedVal(*Val, state, cast<TypedRegion>(R), T);
}
-
+
case loc::ConcreteIntKind:
// Some clients may call GetSVal with such an option simply because
// they are doing a quick scan through their Locs (potentially to
// invalidate their bindings). Just return Undefined.
return SValuator::CastResult(state, UndefinedVal());
-
+
default:
assert (false && "Invalid Loc.");
break;
}
-
+
return SValuator::CastResult(state, UnknownVal());
}
-
-Store BasicStoreManager::BindInternal(Store store, Loc loc, SVal V) {
+
+Store BasicStoreManager::BindInternal(Store store, Loc loc, SVal V) {
if (isa<loc::ConcreteInt>(loc))
return store;
const MemRegion* R = cast<loc::MemRegionVal>(loc).getRegion();
ASTContext &C = StateMgr.getContext();
-
+
// Special case: handle store of pointer values (Loc) to pointers via
// a cast to intXX_t*, void*, etc. This is needed to handle
// OSCompareAndSwap32Barrier/OSCompareAndSwap64Barrier.
@@ -332,20 +332,20 @@
if (const ElementRegion *ER = dyn_cast<ElementRegion>(R)) {
// FIXME: Should check for index 0.
QualType T = ER->getLocationType(C);
-
+
if (isHigherOrderRawPtr(T, C))
R = ER->getSuperRegion();
- }
-
+ }
+
if (!(isa<VarRegion>(R) || isa<ObjCIvarRegion>(R)))
return store;
const TypedRegion *TyR = cast<TypedRegion>(R);
-
+
// Do not bind to arrays. We need to explicitly check for this so that
// we do not encounter any weirdness of trying to load/store from arrays.
if (TyR->isBoundable() && TyR->getValueType(C)->isArrayType())
- return store;
+ return store;
if (nonloc::LocAsInteger *X = dyn_cast<nonloc::LocAsInteger>(&V)) {
// Only convert 'V' to a location iff the underlying region type
@@ -354,7 +354,7 @@
// a pointer. We may wish to flag a type error here if the types
// are incompatible. This may also cause lots of breakage
// elsewhere. Food for thought.
- if (TyR->isBoundable() && Loc::IsLocType(TyR->getValueType(C)))
+ if (TyR->isBoundable() && Loc::IsLocType(TyR->getValueType(C)))
V = X->getLoc();
}
@@ -368,10 +368,10 @@
switch (loc.getSubKind()) {
case loc::MemRegionKind: {
const MemRegion* R = cast<loc::MemRegionVal>(loc).getRegion();
-
+
if (!(isa<VarRegion>(R) || isa<ObjCIvarRegion>(R)))
return store;
-
+
return VBFactory.Remove(GetBindings(store), R).getRoot();
}
default:
@@ -384,11 +384,11 @@
BasicStoreManager::RemoveDeadBindings(GRState &state, Stmt* Loc,
SymbolReaper& SymReaper,
llvm::SmallVectorImpl<const MemRegion*>& RegionRoots)
-{
+{
Store store = state.getStore();
BindingsTy B = GetBindings(store);
typedef SVal::symbol_iterator symbol_iterator;
-
+
// Iterate over the variable bindings.
for (BindingsTy::iterator I=B.begin(), E=B.end(); I!=E ; ++I) {
if (const VarRegion *VR = dyn_cast<VarRegion>(I.getKey())) {
@@ -402,20 +402,20 @@
}
else
continue;
-
+
// Mark the bindings in the data as live.
SVal X = I.getData();
for (symbol_iterator SI=X.symbol_begin(), SE=X.symbol_end(); SI!=SE; ++SI)
SymReaper.markLive(*SI);
}
-
+
// Scan for live variables and live symbols.
llvm::SmallPtrSet<const MemRegion*, 10> Marked;
-
+
while (!RegionRoots.empty()) {
const MemRegion* MR = RegionRoots.back();
RegionRoots.pop_back();
-
+
while (MR) {
if (const SymbolicRegion* SymR = dyn_cast<SymbolicRegion>(MR)) {
SymReaper.markLive(SymR->getSymbol());
@@ -424,17 +424,17 @@
else if (isa<VarRegion>(MR) || isa<ObjCIvarRegion>(MR)) {
if (Marked.count(MR))
break;
-
- Marked.insert(MR);
+
+ Marked.insert(MR);
SVal X = Retrieve(&state, loc::MemRegionVal(MR)).getSVal();
-
+
// FIXME: We need to handle symbols nested in region definitions.
for (symbol_iterator SI=X.symbol_begin(),SE=X.symbol_end();SI!=SE;++SI)
SymReaper.markLive(*SI);
-
+
if (!isa<loc::MemRegionVal>(X))
break;
-
+
const loc::MemRegionVal& LVD = cast<loc::MemRegionVal>(X);
RegionRoots.push_back(LVD.getRegion());
break;
@@ -445,15 +445,15 @@
break;
}
}
-
- // Remove dead variable bindings.
+
+ // Remove dead variable bindings.
for (BindingsTy::iterator I=B.begin(), E=B.end(); I!=E ; ++I) {
const MemRegion* R = I.getKey();
-
+
if (!Marked.count(R)) {
store = Remove(store, ValMgr.makeLoc(R));
SVal X = I.getData();
-
+
for (symbol_iterator SI=X.symbol_begin(), SE=X.symbol_end(); SI!=SE; ++SI)
SymReaper.maybeDead(*SI);
}
@@ -467,10 +467,10 @@
const MemRegion *SelfRegion, Store St) {
for (Stmt::child_iterator CI=B->child_begin(), CE=B->child_end();
CI != CE; ++CI) {
-
+
if (!*CI)
continue;
-
+
// Check if the statement is an ivar reference. We only
// care about self.ivar.
if (ObjCIvarRefExpr *IV = dyn_cast<ObjCIvarRefExpr>(*CI)) {
@@ -478,8 +478,8 @@
if (const DeclRefExpr *DR = dyn_cast<DeclRefExpr>(Base)) {
if (DR->getDecl() == SelfDecl) {
const MemRegion *IVR = MRMgr.getObjCIvarRegion(IV->getDecl(),
- SelfRegion);
- SVal X = ValMgr.getRegionValueSymbolVal(IVR);
+ SelfRegion);
+ SVal X = ValMgr.getRegionValueSymbolVal(IVR);
St = BindInternal(St, ValMgr.makeLoc(IVR), X);
}
}
@@ -487,11 +487,11 @@
else
St = scanForIvars(*CI, SelfDecl, SelfRegion, St);
}
-
+
return St;
}
-Store BasicStoreManager::getInitialStore(const LocationContext *InitLoc) {
+Store BasicStoreManager::getInitialStore(const LocationContext *InitLoc) {
// The LiveVariables information already has a compilation of all VarDecls
// used in the function. Iterate through this set, and "symbolicate"
// any VarDecl whose value originally comes from outside the function.
@@ -504,7 +504,7 @@
// Handle implicit parameters.
if (ImplicitParamDecl* PD = dyn_cast<ImplicitParamDecl>(ND)) {
- const Decl& CD = *InitLoc->getDecl();
+ const Decl& CD = *InitLoc->getDecl();
if (const ObjCMethodDecl* MD = dyn_cast<ObjCMethodDecl>(&CD)) {
if (MD->getSelfDecl() == PD) {
// FIXME: Just use a symbolic region, and remove ObjCObjectRegion
@@ -512,10 +512,10 @@
const ObjCObjectRegion *SelfRegion =
MRMgr.getObjCObjectRegion(MD->getClassInterface(),
MRMgr.getHeapRegion());
-
+
St = BindInternal(St, ValMgr.makeLoc(MRMgr.getVarRegion(PD, InitLoc)),
ValMgr.makeLoc(SelfRegion));
-
+
// Scan the method for ivar references. While this requires an
// entire AST scan, the cost should not be high in practice.
St = scanForIvars(MD->getBody(), PD, SelfRegion, St);
@@ -543,9 +543,9 @@
Store BasicStoreManager::BindDeclInternal(Store store, const VarDecl* VD,
const LocationContext *LC,
SVal* InitVal) {
-
+
BasicValueFactory& BasicVals = StateMgr.getBasicVals();
-
+
// BasicStore does not model arrays and structs.
if (VD->getType()->isArrayType() || VD->getType()->isStructureType())
return store;
@@ -560,14 +560,14 @@
// Static global variables should not be visited here.
assert(!(VD->getStorageClass() == VarDecl::Static &&
VD->isFileVarDecl()));
-
+
// Process static variables.
if (VD->getStorageClass() == VarDecl::Static) {
// C99: 6.7.8 Initialization
// If an object that has static storage duration is not initialized
- // explicitly, then:
- // —if it has pointer type, it is initialized to a null pointer;
- // —if it has arithmetic type, it is initialized to (positive or
+ // explicitly, then:
+ // —if it has pointer type, it is initialized to a null pointer;
+ // —if it has arithmetic type, it is initialized to (positive or
// unsigned) zero;
if (!InitVal) {
QualType T = VD->getType();
@@ -598,18 +598,18 @@
void BasicStoreManager::print(Store store, llvm::raw_ostream& Out,
const char* nl, const char *sep) {
-
+
BindingsTy B = GetBindings(store);
Out << "Variables:" << nl;
-
+
bool isFirst = true;
-
+
for (BindingsTy::iterator I=B.begin(), E=B.end(); I != E; ++I) {
if (isFirst)
isFirst = false;
else
Out << nl;
-
+
Out << ' ' << I.getKey() << " : " << I.getData();
}
}
@@ -617,7 +617,7 @@
void BasicStoreManager::iterBindings(Store store, BindingsHandler& f) {
BindingsTy B = GetBindings(store);
-
+
for (BindingsTy::iterator I=B.begin(), E=B.end(); I != E; ++I)
f.HandleBinding(*this, store, I.getKey(), I.getData());
@@ -634,10 +634,10 @@
const Expr *E,
unsigned Count) {
R = R->getBaseRegion();
-
+
if (!(isa<VarRegion>(R) || isa<ObjCIvarRegion>(R)))
return state;
-
+
QualType T = cast<TypedRegion>(R)->getValueType(R->getContext());
SVal V = ValMgr.getConjuredSymbolVal(E, T, Count);
return Bind(state, loc::MemRegionVal(R), V);
diff --git a/lib/Analysis/BasicValueFactory.cpp b/lib/Analysis/BasicValueFactory.cpp
index 5ed6d22..b33c277 100644
--- a/lib/Analysis/BasicValueFactory.cpp
+++ b/lib/Analysis/BasicValueFactory.cpp
@@ -8,7 +8,7 @@
//===----------------------------------------------------------------------===//
//
// This file defines BasicValueFactory, a class that manages the lifetime
-// of APSInt objects and symbolic constraints used by GRExprEngine
+// of APSInt objects and symbolic constraints used by GRExprEngine
// and related classes.
//
//===----------------------------------------------------------------------===//
@@ -17,7 +17,7 @@
using namespace clang;
-void CompoundValData::Profile(llvm::FoldingSetNodeID& ID, QualType T,
+void CompoundValData::Profile(llvm::FoldingSetNodeID& ID, QualType T,
llvm::ImmutableList<SVal> L) {
T.Profile(ID);
ID.AddPointer(L.getInternalPointer());
@@ -40,7 +40,7 @@
ID.AddPointer( (void*) X.second);
}
};
-
+
template<> struct FoldingSetTrait<SValPair> {
static inline void Profile(const SValPair& X, llvm::FoldingSetNodeID& ID) {
X.first.Profile(ID);
@@ -61,8 +61,8 @@
// frees an aux. memory allocated to represent very large constants.
for (APSIntSetTy::iterator I=APSIntSet.begin(), E=APSIntSet.end(); I!=E; ++I)
I->getValue().~APSInt();
-
- delete (PersistentSValsTy*) PersistentSVals;
+
+ delete (PersistentSValsTy*) PersistentSVals;
delete (PersistentSValPairsTy*) PersistentSValPairs;
}
@@ -70,16 +70,16 @@
llvm::FoldingSetNodeID ID;
void* InsertPos;
typedef llvm::FoldingSetNodeWrapper<llvm::APSInt> FoldNodeTy;
-
+
X.Profile(ID);
FoldNodeTy* P = APSIntSet.FindNodeOrInsertPos(ID, InsertPos);
-
- if (!P) {
+
+ if (!P) {
P = (FoldNodeTy*) BPAlloc.Allocate<FoldNodeTy>();
new (P) FoldNodeTy(X);
APSIntSet.InsertNode(P, InsertPos);
}
-
+
return *P;
}
@@ -92,22 +92,22 @@
const llvm::APSInt& BasicValueFactory::getValue(uint64_t X, unsigned BitWidth,
bool isUnsigned) {
llvm::APSInt V(BitWidth, isUnsigned);
- V = X;
+ V = X;
return getValue(V);
}
const llvm::APSInt& BasicValueFactory::getValue(uint64_t X, QualType T) {
-
+
unsigned bits = Ctx.getTypeSize(T);
llvm::APSInt V(bits, T->isUnsignedIntegerType() || Loc::IsLocType(T));
V = X;
return getValue(V);
}
-const CompoundValData*
+const CompoundValData*
BasicValueFactory::getCompoundValData(QualType T,
llvm::ImmutableList<SVal> Vals) {
-
+
llvm::FoldingSetNodeID ID;
CompoundValData::Profile(ID, T, Vals);
void* InsertPos;
@@ -129,104 +129,104 @@
llvm::FoldingSetNodeID ID;
LazyCompoundValData::Profile(ID, state, region);
void* InsertPos;
-
+
LazyCompoundValData *D =
LazyCompoundValDataSet.FindNodeOrInsertPos(ID, InsertPos);
-
+
if (!D) {
D = (LazyCompoundValData*) BPAlloc.Allocate<LazyCompoundValData>();
new (D) LazyCompoundValData(state, region);
LazyCompoundValDataSet.InsertNode(D, InsertPos);
}
-
+
return D;
}
const llvm::APSInt*
BasicValueFactory::EvaluateAPSInt(BinaryOperator::Opcode Op,
const llvm::APSInt& V1, const llvm::APSInt& V2) {
-
+
switch (Op) {
default:
assert (false && "Invalid Opcode.");
-
+
case BinaryOperator::Mul:
return &getValue( V1 * V2 );
-
+
case BinaryOperator::Div:
return &getValue( V1 / V2 );
-
+
case BinaryOperator::Rem:
return &getValue( V1 % V2 );
-
+
case BinaryOperator::Add:
return &getValue( V1 + V2 );
-
+
case BinaryOperator::Sub:
return &getValue( V1 - V2 );
-
+
case BinaryOperator::Shl: {
// FIXME: This logic should probably go higher up, where we can
// test these conditions symbolically.
-
+
// FIXME: Expand these checks to include all undefined behavior.
-
+
if (V2.isSigned() && V2.isNegative())
return NULL;
-
+
uint64_t Amt = V2.getZExtValue();
-
+
if (Amt > V1.getBitWidth())
return NULL;
-
+
return &getValue( V1.operator<<( (unsigned) Amt ));
}
-
+
case BinaryOperator::Shr: {
-
+
// FIXME: This logic should probably go higher up, where we can
// test these conditions symbolically.
-
+
// FIXME: Expand these checks to include all undefined behavior.
-
+
if (V2.isSigned() && V2.isNegative())
return NULL;
-
+
uint64_t Amt = V2.getZExtValue();
-
+
if (Amt > V1.getBitWidth())
return NULL;
-
+
return &getValue( V1.operator>>( (unsigned) Amt ));
}
-
+
case BinaryOperator::LT:
return &getTruthValue( V1 < V2 );
-
+
case BinaryOperator::GT:
return &getTruthValue( V1 > V2 );
-
+
case BinaryOperator::LE:
return &getTruthValue( V1 <= V2 );
-
+
case BinaryOperator::GE:
return &getTruthValue( V1 >= V2 );
-
+
case BinaryOperator::EQ:
return &getTruthValue( V1 == V2 );
-
+
case BinaryOperator::NE:
return &getTruthValue( V1 != V2 );
-
+
// Note: LAnd, LOr, Comma are handled specially by higher-level logic.
-
+
case BinaryOperator::And:
return &getValue( V1 & V2 );
-
+
case BinaryOperator::Or:
return &getValue( V1 | V2 );
-
+
case BinaryOperator::Xor:
return &getValue( V1 ^ V2 );
}
@@ -235,21 +235,21 @@
const std::pair<SVal, uintptr_t>&
BasicValueFactory::getPersistentSValWithData(const SVal& V, uintptr_t Data) {
-
+
// Lazily create the folding set.
if (!PersistentSVals) PersistentSVals = new PersistentSValsTy();
-
+
llvm::FoldingSetNodeID ID;
void* InsertPos;
V.Profile(ID);
ID.AddPointer((void*) Data);
-
+
PersistentSValsTy& Map = *((PersistentSValsTy*) PersistentSVals);
-
+
typedef llvm::FoldingSetNodeWrapper<SValData> FoldNodeTy;
FoldNodeTy* P = Map.FindNodeOrInsertPos(ID, InsertPos);
-
- if (!P) {
+
+ if (!P) {
P = (FoldNodeTy*) BPAlloc.Allocate<FoldNodeTy>();
new (P) FoldNodeTy(std::make_pair(V, Data));
Map.InsertNode(P, InsertPos);
@@ -260,31 +260,31 @@
const std::pair<SVal, SVal>&
BasicValueFactory::getPersistentSValPair(const SVal& V1, const SVal& V2) {
-
+
// Lazily create the folding set.
if (!PersistentSValPairs) PersistentSValPairs = new PersistentSValPairsTy();
-
+
llvm::FoldingSetNodeID ID;
void* InsertPos;
V1.Profile(ID);
V2.Profile(ID);
-
+
PersistentSValPairsTy& Map = *((PersistentSValPairsTy*) PersistentSValPairs);
-
+
typedef llvm::FoldingSetNodeWrapper<SValPair> FoldNodeTy;
FoldNodeTy* P = Map.FindNodeOrInsertPos(ID, InsertPos);
-
- if (!P) {
+
+ if (!P) {
P = (FoldNodeTy*) BPAlloc.Allocate<FoldNodeTy>();
new (P) FoldNodeTy(std::make_pair(V1, V2));
Map.InsertNode(P, InsertPos);
}
-
+
return P->getValue();
}
const SVal* BasicValueFactory::getPersistentSVal(SVal X) {
return &getPersistentSValWithData(X, 0).first;
-}
+}
diff --git a/lib/Analysis/BugReporter.cpp b/lib/Analysis/BugReporter.cpp
index e54a500..23ca53d 100644
--- a/lib/Analysis/BugReporter.cpp
+++ b/lib/Analysis/BugReporter.cpp
@@ -53,7 +53,7 @@
return SP->getStmt();
else if (const BlockEdge* BE = dyn_cast<BlockEdge>(&P))
return BE->getSrc()->getTerminator();
-
+
return 0;
}
@@ -71,7 +71,7 @@
for (N = GetPredecessorNode(N); N; N = GetPredecessorNode(N))
if (const Stmt *S = GetStmt(N->getLocation()))
return S;
-
+
return 0;
}
@@ -92,30 +92,30 @@
default:
break;
}
-
+
// Some expressions don't have locations.
if (S->getLocStart().isInvalid())
continue;
-
+
return S;
}
-
+
return 0;
}
static inline const Stmt*
-GetCurrentOrPreviousStmt(const ExplodedNode* N) {
+GetCurrentOrPreviousStmt(const ExplodedNode* N) {
if (const Stmt *S = GetStmt(N->getLocation()))
return S;
-
+
return GetPreviousStmt(N);
}
-
+
static inline const Stmt*
-GetCurrentOrNextStmt(const ExplodedNode* N) {
+GetCurrentOrNextStmt(const ExplodedNode* N) {
if (const Stmt *S = GetStmt(N->getLocation()))
return S;
-
+
return GetNextStmt(N);
}
@@ -132,63 +132,62 @@
public:
NodeMapClosure(NodeBackMap *m) : M(*m) {}
~NodeMapClosure() {}
-
+
const ExplodedNode* getOriginalNode(const ExplodedNode* N) {
NodeBackMap::iterator I = M.find(N);
return I == M.end() ? 0 : I->second;
}
};
-
+
class VISIBILITY_HIDDEN PathDiagnosticBuilder : public BugReporterContext {
BugReport *R;
PathDiagnosticClient *PDC;
llvm::OwningPtr<ParentMap> PM;
NodeMapClosure NMC;
-public:
+public:
PathDiagnosticBuilder(GRBugReporter &br,
- BugReport *r, NodeBackMap *Backmap,
+ BugReport *r, NodeBackMap *Backmap,
PathDiagnosticClient *pdc)
: BugReporterContext(br),
- R(r), PDC(pdc), NMC(Backmap)
- {
+ R(r), PDC(pdc), NMC(Backmap) {
addVisitor(R);
}
-
+
PathDiagnosticLocation ExecutionContinues(const ExplodedNode* N);
-
+
PathDiagnosticLocation ExecutionContinues(llvm::raw_string_ostream& os,
const ExplodedNode* N);
-
+
ParentMap& getParentMap() {
if (PM.get() == 0)
PM.reset(new ParentMap(getCodeDecl().getBody()));
return *PM.get();
}
-
+
const Stmt *getParent(const Stmt *S) {
return getParentMap().getParent(S);
}
-
+
virtual NodeMapClosure& getNodeResolver() { return NMC; }
BugReport& getReport() { return *R; }
PathDiagnosticLocation getEnclosingStmtLocation(const Stmt *S);
-
+
PathDiagnosticLocation
getEnclosingStmtLocation(const PathDiagnosticLocation &L) {
if (const Stmt *S = L.asStmt())
return getEnclosingStmtLocation(S);
-
+
return L;
}
-
+
PathDiagnosticClient::PathGenerationScheme getGenerationScheme() const {
return PDC ? PDC->getGenerationScheme() : PathDiagnosticClient::Extensive;
}
bool supportsLogicalOpControlFlow() const {
return PDC ? PDC->supportsLogicalOpControlFlow() : true;
- }
+ }
};
} // end anonymous namespace
@@ -197,10 +196,10 @@
if (const Stmt *S = GetNextStmt(N))
return PathDiagnosticLocation(S, getSourceManager());
- return FullSourceLoc(N->getLocationContext()->getDecl()->getBodyRBrace(),
+ return FullSourceLoc(N->getLocationContext()->getDecl()->getBodyRBrace(),
getSourceManager());
}
-
+
PathDiagnosticLocation
PathDiagnosticBuilder::ExecutionContinues(llvm::raw_string_ostream& os,
const ExplodedNode* N) {
@@ -208,9 +207,9 @@
// Slow, but probably doesn't matter.
if (os.str().empty())
os << ' ';
-
+
const PathDiagnosticLocation &Loc = ExecutionContinues(N);
-
+
if (Loc.asStmt())
os << "Execution continues on line "
<< getSourceManager().getInstantiationLineNumber(Loc.asLocation())
@@ -219,16 +218,16 @@
os << "Execution jumps to the end of the "
<< (isa<ObjCMethodDecl>(N->getLocationContext()->getDecl()) ?
"method" : "function") << '.';
-
+
return Loc;
}
static bool IsNested(const Stmt *S, ParentMap &PM) {
if (isa<Expr>(S) && PM.isConsumedExpr(cast<Expr>(S)))
return true;
-
+
const Stmt *Parent = PM.getParentIgnoreParens(S);
-
+
if (Parent)
switch (Parent->getStmtClass()) {
case Stmt::ForStmtClass:
@@ -238,29 +237,29 @@
default:
break;
}
-
- return false;
+
+ return false;
}
PathDiagnosticLocation
PathDiagnosticBuilder::getEnclosingStmtLocation(const Stmt *S) {
assert(S && "Null Stmt* passed to getEnclosingStmtLocation");
- ParentMap &P = getParentMap();
+ ParentMap &P = getParentMap();
SourceManager &SMgr = getSourceManager();
while (IsNested(S, P)) {
const Stmt *Parent = P.getParentIgnoreParens(S);
-
+
if (!Parent)
break;
-
+
switch (Parent->getStmtClass()) {
case Stmt::BinaryOperatorClass: {
const BinaryOperator *B = cast<BinaryOperator>(Parent);
if (B->isLogicalOp())
return PathDiagnosticLocation(S, SMgr);
break;
- }
+ }
case Stmt::CompoundStmtClass:
case Stmt::StmtExprClass:
return PathDiagnosticLocation(S, SMgr);
@@ -270,20 +269,20 @@
if (cast<ChooseExpr>(Parent)->getCond() == S)
return PathDiagnosticLocation(Parent, SMgr);
else
- return PathDiagnosticLocation(S, SMgr);
+ return PathDiagnosticLocation(S, SMgr);
case Stmt::ConditionalOperatorClass:
// For '?', if we are referring to condition, just have the edge point
// to the entire '?' expression.
if (cast<ConditionalOperator>(Parent)->getCond() == S)
return PathDiagnosticLocation(Parent, SMgr);
else
- return PathDiagnosticLocation(S, SMgr);
+ return PathDiagnosticLocation(S, SMgr);
case Stmt::DoStmtClass:
- return PathDiagnosticLocation(S, SMgr);
+ return PathDiagnosticLocation(S, SMgr);
case Stmt::ForStmtClass:
if (cast<ForStmt>(Parent)->getBody() == S)
- return PathDiagnosticLocation(S, SMgr);
- break;
+ return PathDiagnosticLocation(S, SMgr);
+ break;
case Stmt::IfStmtClass:
if (cast<IfStmt>(Parent)->getCond() != S)
return PathDiagnosticLocation(S, SMgr);
@@ -302,7 +301,7 @@
S = Parent;
}
-
+
assert(S && "Cannot have null Stmt for PathDiagnosticLocation");
// Special case: DeclStmts can appear in for statement declarations, in which
@@ -315,8 +314,8 @@
return PathDiagnosticLocation(Parent, SMgr);
default:
break;
- }
- }
+ }
+ }
}
else if (isa<BinaryOperator>(S)) {
// Special case: the binary operator represents the initialization
@@ -339,84 +338,84 @@
static const VarDecl*
GetMostRecentVarDeclBinding(const ExplodedNode* N,
GRStateManager& VMgr, SVal X) {
-
+
for ( ; N ; N = N->pred_empty() ? 0 : *N->pred_begin()) {
-
+
ProgramPoint P = N->getLocation();
-
+
if (!isa<PostStmt>(P))
continue;
-
+
const DeclRefExpr* DR = dyn_cast<DeclRefExpr>(cast<PostStmt>(P).getStmt());
-
+
if (!DR)
continue;
-
+
SVal Y = N->getState()->getSVal(DR);
-
+
if (X != Y)
continue;
-
+
const VarDecl* VD = dyn_cast<VarDecl>(DR->getDecl());
-
+
if (!VD)
continue;
-
+
return VD;
}
-
+
return 0;
}
namespace {
-class VISIBILITY_HIDDEN NotableSymbolHandler
+class VISIBILITY_HIDDEN NotableSymbolHandler
: public StoreManager::BindingsHandler {
-
+
SymbolRef Sym;
const GRState* PrevSt;
const Stmt* S;
GRStateManager& VMgr;
const ExplodedNode* Pred;
- PathDiagnostic& PD;
+ PathDiagnostic& PD;
BugReporter& BR;
-
+
public:
-
+
NotableSymbolHandler(SymbolRef sym, const GRState* prevst, const Stmt* s,
GRStateManager& vmgr, const ExplodedNode* pred,
PathDiagnostic& pd, BugReporter& br)
: Sym(sym), PrevSt(prevst), S(s), VMgr(vmgr), Pred(pred), PD(pd), BR(br) {}
-
+
bool HandleBinding(StoreManager& SMgr, Store store, const MemRegion* R,
SVal V) {
-
+
SymbolRef ScanSym = V.getAsSymbol();
-
+
if (ScanSym != Sym)
return true;
-
- // Check if the previous state has this binding.
+
+ // Check if the previous state has this binding.
SVal X = PrevSt->getSVal(loc::MemRegionVal(R));
-
+
if (X == V) // Same binding?
return true;
-
+
// Different binding. Only handle assignments for now. We don't pull
- // this check out of the loop because we will eventually handle other
+ // this check out of the loop because we will eventually handle other
// cases.
-
+
VarDecl *VD = 0;
-
+
if (const BinaryOperator* B = dyn_cast<BinaryOperator>(S)) {
if (!B->isAssignmentOp())
return true;
-
+
// What variable did we assign to?
DeclRefExpr* DR = dyn_cast<DeclRefExpr>(B->getLHS()->IgnoreParenCasts());
-
+
if (!DR)
return true;
-
+
VD = dyn_cast<VarDecl>(DR->getDecl());
}
else if (const DeclStmt* DS = dyn_cast<DeclStmt>(S)) {
@@ -425,28 +424,28 @@
// holds by contruction in the CFG.
VD = dyn_cast<VarDecl>(*DS->decl_begin());
}
-
+
if (!VD)
return true;
-
+
// What is the most recently referenced variable with this binding?
const VarDecl* MostRecent = GetMostRecentVarDeclBinding(Pred, VMgr, V);
-
+
if (!MostRecent)
return true;
-
+
// Create the diagnostic.
FullSourceLoc L(S->getLocStart(), BR.getSourceManager());
-
+
if (Loc::IsLocType(VD->getType())) {
std::string msg = "'" + std::string(VD->getNameAsString()) +
"' now aliases '" + MostRecent->getNameAsString() + "'";
-
+
PD.push_front(new PathDiagnosticEventPiece(L, msg));
}
-
+
return true;
- }
+ }
};
}
@@ -454,13 +453,13 @@
const Stmt* S,
SymbolRef Sym, BugReporter& BR,
PathDiagnostic& PD) {
-
+
const ExplodedNode* Pred = N->pred_empty() ? 0 : *N->pred_begin();
const GRState* PrevSt = Pred ? Pred->getState() : 0;
-
+
if (!PrevSt)
return;
-
+
// Look at the region bindings of the current state that map to the
// specified symbol. Are any of them not in the previous state?
GRStateManager& VMgr = cast<GRBugReporter>(BR).getStateManager();
@@ -471,34 +470,34 @@
namespace {
class VISIBILITY_HIDDEN ScanNotableSymbols
: public StoreManager::BindingsHandler {
-
+
llvm::SmallSet<SymbolRef, 10> AlreadyProcessed;
const ExplodedNode* N;
const Stmt* S;
GRBugReporter& BR;
PathDiagnostic& PD;
-
+
public:
ScanNotableSymbols(const ExplodedNode* n, const Stmt* s,
GRBugReporter& br, PathDiagnostic& pd)
: N(n), S(s), BR(br), PD(pd) {}
-
+
bool HandleBinding(StoreManager& SMgr, Store store,
const MemRegion* R, SVal V) {
-
+
SymbolRef ScanSym = V.getAsSymbol();
-
+
if (!ScanSym)
return true;
-
+
if (!BR.isNotable(ScanSym))
return true;
-
+
if (AlreadyProcessed.count(ScanSym))
return true;
-
+
AlreadyProcessed.insert(ScanSym);
-
+
HandleNotableSymbol(N, S, ScanSym, BR, PD);
return true;
}
@@ -516,54 +515,54 @@
const ExplodedNode *N) {
SourceManager& SMgr = PDB.getSourceManager();
- const ExplodedNode* NextNode = N->pred_empty()
+ const ExplodedNode* NextNode = N->pred_empty()
? NULL : *(N->pred_begin());
while (NextNode) {
- N = NextNode;
+ N = NextNode;
NextNode = GetPredecessorNode(N);
-
+
ProgramPoint P = N->getLocation();
-
+
if (const BlockEdge* BE = dyn_cast<BlockEdge>(&P)) {
CFGBlock* Src = BE->getSrc();
CFGBlock* Dst = BE->getDst();
Stmt* T = Src->getTerminator();
-
+
if (!T)
continue;
-
+
FullSourceLoc Start(T->getLocStart(), SMgr);
-
+
switch (T->getStmtClass()) {
default:
break;
-
+
case Stmt::GotoStmtClass:
- case Stmt::IndirectGotoStmtClass: {
+ case Stmt::IndirectGotoStmtClass: {
const Stmt* S = GetNextStmt(N);
-
+
if (!S)
continue;
-
+
std::string sbuf;
- llvm::raw_string_ostream os(sbuf);
+ llvm::raw_string_ostream os(sbuf);
const PathDiagnosticLocation &End = PDB.getEnclosingStmtLocation(S);
-
+
os << "Control jumps to line "
<< End.asLocation().getInstantiationLineNumber();
PD.push_front(new PathDiagnosticControlFlowPiece(Start, End,
os.str()));
break;
}
-
- case Stmt::SwitchStmtClass: {
+
+ case Stmt::SwitchStmtClass: {
// Figure out what case arm we took.
std::string sbuf;
llvm::raw_string_ostream os(sbuf);
-
+
if (Stmt* S = Dst->getLabel()) {
PathDiagnosticLocation End(S, SMgr);
-
+
switch (S->getStmtClass()) {
default:
os << "No cases match in the switch statement. "
@@ -574,21 +573,21 @@
os << "Control jumps to the 'default' case at line "
<< End.asLocation().getInstantiationLineNumber();
break;
-
+
case Stmt::CaseStmtClass: {
- os << "Control jumps to 'case ";
- CaseStmt* Case = cast<CaseStmt>(S);
+ os << "Control jumps to 'case ";
+ CaseStmt* Case = cast<CaseStmt>(S);
Expr* LHS = Case->getLHS()->IgnoreParenCasts();
-
- // Determine if it is an enum.
+
+ // Determine if it is an enum.
bool GetRawInt = true;
-
+
if (DeclRefExpr* DR = dyn_cast<DeclRefExpr>(LHS)) {
// FIXME: Maybe this should be an assertion. Are there cases
// were it is not an EnumConstantDecl?
EnumConstantDecl* D =
dyn_cast<EnumConstantDecl>(DR->getDecl());
-
+
if (D) {
GetRawInt = false;
os << D->getNameAsString();
@@ -608,14 +607,14 @@
}
else {
os << "'Default' branch taken. ";
- const PathDiagnosticLocation &End = PDB.ExecutionContinues(os, N);
+ const PathDiagnosticLocation &End = PDB.ExecutionContinues(os, N);
PD.push_front(new PathDiagnosticControlFlowPiece(Start, End,
os.str()));
}
-
+
break;
}
-
+
case Stmt::BreakStmtClass:
case Stmt::ContinueStmtClass: {
std::string sbuf;
@@ -625,117 +624,117 @@
os.str()));
break;
}
-
+
// Determine control-flow for ternary '?'.
case Stmt::ConditionalOperatorClass: {
std::string sbuf;
llvm::raw_string_ostream os(sbuf);
os << "'?' condition is ";
-
+
if (*(Src->succ_begin()+1) == Dst)
os << "false";
else
os << "true";
-
+
PathDiagnosticLocation End = PDB.ExecutionContinues(N);
-
+
if (const Stmt *S = End.asStmt())
End = PDB.getEnclosingStmtLocation(S);
-
+
PD.push_front(new PathDiagnosticControlFlowPiece(Start, End,
os.str()));
break;
}
-
+
// Determine control-flow for short-circuited '&&' and '||'.
case Stmt::BinaryOperatorClass: {
if (!PDB.supportsLogicalOpControlFlow())
break;
-
+
BinaryOperator *B = cast<BinaryOperator>(T);
std::string sbuf;
llvm::raw_string_ostream os(sbuf);
os << "Left side of '";
-
+
if (B->getOpcode() == BinaryOperator::LAnd) {
os << "&&" << "' is ";
-
+
if (*(Src->succ_begin()+1) == Dst) {
os << "false";
PathDiagnosticLocation End(B->getLHS(), SMgr);
PathDiagnosticLocation Start(B->getOperatorLoc(), SMgr);
PD.push_front(new PathDiagnosticControlFlowPiece(Start, End,
os.str()));
- }
+ }
else {
os << "true";
PathDiagnosticLocation Start(B->getLHS(), SMgr);
PathDiagnosticLocation End = PDB.ExecutionContinues(N);
PD.push_front(new PathDiagnosticControlFlowPiece(Start, End,
os.str()));
- }
+ }
}
else {
assert(B->getOpcode() == BinaryOperator::LOr);
os << "||" << "' is ";
-
+
if (*(Src->succ_begin()+1) == Dst) {
os << "false";
PathDiagnosticLocation Start(B->getLHS(), SMgr);
PathDiagnosticLocation End = PDB.ExecutionContinues(N);
PD.push_front(new PathDiagnosticControlFlowPiece(Start, End,
- os.str()));
+ os.str()));
}
else {
os << "true";
PathDiagnosticLocation End(B->getLHS(), SMgr);
PathDiagnosticLocation Start(B->getOperatorLoc(), SMgr);
PD.push_front(new PathDiagnosticControlFlowPiece(Start, End,
- os.str()));
+ os.str()));
}
}
-
+
break;
}
-
- case Stmt::DoStmtClass: {
+
+ case Stmt::DoStmtClass: {
if (*(Src->succ_begin()) == Dst) {
std::string sbuf;
llvm::raw_string_ostream os(sbuf);
-
+
os << "Loop condition is true. ";
PathDiagnosticLocation End = PDB.ExecutionContinues(os, N);
-
+
if (const Stmt *S = End.asStmt())
End = PDB.getEnclosingStmtLocation(S);
-
+
PD.push_front(new PathDiagnosticControlFlowPiece(Start, End,
os.str()));
}
else {
PathDiagnosticLocation End = PDB.ExecutionContinues(N);
-
+
if (const Stmt *S = End.asStmt())
End = PDB.getEnclosingStmtLocation(S);
-
+
PD.push_front(new PathDiagnosticControlFlowPiece(Start, End,
"Loop condition is false. Exiting loop"));
}
-
+
break;
}
-
+
case Stmt::WhileStmtClass:
- case Stmt::ForStmtClass: {
+ case Stmt::ForStmtClass: {
if (*(Src->succ_begin()+1) == Dst) {
std::string sbuf;
llvm::raw_string_ostream os(sbuf);
-
+
os << "Loop condition is false. ";
PathDiagnosticLocation End = PDB.ExecutionContinues(os, N);
if (const Stmt *S = End.asStmt())
End = PDB.getEnclosingStmtLocation(S);
-
+
PD.push_front(new PathDiagnosticControlFlowPiece(Start, End,
os.str()));
}
@@ -743,32 +742,32 @@
PathDiagnosticLocation End = PDB.ExecutionContinues(N);
if (const Stmt *S = End.asStmt())
End = PDB.getEnclosingStmtLocation(S);
-
+
PD.push_front(new PathDiagnosticControlFlowPiece(Start, End,
"Loop condition is true. Entering loop body"));
}
-
+
break;
}
-
+
case Stmt::IfStmtClass: {
PathDiagnosticLocation End = PDB.ExecutionContinues(N);
-
+
if (const Stmt *S = End.asStmt())
End = PDB.getEnclosingStmtLocation(S);
-
+
if (*(Src->succ_begin()+1) == Dst)
PD.push_front(new PathDiagnosticControlFlowPiece(Start, End,
"Taking false branch"));
- else
+ else
PD.push_front(new PathDiagnosticControlFlowPiece(Start, End,
"Taking true branch"));
-
+
break;
}
}
}
-
+
if (NextNode) {
for (BugReporterContext::visitor_iterator I = PDB.visitor_begin(),
E = PDB.visitor_end(); I!=E; ++I) {
@@ -776,15 +775,15 @@
PD.push_front(p);
}
}
-
- if (const PostStmt* PS = dyn_cast<PostStmt>(&P)) {
+
+ if (const PostStmt* PS = dyn_cast<PostStmt>(&P)) {
// Scan the region bindings, and see if a "notable" symbol has a new
// lval binding.
ScanNotableSymbols SNS(N, PS->getStmt(), PDB.getBugReporter(), PD);
PDB.getStateManager().iterBindings(N->getState(), SNS);
}
}
-
+
// After constructing the full PathDiagnostic, do a pass over it to compact
// PathDiagnosticPieces that occur within a macro.
CompactPathDiagnostic(PD, PDB.getSourceManager());
@@ -796,20 +795,20 @@
static bool IsControlFlowExpr(const Stmt *S) {
const Expr *E = dyn_cast<Expr>(S);
-
+
if (!E)
return false;
-
- E = E->IgnoreParenCasts();
-
+
+ E = E->IgnoreParenCasts();
+
if (isa<ConditionalOperator>(E))
return true;
-
+
if (const BinaryOperator *B = dyn_cast<BinaryOperator>(E))
if (B->isLogicalOp())
return true;
-
- return false;
+
+ return false;
}
namespace {
@@ -818,25 +817,25 @@
public:
ContextLocation(const PathDiagnosticLocation &L, bool isdead = false)
: PathDiagnosticLocation(L), IsDead(isdead) {}
-
- void markDead() { IsDead = true; }
+
+ void markDead() { IsDead = true; }
bool isDead() const { return IsDead; }
};
-
+
class VISIBILITY_HIDDEN EdgeBuilder {
std::vector<ContextLocation> CLocs;
typedef std::vector<ContextLocation>::iterator iterator;
PathDiagnostic &PD;
PathDiagnosticBuilder &PDB;
PathDiagnosticLocation PrevLoc;
-
+
bool IsConsumedExpr(const PathDiagnosticLocation &L);
-
+
bool containsLocation(const PathDiagnosticLocation &Container,
const PathDiagnosticLocation &Containee);
-
+
PathDiagnosticLocation getContextLocation(const PathDiagnosticLocation &L);
-
+
PathDiagnosticLocation cleanUpLocation(PathDiagnosticLocation L,
bool firstCharOnly = false) {
if (const Stmt *S = L.asStmt()) {
@@ -864,20 +863,20 @@
firstCharOnly = true;
continue;
}
-
+
break;
}
-
+
if (S != Original)
L = PathDiagnosticLocation(S, L.getManager());
}
-
+
if (firstCharOnly)
L = PathDiagnosticLocation(L.asLocation());
return L;
}
-
+
void popLocation() {
if (!CLocs.back().isDead() && CLocs.back().asLocation().isFileID()) {
// For contexts, we only want the first character as the range.
@@ -885,18 +884,18 @@
}
CLocs.pop_back();
}
-
- PathDiagnosticLocation IgnoreParens(const PathDiagnosticLocation &L);
+
+ PathDiagnosticLocation IgnoreParens(const PathDiagnosticLocation &L);
public:
EdgeBuilder(PathDiagnostic &pd, PathDiagnosticBuilder &pdb)
: PD(pd), PDB(pdb) {
-
+
// If the PathDiagnostic already has pieces, add the enclosing statement
// of the first piece as a context as well.
if (!PD.empty()) {
PrevLoc = PD.begin()->getLocation();
-
+
if (const Stmt *S = PrevLoc.asStmt())
addExtendedContext(PDB.getEnclosingStmtLocation(S).asStmt());
}
@@ -904,7 +903,7 @@
~EdgeBuilder() {
while (!CLocs.empty()) popLocation();
-
+
// Finally, add an initial edge from the start location of the first
// statement (if it doesn't already exist).
// FIXME: Should handle CXXTryStmt if analyser starts supporting C++.
@@ -914,20 +913,20 @@
SourceLocation Loc = (*CS->body_begin())->getLocStart();
rawAddEdge(PathDiagnosticLocation(Loc, PDB.getSourceManager()));
}
-
+
}
void addEdge(PathDiagnosticLocation NewLoc, bool alwaysAdd = false);
-
+
void addEdge(const Stmt *S, bool alwaysAdd = false) {
addEdge(PathDiagnosticLocation(S, PDB.getSourceManager()), alwaysAdd);
}
-
+
void rawAddEdge(PathDiagnosticLocation NewLoc);
-
+
void addContext(const Stmt *S);
void addExtendedContext(const Stmt *S);
-};
+};
} // end anonymous namespace
@@ -936,10 +935,10 @@
if (const Stmt *S = L.asStmt()) {
if (IsControlFlowExpr(S))
return L;
-
- return PDB.getEnclosingStmtLocation(S);
+
+ return PDB.getEnclosingStmtLocation(S);
}
-
+
return L;
}
@@ -948,10 +947,10 @@
if (Container == Containee)
return true;
-
+
if (Container.asDecl())
return true;
-
+
if (const Stmt *S = Containee.asStmt())
if (const Stmt *ContainerS = Container.asStmt()) {
while (S) {
@@ -965,25 +964,25 @@
// Less accurate: compare using source ranges.
SourceRange ContainerR = Container.asRange();
SourceRange ContaineeR = Containee.asRange();
-
+
SourceManager &SM = PDB.getSourceManager();
SourceLocation ContainerRBeg = SM.getInstantiationLoc(ContainerR.getBegin());
SourceLocation ContainerREnd = SM.getInstantiationLoc(ContainerR.getEnd());
SourceLocation ContaineeRBeg = SM.getInstantiationLoc(ContaineeR.getBegin());
SourceLocation ContaineeREnd = SM.getInstantiationLoc(ContaineeR.getEnd());
-
+
unsigned ContainerBegLine = SM.getInstantiationLineNumber(ContainerRBeg);
unsigned ContainerEndLine = SM.getInstantiationLineNumber(ContainerREnd);
unsigned ContaineeBegLine = SM.getInstantiationLineNumber(ContaineeRBeg);
unsigned ContaineeEndLine = SM.getInstantiationLineNumber(ContaineeREnd);
-
+
assert(ContainerBegLine <= ContainerEndLine);
- assert(ContaineeBegLine <= ContaineeEndLine);
-
+ assert(ContaineeBegLine <= ContaineeEndLine);
+
return (ContainerBegLine <= ContaineeBegLine &&
ContainerEndLine >= ContaineeEndLine &&
(ContainerBegLine != ContaineeBegLine ||
- SM.getInstantiationColumnNumber(ContainerRBeg) <=
+ SM.getInstantiationColumnNumber(ContainerRBeg) <=
SM.getInstantiationColumnNumber(ContaineeRBeg)) &&
(ContainerEndLine != ContaineeEndLine ||
SM.getInstantiationColumnNumber(ContainerREnd) >=
@@ -1003,13 +1002,13 @@
PrevLoc = NewLoc;
return;
}
-
+
const PathDiagnosticLocation &NewLocClean = cleanUpLocation(NewLoc);
const PathDiagnosticLocation &PrevLocClean = cleanUpLocation(PrevLoc);
-
+
if (NewLocClean.asLocation() == PrevLocClean.asLocation())
return;
-
+
// FIXME: Ignore intra-macro edges for now.
if (NewLocClean.asLocation().getInstantiationLoc() ==
PrevLocClean.asLocation().getInstantiationLoc())
@@ -1020,15 +1019,15 @@
}
void EdgeBuilder::addEdge(PathDiagnosticLocation NewLoc, bool alwaysAdd) {
-
+
if (!alwaysAdd && NewLoc.asLocation().isMacroID())
return;
-
+
const PathDiagnosticLocation &CLoc = getContextLocation(NewLoc);
while (!CLocs.empty()) {
ContextLocation &TopContextLoc = CLocs.back();
-
+
// Is the top location context the same as the one for the new location?
if (TopContextLoc == CLoc) {
if (alwaysAdd) {
@@ -1045,21 +1044,21 @@
if (containsLocation(TopContextLoc, CLoc)) {
if (alwaysAdd) {
rawAddEdge(NewLoc);
-
+
if (IsConsumedExpr(CLoc) && !IsControlFlowExpr(CLoc.asStmt())) {
CLocs.push_back(ContextLocation(CLoc, true));
return;
}
}
-
+
CLocs.push_back(CLoc);
- return;
+ return;
}
// Context does not contain the location. Flush it.
popLocation();
}
-
+
// If we reach here, there is no enclosing context. Just add the edge.
rawAddEdge(NewLoc);
}
@@ -1067,15 +1066,15 @@
bool EdgeBuilder::IsConsumedExpr(const PathDiagnosticLocation &L) {
if (const Expr *X = dyn_cast_or_null<Expr>(L.asStmt()))
return PDB.getParentMap().isConsumedExpr(X) && !IsControlFlowExpr(X);
-
+
return false;
}
-
+
void EdgeBuilder::addExtendedContext(const Stmt *S) {
if (!S)
return;
-
- const Stmt *Parent = PDB.getParent(S);
+
+ const Stmt *Parent = PDB.getParent(S);
while (Parent) {
if (isa<CompoundStmt>(Parent))
Parent = PDB.getParent(Parent);
@@ -1092,16 +1091,16 @@
break;
}
}
-
+
addContext(S);
}
-
+
void EdgeBuilder::addContext(const Stmt *S) {
if (!S)
return;
PathDiagnosticLocation L(S, PDB.getSourceManager());
-
+
while (!CLocs.empty()) {
const PathDiagnosticLocation &TopContextLoc = CLocs.back();
@@ -1111,7 +1110,7 @@
if (containsLocation(TopContextLoc, L)) {
CLocs.push_back(L);
- return;
+ return;
}
// Context does not contain the location. Flush it.
@@ -1124,11 +1123,11 @@
static void GenerateExtensivePathDiagnostic(PathDiagnostic& PD,
PathDiagnosticBuilder &PDB,
const ExplodedNode *N) {
-
-
+
+
EdgeBuilder EB(PD, PDB);
- const ExplodedNode* NextNode = N->pred_empty()
+ const ExplodedNode* NextNode = N->pred_empty()
? NULL : *(N->pred_begin());
while (NextNode) {
N = NextNode;
@@ -1140,26 +1139,26 @@
if (const BlockEdge *BE = dyn_cast<BlockEdge>(&P)) {
const CFGBlock &Blk = *BE->getSrc();
const Stmt *Term = Blk.getTerminator();
-
+
// Are we jumping to the head of a loop? Add a special diagnostic.
if (const Stmt *Loop = BE->getDst()->getLoopTarget()) {
PathDiagnosticLocation L(Loop, PDB.getSourceManager());
const CompoundStmt *CS = NULL;
-
+
if (!Term) {
if (const ForStmt *FS = dyn_cast<ForStmt>(Loop))
CS = dyn_cast<CompoundStmt>(FS->getBody());
else if (const WhileStmt *WS = dyn_cast<WhileStmt>(Loop))
- CS = dyn_cast<CompoundStmt>(WS->getBody());
+ CS = dyn_cast<CompoundStmt>(WS->getBody());
}
-
+
PathDiagnosticEventPiece *p =
new PathDiagnosticEventPiece(L,
"Looping back to the head of the loop");
-
+
EB.addEdge(p->getLocation(), true);
PD.push_front(p);
-
+
if (CS) {
PathDiagnosticLocation BL(CS->getRBracLoc(),
PDB.getSourceManager());
@@ -1167,14 +1166,14 @@
EB.addEdge(BL);
}
}
-
+
if (Term)
EB.addContext(Term);
-
+
break;
}
- if (const BlockEntrance *BE = dyn_cast<BlockEntrance>(&P)) {
+ if (const BlockEntrance *BE = dyn_cast<BlockEntrance>(&P)) {
if (const Stmt* S = BE->getFirstStmt()) {
if (IsControlFlowExpr(S)) {
// Add the proper context for '&&', '||', and '?'.
@@ -1187,10 +1186,10 @@
break;
}
} while (0);
-
+
if (!NextNode)
continue;
-
+
for (BugReporterContext::visitor_iterator I = PDB.visitor_begin(),
E = PDB.visitor_end(); I!=E; ++I) {
if (PathDiagnosticPiece* p = (*I)->VisitNode(N, NextNode, PDB)) {
@@ -1198,9 +1197,9 @@
EB.addEdge(Loc, true);
PD.push_front(p);
if (const Stmt *S = Loc.asStmt())
- EB.addExtendedContext(PDB.getEnclosingStmtLocation(S).asStmt());
+ EB.addExtendedContext(PDB.getEnclosingStmtLocation(S).asStmt());
}
- }
+ }
}
}
@@ -1216,46 +1215,46 @@
BugReport::~BugReport() {}
RangedBugReport::~RangedBugReport() {}
-const Stmt* BugReport::getStmt() const {
- ProgramPoint ProgP = EndNode->getLocation();
+const Stmt* BugReport::getStmt() const {
+ ProgramPoint ProgP = EndNode->getLocation();
const Stmt *S = NULL;
-
+
if (BlockEntrance* BE = dyn_cast<BlockEntrance>(&ProgP)) {
CFGBlock &Exit = ProgP.getLocationContext()->getCFG()->getExit();
if (BE->getBlock() == &Exit)
S = GetPreviousStmt(EndNode);
}
if (!S)
- S = GetStmt(ProgP);
-
- return S;
+ S = GetStmt(ProgP);
+
+ return S;
}
PathDiagnosticPiece*
BugReport::getEndPath(BugReporterContext& BRC,
const ExplodedNode* EndPathNode) {
-
+
const Stmt* S = getStmt();
-
+
if (!S)
return NULL;
const SourceRange *Beg, *End;
- getRanges(Beg, End);
+ getRanges(Beg, End);
PathDiagnosticLocation L(S, BRC.getSourceManager());
-
+
// Only add the statement itself as a range if we didn't specify any
// special ranges for this report.
PathDiagnosticPiece* P = new PathDiagnosticEventPiece(L, getDescription(),
Beg == End);
-
+
for (; Beg != End; ++Beg)
P->addRange(*Beg);
-
+
return P;
}
-void BugReport::getRanges(const SourceRange*& beg, const SourceRange*& end) {
+void BugReport::getRanges(const SourceRange*& beg, const SourceRange*& end) {
if (const Expr* E = dyn_cast_or_null<Expr>(getStmt())) {
R = E->getSourceRange();
assert(R.isValid());
@@ -1266,7 +1265,7 @@
beg = end = 0;
}
-SourceLocation BugReport::getLocation() const {
+SourceLocation BugReport::getLocation() const {
if (EndNode)
if (const Stmt* S = GetCurrentOrPreviousStmt(EndNode)) {
// For member expressions, return the location of the '.' or '->'.
@@ -1325,8 +1324,8 @@
BugReportEquivClass& EQ = *EI;
FlushReport(EQ);
}
-
- // Delete the BugType object.
+
+ // Delete the BugType object.
// FIXME: this will *not* delete the BugReportEquivClasses, since FoldingSet
// only deletes the buckets, not the nodes themselves.
@@ -1346,9 +1345,9 @@
MakeReportGraph(const ExplodedGraph* G,
const ExplodedNode** NStart,
const ExplodedNode** NEnd) {
-
+
// Create the trimmed graph. It will contain the shortest paths from the
- // error nodes to the root. In the new graph we should only have one
+ // error nodes to the root. In the new graph we should only have one
// error node unless there are two or more error nodes with the same minimum
// path length.
ExplodedGraph* GTrim;
@@ -1356,12 +1355,12 @@
llvm::DenseMap<const void*, const void*> InverseMap;
llvm::tie(GTrim, NMap) = G->Trim(NStart, NEnd, &InverseMap);
-
+
// Create owning pointers for GTrim and NMap just to ensure that they are
// released when this function exits.
llvm::OwningPtr<ExplodedGraph> AutoReleaseGTrim(GTrim);
llvm::OwningPtr<InterExplodedGraphMap> AutoReleaseNMap(NMap);
-
+
// Find the (first) error node in the trimmed graph. We just need to consult
// the node map (NMap) which maps from nodes in the original graph to nodes
// in the new graph.
@@ -1376,68 +1375,68 @@
WS.push(N);
IndexMap[*I] = NodeIndex;
}
-
+
assert(!WS.empty() && "No error node found in the trimmed graph.");
// Create a new (third!) graph with a single path. This is the graph
// that will be returned to the caller.
ExplodedGraph *GNew = new ExplodedGraph(GTrim->getContext());
-
+
// Sometimes the trimmed graph can contain a cycle. Perform a reverse BFS
// to the root node, and then construct a new graph that contains only
// a single path.
llvm::DenseMap<const void*,unsigned> Visited;
-
+
unsigned cnt = 0;
const ExplodedNode* Root = 0;
-
+
while (!WS.empty()) {
const ExplodedNode* Node = WS.front();
WS.pop();
-
+
if (Visited.find(Node) != Visited.end())
continue;
-
+
Visited[Node] = cnt++;
-
+
if (Node->pred_empty()) {
Root = Node;
break;
}
-
+
for (ExplodedNode::const_pred_iterator I=Node->pred_begin(),
E=Node->pred_end(); I!=E; ++I)
WS.push(*I);
}
-
+
assert(Root);
-
+
// Now walk from the root down the BFS path, always taking the successor
// with the lowest number.
- ExplodedNode *Last = 0, *First = 0;
+ ExplodedNode *Last = 0, *First = 0;
NodeBackMap *BM = new NodeBackMap();
unsigned NodeIndex = 0;
-
+
for ( const ExplodedNode *N = Root ;;) {
// Lookup the number associated with the current node.
llvm::DenseMap<const void*,unsigned>::iterator I = Visited.find(N);
assert(I != Visited.end());
-
+
// Create the equivalent node in the new graph with the same state
// and location.
ExplodedNode* NewN = GNew->getNode(N->getLocation(), N->getState());
-
+
// Store the mapping to the original node.
llvm::DenseMap<const void*, const void*>::iterator IMitr=InverseMap.find(N);
assert(IMitr != InverseMap.end() && "No mapping to original node.");
(*BM)[NewN] = (const ExplodedNode*) IMitr->second;
-
+
// Link up the new node with the previous node.
if (Last)
NewN->addPredecessor(Last);
-
+
Last = NewN;
-
+
// Are we at the final node?
IndexMapTy::iterator IMI =
IndexMap.find((const ExplodedNode*)(IMitr->second));
@@ -1446,29 +1445,29 @@
NodeIndex = IMI->second;
break;
}
-
+
// Find the next successor node. We choose the node that is marked
// with the lowest BFS number.
ExplodedNode::const_succ_iterator SI = N->succ_begin();
ExplodedNode::const_succ_iterator SE = N->succ_end();
N = 0;
-
+
for (unsigned MinVal = 0; SI != SE; ++SI) {
-
+
I = Visited.find(*SI);
-
+
if (I == Visited.end())
continue;
-
+
if (!N || I->second < MinVal) {
N = *SI;
MinVal = I->second;
}
}
-
+
assert(N);
}
-
+
assert(First);
return std::make_pair(std::make_pair(GNew, BM),
@@ -1480,23 +1479,23 @@
static void CompactPathDiagnostic(PathDiagnostic &PD, const SourceManager& SM) {
typedef std::vector<std::pair<PathDiagnosticMacroPiece*, SourceLocation> >
MacroStackTy;
-
+
typedef std::vector<PathDiagnosticPiece*>
PiecesTy;
-
+
MacroStackTy MacroStack;
PiecesTy Pieces;
-
+
for (PathDiagnostic::iterator I = PD.begin(), E = PD.end(); I!=E; ++I) {
// Get the location of the PathDiagnosticPiece.
- const FullSourceLoc Loc = I->getLocation().asLocation();
-
+ const FullSourceLoc Loc = I->getLocation().asLocation();
+
// Determine the instantiation location, which is the location at which we group
// related PathDiagnosticPieces.
- SourceLocation InstantiationLoc = Loc.isMacroID() ?
+ SourceLocation InstantiationLoc = Loc.isMacroID() ?
SM.getInstantiationLoc(Loc) :
SourceLocation();
-
+
if (Loc.isFileID()) {
MacroStack.clear();
Pieces.push_back(&*I);
@@ -1504,7 +1503,7 @@
}
assert(Loc.isMacroID());
-
+
// Is the PathDiagnosticPiece within the same macro group?
if (!MacroStack.empty() && InstantiationLoc == MacroStack.back().second) {
MacroStack.back().first->push_back(&*I);
@@ -1518,22 +1517,22 @@
SourceLocation ParentInstantiationLoc = InstantiationLoc.isMacroID() ?
SM.getInstantiationLoc(Loc) :
SourceLocation();
-
+
// Walk the entire macro stack.
while (!MacroStack.empty()) {
if (InstantiationLoc == MacroStack.back().second) {
MacroGroup = MacroStack.back().first;
break;
}
-
+
if (ParentInstantiationLoc == MacroStack.back().second) {
MacroGroup = MacroStack.back().first;
break;
}
-
+
MacroStack.pop_back();
}
-
+
if (!MacroGroup || ParentInstantiationLoc == MacroStack.back().second) {
// Create a new macro group and add it to the stack.
PathDiagnosticMacroPiece *NewGroup = new PathDiagnosticMacroPiece(Loc);
@@ -1544,7 +1543,7 @@
assert(InstantiationLoc.isFileID());
Pieces.push_back(NewGroup);
}
-
+
MacroGroup = NewGroup;
MacroStack.push_back(std::make_pair(MacroGroup, InstantiationLoc));
}
@@ -1552,62 +1551,62 @@
// Finally, add the PathDiagnosticPiece to the group.
MacroGroup->push_back(&*I);
}
-
+
// Now take the pieces and construct a new PathDiagnostic.
PD.resetPath(false);
-
+
for (PiecesTy::iterator I=Pieces.begin(), E=Pieces.end(); I!=E; ++I) {
if (PathDiagnosticMacroPiece *MP=dyn_cast<PathDiagnosticMacroPiece>(*I))
if (!MP->containsEvent()) {
delete MP;
continue;
}
-
+
PD.push_back(*I);
}
}
void GRBugReporter::GeneratePathDiagnostic(PathDiagnostic& PD,
BugReportEquivClass& EQ) {
-
+
std::vector<const ExplodedNode*> Nodes;
-
+
for (BugReportEquivClass::iterator I=EQ.begin(), E=EQ.end(); I!=E; ++I) {
const ExplodedNode* N = I->getEndNode();
if (N) Nodes.push_back(N);
}
-
+
if (Nodes.empty())
return;
-
+
// Construct a new graph that contains only a single path from the error
- // node to a root.
+ // node to a root.
const std::pair<std::pair<ExplodedGraph*, NodeBackMap*>,
std::pair<ExplodedNode*, unsigned> >&
GPair = MakeReportGraph(&getGraph(), &Nodes[0], &Nodes[0] + Nodes.size());
-
+
// Find the BugReport with the original location.
BugReport *R = 0;
unsigned i = 0;
for (BugReportEquivClass::iterator I=EQ.begin(), E=EQ.end(); I!=E; ++I, ++i)
if (i == GPair.second.second) { R = *I; break; }
-
+
assert(R && "No original report found for sliced graph.");
-
+
llvm::OwningPtr<ExplodedGraph> ReportGraph(GPair.first.first);
llvm::OwningPtr<NodeBackMap> BackMap(GPair.first.second);
const ExplodedNode *N = GPair.second.first;
-
- // Start building the path diagnostic...
+
+ // Start building the path diagnostic...
PathDiagnosticBuilder PDB(*this, R, BackMap.get(), getPathDiagnosticClient());
-
+
if (PathDiagnosticPiece* Piece = R->getEndPath(PDB, N))
PD.push_back(Piece);
else
return;
-
+
R->registerInitialVisitors(PDB, N);
-
+
switch (PDB.getGenerationScheme()) {
case PathDiagnosticClient::Extensive:
GenerateExtensivePathDiagnostic(PD, PDB, N);
@@ -1622,17 +1621,17 @@
BugTypes = F.Add(BugTypes, BT);
}
-void BugReporter::EmitReport(BugReport* R) {
+void BugReporter::EmitReport(BugReport* R) {
// Compute the bug report's hash to determine its equivalence class.
llvm::FoldingSetNodeID ID;
R->Profile(ID);
-
- // Look up the equivalence class. If there isn't one, create it.
+
+ // Look up the equivalence class. If there isn't one, create it.
BugType& BT = R->getBugType();
Register(&BT);
void *InsertPos;
- BugReportEquivClass* EQ = BT.EQClasses.FindNodeOrInsertPos(ID, InsertPos);
-
+ BugReportEquivClass* EQ = BT.EQClasses.FindNodeOrInsertPos(ID, InsertPos);
+
if (!EQ) {
EQ = new BugReportEquivClass(R);
BT.EQClasses.InsertNode(EQ, InsertPos);
@@ -1645,11 +1644,11 @@
assert(!EQ.Reports.empty());
BugReport &R = **EQ.begin();
PathDiagnosticClient* PD = getPathDiagnosticClient();
-
+
// FIXME: Make sure we use the 'R' for the path that was actually used.
- // Probably doesn't make a difference in practice.
+ // Probably doesn't make a difference in practice.
BugType& BT = R.getBugType();
-
+
llvm::OwningPtr<PathDiagnostic>
D(new PathDiagnostic(R.getBugType().getName(),
!PD || PD->useVerboseDescription()
@@ -1657,16 +1656,16 @@
BT.getCategory()));
GeneratePathDiagnostic(*D.get(), EQ);
-
+
// Get the meta data.
std::pair<const char**, const char**> Meta = R.getExtraDescriptiveText();
for (const char** s = Meta.first; s != Meta.second; ++s) D->addMeta(*s);
// Emit a summary diagnostic to the regular Diagnostics engine.
const SourceRange *Beg = 0, *End = 0;
- R.getRanges(Beg, End);
+ R.getRanges(Beg, End);
Diagnostic& Diag = getDiagnostic();
- FullSourceLoc L(R.getLocation(), getSourceManager());
+ FullSourceLoc L(R.getLocation(), getSourceManager());
unsigned ErrorDiag = Diag.getCustomDiagID(Diagnostic::Warning,
R.getShortDescription().c_str());
@@ -1681,15 +1680,15 @@
// Emit a full diagnostic for the path if we have a PathDiagnosticClient.
if (!PD)
return;
-
- if (D->empty()) {
+
+ if (D->empty()) {
PathDiagnosticPiece* piece =
new PathDiagnosticEventPiece(L, R.getDescription());
for ( ; Beg != End; ++Beg) piece->addRange(*Beg);
D->push_back(piece);
}
-
+
PD->HandlePathDiagnostic(D.take());
}
@@ -1702,7 +1701,7 @@
void BugReporter::EmitBasicReport(const char* name, const char* category,
const char* str, SourceLocation Loc,
SourceRange* RBeg, unsigned NumRanges) {
-
+
// 'BT' will be owned by BugReporter as soon as we call 'EmitReport'.
BugType *BT = new BugType(name, category);
FullSourceLoc L = getContext().getFullLoc(Loc);
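The MakeReportGraph hunks above do a two-step path extraction: number every node by a reverse BFS that starts at the error node and walks predecessors until a root is reached, then descend from the root, always taking the visited successor with the lowest number, which yields a single root-to-error path. Below is a minimal standalone sketch of that idea in plain C++; std::queue and std::map stand in for the ExplodedGraph and DenseMap machinery, and all names are illustrative, not part of this patch.

#include <cassert>
#include <map>
#include <queue>
#include <vector>

struct Node {
  std::vector<Node*> preds, succs;
};

// Reverse BFS from the error node: record the order in which each node is
// first reached (0 == the error node itself) and stop at the first node
// with no predecessors, which plays the role of the root.
static std::map<Node*, unsigned> numberByReverseBFS(Node *Error, Node *&Root) {
  std::map<Node*, unsigned> Visited;
  std::queue<Node*> WS;
  WS.push(Error);
  unsigned cnt = 0;
  Root = nullptr;
  while (!WS.empty()) {
    Node *N = WS.front(); WS.pop();
    if (Visited.count(N))
      continue;
    Visited[N] = cnt++;
    if (N->preds.empty()) { Root = N; break; }
    for (Node *P : N->preds)
      WS.push(P);
  }
  return Visited;
}

// From the root, repeatedly take the visited successor with the smallest
// number; the numbers strictly decrease, so this terminates at the error
// node (number 0) and produces one linear path.
static std::vector<Node*> singleErrorPath(Node *Root,
                                          const std::map<Node*, unsigned> &Visited) {
  std::vector<Node*> Path;
  for (Node *N = Root; ; ) {
    Path.push_back(N);
    if (Visited.at(N) == 0)
      break;
    Node *Next = nullptr;
    for (Node *S : N->succs) {
      auto I = Visited.find(S);
      if (I != Visited.end() && (!Next || I->second < Visited.at(Next)))
        Next = S;
    }
    assert(Next && "every numbered non-error node has a numbered successor");
    N = Next;
  }
  return Path;
}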
diff --git a/lib/Analysis/BugReporterVisitors.cpp b/lib/Analysis/BugReporterVisitors.cpp
index 8b35028..b76ffb1 100644
--- a/lib/Analysis/BugReporterVisitors.cpp
+++ b/lib/Analysis/BugReporterVisitors.cpp
@@ -28,7 +28,7 @@
// Pattern match for a few useful cases (do something smarter later):
// a[0], p->f, *p
const Stmt *S = N->getLocationAs<PostStmt>()->getStmt();
-
+
if (const UnaryOperator *U = dyn_cast<UnaryOperator>(S)) {
if (U->getOpcode() == UnaryOperator::Deref)
return U->getSubExpr()->IgnoreParenCasts();
@@ -41,8 +41,8 @@
// to reason about them.
return AE->getBase();
}
-
- return NULL;
+
+ return NULL;
}
const Stmt*
@@ -91,19 +91,19 @@
public:
FindLastStoreBRVisitor(SVal v, const MemRegion *r)
: R(r), V(v), satisfied(false), StoreSite(0) {}
-
+
PathDiagnosticPiece* VisitNode(const ExplodedNode *N,
const ExplodedNode *PrevN,
BugReporterContext& BRC) {
-
+
if (satisfied)
return NULL;
-
- if (!StoreSite) {
+
+ if (!StoreSite) {
const ExplodedNode *Node = N, *Last = NULL;
-
+
for ( ; Node ; Last = Node, Node = Node->getFirstPred()) {
-
+
if (const VarRegion *VR = dyn_cast<VarRegion>(R)) {
if (const PostStmt *P = Node->getLocationAs<PostStmt>())
if (const DeclStmt *DS = P->getStmtAs<DeclStmt>())
@@ -112,35 +112,35 @@
break;
}
}
-
+
if (Node->getState()->getSVal(R) != V)
break;
}
-
+
if (!Node || !Last) {
satisfied = true;
return NULL;
}
-
+
StoreSite = Last;
}
-
+
if (StoreSite != N)
return NULL;
-
+
satisfied = true;
std::string sbuf;
llvm::raw_string_ostream os(sbuf);
-
+
if (const PostStmt *PS = N->getLocationAs<PostStmt>()) {
if (const DeclStmt *DS = PS->getStmtAs<DeclStmt>()) {
-
+
if (const VarRegion *VR = dyn_cast<VarRegion>(R)) {
os << "Variable '" << VR->getDecl()->getNameAsString() << "' ";
}
else
return NULL;
-
+
if (isa<loc::ConcreteInt>(V)) {
bool b = false;
ASTContext &C = BRC.getASTContext();
@@ -152,7 +152,7 @@
}
}
}
-
+
if (!b)
os << "initialized to a null pointer value";
}
@@ -165,13 +165,13 @@
if (VD->getInit())
os << "initialized to a garbage value";
else
- os << "declared without an initial value";
- }
+ os << "declared without an initial value";
+ }
}
}
}
-
- if (os.str().empty()) {
+
+ if (os.str().empty()) {
if (isa<loc::ConcreteInt>(V)) {
bool b = false;
ASTContext &C = BRC.getASTContext();
@@ -183,7 +183,7 @@
}
}
}
-
+
if (!b)
os << "Null pointer value stored to ";
}
@@ -196,18 +196,18 @@
}
else
return NULL;
-
+
if (const VarRegion *VR = dyn_cast<VarRegion>(R)) {
os << '\'' << VR->getDecl()->getNameAsString() << '\'';
}
else
return NULL;
}
-
+
// FIXME: Refactor this into BugReporterContext.
- const Stmt *S = 0;
+ const Stmt *S = 0;
ProgramPoint P = N->getLocation();
-
+
if (BlockEdge *BE = dyn_cast<BlockEdge>(&P)) {
CFGBlock *BSrc = BE->getSrc();
S = BSrc->getTerminatorCondition();
@@ -215,10 +215,10 @@
else if (PostStmt *PS = dyn_cast<PostStmt>(&P)) {
S = PS->getStmt();
}
-
+
if (!S)
return NULL;
-
+
// Construct a new PathDiagnosticPiece.
PathDiagnosticLocation L(S, BRC.getSourceManager());
return new PathDiagnosticEventPiece(L, os.str());
@@ -238,42 +238,42 @@
public:
TrackConstraintBRVisitor(SVal constraint, bool assumption)
: Constraint(constraint), Assumption(assumption), isSatisfied(false) {}
-
+
PathDiagnosticPiece* VisitNode(const ExplodedNode *N,
const ExplodedNode *PrevN,
BugReporterContext& BRC) {
if (isSatisfied)
return NULL;
-
+
// Check if in the previous state it was feasible for this constraint
// to *not* be true.
if (PrevN->getState()->assume(Constraint, !Assumption)) {
-
+
isSatisfied = true;
-
+
// As a sanity check, make sure that the negation of the constraint
// was infeasible in the current state. If it is feasible, we somehow
// missed the transition point.
if (N->getState()->assume(Constraint, !Assumption))
return NULL;
-
+
// We found the transition point for the constraint. We now need to
- // pretty-print the constraint. (work-in-progress)
+ // pretty-print the constraint. (work-in-progress)
std::string sbuf;
llvm::raw_string_ostream os(sbuf);
-
+
if (isa<Loc>(Constraint)) {
os << "Assuming pointer value is ";
os << (Assumption ? "non-null" : "null");
}
-
+
if (os.str().empty())
return NULL;
-
+
// FIXME: Refactor this into BugReporterContext.
- const Stmt *S = 0;
+ const Stmt *S = 0;
ProgramPoint P = N->getLocation();
-
+
if (BlockEdge *BE = dyn_cast<BlockEdge>(&P)) {
CFGBlock *BSrc = BE->getSrc();
S = BSrc->getTerminatorCondition();
@@ -281,65 +281,65 @@
else if (PostStmt *PS = dyn_cast<PostStmt>(&P)) {
S = PS->getStmt();
}
-
+
if (!S)
return NULL;
-
+
// Construct a new PathDiagnosticPiece.
PathDiagnosticLocation L(S, BRC.getSourceManager());
return new PathDiagnosticEventPiece(L, os.str());
}
-
+
return NULL;
- }
+ }
};
} // end anonymous namespace
static void registerTrackConstraint(BugReporterContext& BRC, SVal Constraint,
bool Assumption) {
- BRC.addVisitor(new TrackConstraintBRVisitor(Constraint, Assumption));
+ BRC.addVisitor(new TrackConstraintBRVisitor(Constraint, Assumption));
}
void clang::bugreporter::registerTrackNullOrUndefValue(BugReporterContext& BRC,
const void *data,
const ExplodedNode* N) {
-
+
const Stmt *S = static_cast<const Stmt*>(data);
-
+
if (!S)
return;
-
+
GRStateManager &StateMgr = BRC.getStateManager();
- const GRState *state = N->getState();
-
- if (const DeclRefExpr *DR = dyn_cast<DeclRefExpr>(S)) {
- if (const VarDecl *VD = dyn_cast<VarDecl>(DR->getDecl())) {
+ const GRState *state = N->getState();
+
+ if (const DeclRefExpr *DR = dyn_cast<DeclRefExpr>(S)) {
+ if (const VarDecl *VD = dyn_cast<VarDecl>(DR->getDecl())) {
const VarRegion *R =
StateMgr.getRegionManager().getVarRegion(VD, N->getLocationContext());
-
+
// What did we load?
SVal V = state->getSVal(S);
-
- if (isa<loc::ConcreteInt>(V) || isa<nonloc::ConcreteInt>(V)
+
+ if (isa<loc::ConcreteInt>(V) || isa<nonloc::ConcreteInt>(V)
|| V.isUndef()) {
registerFindLastStore(BRC, R, V);
}
}
}
-
+
SVal V = state->getSValAsScalarOrLoc(S);
-
+
// Uncomment this to find cases where we aren't properly getting the
// base value that was dereferenced.
// assert(!V.isUnknownOrUndef());
-
+
// Is it a symbolic value?
if (loc::MemRegionVal *L = dyn_cast<loc::MemRegionVal>(&V)) {
const SubRegion *R = cast<SubRegion>(L->getRegion());
while (R && !isa<SymbolicRegion>(R)) {
R = dyn_cast<SubRegion>(R->getSuperRegion());
}
-
+
if (R) {
assert(isa<SymbolicRegion>(R));
registerTrackConstraint(BRC, loc::MemRegionVal(R), false);
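FindLastStoreBRVisitor in the hunks above walks the predecessor chain from the error node and stops at the last node whose state still binds the tracked region to the value of interest; that node is treated as the store site and receives the "Variable ... initialized to ..." event. The following is a rough standalone sketch of that backward walk, using plain std containers in place of GRState and ExplodedNode; the names are illustrative only.

#include <map>
#include <string>
#include <vector>

// One entry per node, ordered from the error node backwards through its
// predecessors; Bindings maps a region name to its bound value.
struct NodeState {
  std::map<std::string, int> Bindings;
};

// Returns the deepest node (walking backwards) whose state still binds
// 'Region' to 'Value', i.e. the node just after the store, or null if even
// the error node does not carry that binding.
static const NodeState *findStoreSite(const std::vector<NodeState> &Chain,
                                      const std::string &Region, int Value) {
  const NodeState *Last = nullptr;
  for (const NodeState &N : Chain) {
    auto I = N.Bindings.find(Region);
    if (I == N.Bindings.end() || I->second != Value)
      break;            // the binding differs here, so the store happened
                        // at the node seen on the previous iteration
    Last = &N;
  }
  return Last;
}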
diff --git a/lib/Analysis/CFG.cpp b/lib/Analysis/CFG.cpp
index ee64bd2..d5fde0a 100644
--- a/lib/Analysis/CFG.cpp
+++ b/lib/Analysis/CFG.cpp
@@ -133,8 +133,8 @@
CFGBlock *createBlock(bool add_successor = true);
bool FinishBlock(CFGBlock* B);
CFGBlock *addStmt(Stmt *S) { return Visit(S, true); }
-
-
+
+
/// TryResult - a class representing a variant over the values
/// 'true', 'false', or 'unknown'. This is returned by TryEvaluateBool,
/// and is used by the CFGBuilder to decide if a branch condition
@@ -144,7 +144,7 @@
public:
TryResult(bool b) : X(b ? 1 : 0) {}
TryResult() : X(-1) {}
-
+
bool isTrue() const { return X == 1; }
bool isFalse() const { return X == 0; }
bool isKnown() const { return X >= 0; }
@@ -153,7 +153,7 @@
X ^= 0x1;
}
};
-
+
/// TryEvaluateBool - Try and evaluate the Stmt and return 0 or 1
/// if we can evaluate to a known value, otherwise return -1.
TryResult TryEvaluateBool(Expr *S) {
@@ -292,109 +292,109 @@
case Stmt::AddrLabelExprClass:
return VisitAddrLabelExpr(cast<AddrLabelExpr>(S), alwaysAdd);
-
+
case Stmt::BinaryOperatorClass:
return VisitBinaryOperator(cast<BinaryOperator>(S), alwaysAdd);
-
+
case Stmt::BlockExprClass:
return VisitBlockExpr(cast<BlockExpr>(S), alwaysAdd);
case Stmt::BlockDeclRefExprClass:
return VisitBlockDeclRefExpr(cast<BlockDeclRefExpr>(S), alwaysAdd);
-
+
case Stmt::BreakStmtClass:
return VisitBreakStmt(cast<BreakStmt>(S));
-
+
case Stmt::CallExprClass:
return VisitCallExpr(cast<CallExpr>(S), alwaysAdd);
-
+
case Stmt::CaseStmtClass:
return VisitCaseStmt(cast<CaseStmt>(S));
case Stmt::ChooseExprClass:
return VisitChooseExpr(cast<ChooseExpr>(S));
-
+
case Stmt::CompoundStmtClass:
return VisitCompoundStmt(cast<CompoundStmt>(S));
-
+
case Stmt::ConditionalOperatorClass:
return VisitConditionalOperator(cast<ConditionalOperator>(S));
-
+
case Stmt::ContinueStmtClass:
return VisitContinueStmt(cast<ContinueStmt>(S));
-
+
case Stmt::DeclStmtClass:
return VisitDeclStmt(cast<DeclStmt>(S));
-
+
case Stmt::DefaultStmtClass:
return VisitDefaultStmt(cast<DefaultStmt>(S));
-
+
case Stmt::DoStmtClass:
return VisitDoStmt(cast<DoStmt>(S));
-
+
case Stmt::ForStmtClass:
return VisitForStmt(cast<ForStmt>(S));
-
+
case Stmt::GotoStmtClass:
return VisitGotoStmt(cast<GotoStmt>(S));
-
+
case Stmt::IfStmtClass:
return VisitIfStmt(cast<IfStmt>(S));
-
+
case Stmt::IndirectGotoStmtClass:
return VisitIndirectGotoStmt(cast<IndirectGotoStmt>(S));
-
+
case Stmt::LabelStmtClass:
return VisitLabelStmt(cast<LabelStmt>(S));
-
+
case Stmt::ObjCAtCatchStmtClass:
- return VisitObjCAtCatchStmt(cast<ObjCAtCatchStmt>(S));
-
+ return VisitObjCAtCatchStmt(cast<ObjCAtCatchStmt>(S));
+
case Stmt::CXXThrowExprClass:
return VisitCXXThrowExpr(cast<CXXThrowExpr>(S));
case Stmt::ObjCAtSynchronizedStmtClass:
return VisitObjCAtSynchronizedStmt(cast<ObjCAtSynchronizedStmt>(S));
-
+
case Stmt::ObjCAtThrowStmtClass:
return VisitObjCAtThrowStmt(cast<ObjCAtThrowStmt>(S));
-
+
case Stmt::ObjCAtTryStmtClass:
return VisitObjCAtTryStmt(cast<ObjCAtTryStmt>(S));
-
+
case Stmt::ObjCForCollectionStmtClass:
return VisitObjCForCollectionStmt(cast<ObjCForCollectionStmt>(S));
-
+
case Stmt::ParenExprClass:
S = cast<ParenExpr>(S)->getSubExpr();
- goto tryAgain;
-
+ goto tryAgain;
+
case Stmt::NullStmtClass:
return Block;
-
+
case Stmt::ReturnStmtClass:
return VisitReturnStmt(cast<ReturnStmt>(S));
-
+
case Stmt::SizeOfAlignOfExprClass:
- return VisitSizeOfAlignOfExpr(cast<SizeOfAlignOfExpr>(S), alwaysAdd);
-
+ return VisitSizeOfAlignOfExpr(cast<SizeOfAlignOfExpr>(S), alwaysAdd);
+
case Stmt::StmtExprClass:
return VisitStmtExpr(cast<StmtExpr>(S), alwaysAdd);
-
+
case Stmt::SwitchStmtClass:
return VisitSwitchStmt(cast<SwitchStmt>(S));
-
+
case Stmt::WhileStmtClass:
return VisitWhileStmt(cast<WhileStmt>(S));
}
}
-
+
CFGBlock *CFGBuilder::VisitStmt(Stmt *S, bool alwaysAdd) {
if (alwaysAdd) {
autoCreateBlock();
Block->appendStmt(S);
}
-
+
return VisitChildren(S);
}
@@ -407,7 +407,7 @@
}
return B;
}
-
+
CFGBlock *CFGBuilder::VisitAddrLabelExpr(AddrLabelExpr *A, bool alwaysAdd) {
AddressTakenLabels.insert(A->getLabel());
@@ -418,26 +418,26 @@
return Block;
}
-
+
CFGBlock *CFGBuilder::VisitBinaryOperator(BinaryOperator *B, bool alwaysAdd) {
if (B->isLogicalOp()) { // && or ||
CFGBlock* ConfluenceBlock = Block ? Block : createBlock();
ConfluenceBlock->appendStmt(B);
-
+
if (!FinishBlock(ConfluenceBlock))
return 0;
-
+
// create the block evaluating the LHS
CFGBlock* LHSBlock = createBlock(false);
LHSBlock->setTerminator(B);
-
+
// create the block evaluating the RHS
Succ = ConfluenceBlock;
Block = NULL;
CFGBlock* RHSBlock = addStmt(B->getRHS());
if (!FinishBlock(RHSBlock))
return 0;
-
+
// See if this is a known constant.
TryResult KnownVal = TryEvaluateBool(B->getLHS());
if (KnownVal.isKnown() && (B->getOpcode() == BinaryOperator::LOr))
@@ -447,23 +447,23 @@
if (B->getOpcode() == BinaryOperator::LOr) {
LHSBlock->addSuccessor(KnownVal.isTrue() ? NULL : ConfluenceBlock);
LHSBlock->addSuccessor(KnownVal.isFalse() ? NULL : RHSBlock);
- } else {
+ } else {
assert (B->getOpcode() == BinaryOperator::LAnd);
LHSBlock->addSuccessor(KnownVal.isFalse() ? NULL : RHSBlock);
LHSBlock->addSuccessor(KnownVal.isTrue() ? NULL : ConfluenceBlock);
}
-
+
// Generate the blocks for evaluating the LHS.
Block = LHSBlock;
return addStmt(B->getLHS());
- }
+ }
else if (B->getOpcode() == BinaryOperator::Comma) { // ,
autoCreateBlock();
Block->appendStmt(B);
addStmt(B->getRHS());
return addStmt(B->getLHS());
}
-
+
return VisitStmt(B, alwaysAdd);
}
@@ -477,28 +477,28 @@
// FIXME
return NYS();
}
-
+
CFGBlock *CFGBuilder::VisitBreakStmt(BreakStmt *B) {
// "break" is a control-flow statement. Thus we stop processing the current
// block.
if (Block && !FinishBlock(Block))
return 0;
-
+
// Now create a new block that ends with the break statement.
Block = createBlock(false);
Block->setTerminator(B);
-
+
// If there is no target for the break, then we are looking at an incomplete
// AST. This means that the CFG cannot be constructed.
if (BreakTargetBlock)
Block->addSuccessor(BreakTargetBlock);
else
badCFG = true;
-
-
+
+
return Block;
}
-
+
CFGBlock *CFGBuilder::VisitCallExpr(CallExpr *C, bool alwaysAdd) {
// If this is a call to a no-return function, this stops the block here.
bool NoReturn = false;
@@ -512,17 +512,17 @@
if (!NoReturn)
return VisitStmt(C, alwaysAdd);
-
+
if (Block && !FinishBlock(Block))
return 0;
-
+
// Create new block with no successor for the remaining pieces.
Block = createBlock(false);
Block->appendStmt(C);
// Wire this to the exit block directly.
Block->addSuccessor(&cfg->getExit());
-
+
return VisitChildren(C);
}
@@ -531,42 +531,42 @@
ConfluenceBlock->appendStmt(C);
if (!FinishBlock(ConfluenceBlock))
return 0;
-
+
Succ = ConfluenceBlock;
Block = NULL;
CFGBlock* LHSBlock = addStmt(C->getLHS());
if (!FinishBlock(LHSBlock))
return 0;
-
+
Succ = ConfluenceBlock;
Block = NULL;
CFGBlock* RHSBlock = addStmt(C->getRHS());
if (!FinishBlock(RHSBlock))
return 0;
-
+
Block = createBlock(false);
// See if this is a known constant.
const TryResult& KnownVal = TryEvaluateBool(C->getCond());
Block->addSuccessor(KnownVal.isFalse() ? NULL : LHSBlock);
Block->addSuccessor(KnownVal.isTrue() ? NULL : RHSBlock);
Block->setTerminator(C);
- return addStmt(C->getCond());
+ return addStmt(C->getCond());
}
-
-
-CFGBlock* CFGBuilder::VisitCompoundStmt(CompoundStmt* C) {
- CFGBlock* LastBlock = Block;
+
+
+CFGBlock* CFGBuilder::VisitCompoundStmt(CompoundStmt* C) {
+ CFGBlock* LastBlock = Block;
for (CompoundStmt::reverse_body_iterator I=C->body_rbegin(), E=C->body_rend();
I != E; ++I ) {
LastBlock = addStmt(*I);
-
+
if (badCFG)
return NULL;
- }
+ }
return LastBlock;
}
-
+
CFGBlock *CFGBuilder::VisitConditionalOperator(ConditionalOperator *C) {
// Create the confluence block that will "merge" the results of the ternary
// expression.
@@ -574,7 +574,7 @@
ConfluenceBlock->appendStmt(C);
if (!FinishBlock(ConfluenceBlock))
return 0;
-
+
// Create a block for the LHS expression if there is an LHS expression. A
// GCC extension allows LHS to be NULL, causing the condition to be the
// value that is returned instead.
@@ -588,16 +588,16 @@
return 0;
Block = NULL;
}
-
+
// Create the block for the RHS expression.
Succ = ConfluenceBlock;
CFGBlock* RHSBlock = addStmt(C->getRHS());
if (!FinishBlock(RHSBlock))
return 0;
-
+
// Create the block that will contain the condition.
Block = createBlock(false);
-
+
// See if this is a known constant.
const TryResult& KnownVal = TryEvaluateBool(C->getCond());
if (LHSBlock) {
@@ -622,8 +622,8 @@
ConfluenceBlock->pred_end());
}
}
-
- Block->addSuccessor(KnownVal.isTrue() ? NULL : RHSBlock);
+
+ Block->addSuccessor(KnownVal.isTrue() ? NULL : RHSBlock);
Block->setTerminator(C);
return addStmt(C->getCond());
}
@@ -635,45 +635,45 @@
Block->appendStmt(DS);
return VisitDeclSubExpr(DS->getSingleDecl());
}
-
+
CFGBlock *B = 0;
-
+
// FIXME: Add a reverse iterator for DeclStmt to avoid this extra copy.
typedef llvm::SmallVector<Decl*,10> BufTy;
BufTy Buf(DS->decl_begin(), DS->decl_end());
-
+
for (BufTy::reverse_iterator I = Buf.rbegin(), E = Buf.rend(); I != E; ++I) {
// Get the alignment of the new DeclStmt, padding out to >=8 bytes.
unsigned A = llvm::AlignOf<DeclStmt>::Alignment < 8
? 8 : llvm::AlignOf<DeclStmt>::Alignment;
-
+
// Allocate the DeclStmt using the BumpPtrAllocator. It will get
// automatically freed with the CFG.
DeclGroupRef DG(*I);
Decl *D = *I;
- void *Mem = cfg->getAllocator().Allocate(sizeof(DeclStmt), A);
+ void *Mem = cfg->getAllocator().Allocate(sizeof(DeclStmt), A);
DeclStmt *DSNew = new (Mem) DeclStmt(DG, D->getLocation(), GetEndLoc(D));
-
+
// Append the fake DeclStmt to block.
Block->appendStmt(DSNew);
B = VisitDeclSubExpr(D);
}
-
- return B;
+
+ return B;
}
-
+
/// VisitDeclSubExpr - Utility method to add block-level expressions for
/// initializers in Decls.
CFGBlock *CFGBuilder::VisitDeclSubExpr(Decl* D) {
assert(Block);
VarDecl *VD = dyn_cast<VarDecl>(D);
-
+
if (!VD)
return Block;
-
+
Expr *Init = VD->getInit();
-
+
if (Init) {
// Optimization: Don't create separate block-level statements for literals.
switch (Init->getStmtClass()) {
@@ -685,12 +685,12 @@
Block = addStmt(Init);
}
}
-
+
// If the type of VD is a VLA, then we must process its size expressions.
for (VariableArrayType* VA = FindVA(VD->getType().getTypePtr()); VA != 0;
VA = FindVA(VA->getElementType().getTypePtr()))
Block = addStmt(VA->getSizeExpr());
-
+
return Block;
}
@@ -879,7 +879,7 @@
// See if this is a known constant.
TryResult KnownVal(true);
-
+
if (F->getCond())
KnownVal = TryEvaluateBool(F->getCond());
@@ -1171,8 +1171,8 @@
Succ = EntryConditionBlock;
return EntryConditionBlock;
}
-
-
+
+
CFGBlock *CFGBuilder::VisitObjCAtCatchStmt(ObjCAtCatchStmt* S) {
// FIXME: For now we pretend that @catch and the code it contains does not
// exit.
@@ -1329,7 +1329,7 @@
return Block;
}
-
+
CFGBlock *CFGBuilder::VisitSizeOfAlignOfExpr(SizeOfAlignOfExpr *E,
bool alwaysAdd) {
@@ -1337,17 +1337,17 @@
autoCreateBlock();
Block->appendStmt(E);
}
-
+
// VLA types have expressions that must be evaluated.
if (E->isArgumentType()) {
for (VariableArrayType* VA = FindVA(E->getArgumentType().getTypePtr());
VA != 0; VA = FindVA(VA->getElementType().getTypePtr()))
addStmt(VA->getSizeExpr());
}
-
+
return Block;
}
-
+
/// VisitStmtExpr - Utility method to handle (nested) statement
/// expressions (a GCC extension).
CFGBlock* CFGBuilder::VisitStmtExpr(StmtExpr *SE, bool alwaysAdd) {
@@ -1416,7 +1416,7 @@
if (CS->getSubStmt())
addStmt(CS->getSubStmt());
-
+
CFGBlock* CaseBlock = Block;
if (!CaseBlock)
CaseBlock = createBlock();
@@ -1445,7 +1445,7 @@
CFGBlock* CFGBuilder::VisitDefaultStmt(DefaultStmt* Terminator) {
if (Terminator->getSubStmt())
addStmt(Terminator->getSubStmt());
-
+
DefaultCaseBlock = Block;
if (!DefaultCaseBlock)
@@ -1454,7 +1454,7 @@
// Default statements partition blocks, so this is the top of the basic block
// we were processing (the "default:" is the label).
DefaultCaseBlock->setLabel(Terminator);
-
+
if (!FinishBlock(DefaultCaseBlock))
return 0;
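The CFG.cpp hunks above rely on the three-valued TryResult returned by TryEvaluateBool: when a branch condition folds to a known constant, the builder wires a null successor in place of the unreachable edge, as in LHSBlock->addSuccessor(KnownVal.isTrue() ? NULL : ConfluenceBlock). A self-contained sketch of that idea outside the Clang classes follows; Block is just a placeholder type here.

struct TryResult {
  int X;                                  // 1 = true, 0 = false, -1 = unknown
  TryResult() : X(-1) {}
  TryResult(bool b) : X(b ? 1 : 0) {}
  bool isTrue() const  { return X == 1; }
  bool isFalse() const { return X == 0; }
  bool isKnown() const { return X >= 0; }
};

struct Block;                             // stand-in for CFGBlock

// Wire the two outgoing edges of a branch, dropping (nulling out) the edge
// that a known-constant condition makes unreachable.
static void wireBranch(TryResult Cond, Block *TrueBlk, Block *FalseBlk,
                       Block *&TrueSucc, Block *&FalseSucc) {
  TrueSucc  = Cond.isFalse() ? nullptr : TrueBlk;
  FalseSucc = Cond.isTrue()  ? nullptr : FalseBlk;
}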
diff --git a/lib/Analysis/CFRefCount.cpp b/lib/Analysis/CFRefCount.cpp
index 9cd59c2..e511f76 100644
--- a/lib/Analysis/CFRefCount.cpp
+++ b/lib/Analysis/CFRefCount.cpp
@@ -22,7 +22,7 @@
#include "clang/Analysis/PathSensitive/BugReporter.h"
#include "clang/Analysis/PathSensitive/SymbolManager.h"
#include "clang/Analysis/PathSensitive/GRTransferFuncs.h"
-#include "clang/AST/DeclObjC.h"
+#include "clang/AST/DeclObjC.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/FoldingSet.h"
#include "llvm/ADT/ImmutableMap.h"
@@ -44,7 +44,7 @@
// MemoryMgmt/Tasks/MemoryManagementRules.html
//
// "You take ownership of an object if you create it using a method whose name
-// begins with "alloc" or "new" or contains "copy" (for example, alloc,
+// begins with "alloc" or "new" or contains "copy" (for example, alloc,
// newObject, or mutableCopy), or if you send it a retain message. You are
// responsible for relinquishing ownership of objects you own using release
// or autorelease. Any other time you receive an object, you must
@@ -62,8 +62,8 @@
|| (isupper(prev) && isupper(ch) && islower(next)) // XXCreate
|| !isalpha(ch);
}
-
-static inline const char* parseWord(const char* s) {
+
+static inline const char* parseWord(const char* s) {
char ch = *s, prev = '\0';
assert(ch != '\0');
char next = *(s+1);
@@ -77,18 +77,18 @@
static NamingConvention deriveNamingConvention(Selector S) {
IdentifierInfo *II = S.getIdentifierInfoForSlot(0);
-
+
if (!II)
return NoConvention;
-
+
const char *s = II->getName();
-
+
// A method/function name may contain a prefix. We don't know if it is there,
// however, until we encounter the first '_'.
bool InPossiblePrefix = true;
bool AtBeginning = true;
NamingConvention C = NoConvention;
-
+
while (*s != '\0') {
// Skip '_'.
if (*s == '_') {
@@ -103,24 +103,24 @@
++s;
continue;
}
-
+
// Skip numbers, ':', etc.
if (!isalpha(*s)) {
++s;
continue;
}
-
+
const char *wordEnd = parseWord(s);
assert(wordEnd > s);
unsigned len = wordEnd - s;
-
+
switch (len) {
default:
break;
case 3:
// Methods starting with 'new' follow the create rule.
if (AtBeginning && StringsEqualNoCase("new", s, len))
- C = CreateRule;
+ C = CreateRule;
break;
case 4:
// Methods starting with 'alloc' or containing 'copy' follow the
@@ -136,7 +136,7 @@
C = CreateRule;
break;
}
-
+
// If we aren't in the prefix and have a derived convention then just
// return it now.
if (!InPossiblePrefix && C != NoConvention)
@@ -156,10 +156,10 @@
}
static const ObjCMethodDecl*
-ResolveToInterfaceMethodDecl(const ObjCMethodDecl *MD) {
+ResolveToInterfaceMethodDecl(const ObjCMethodDecl *MD) {
ObjCInterfaceDecl *ID =
const_cast<ObjCInterfaceDecl*>(MD->getClassInterface());
-
+
return MD->isInstanceMethod()
? ID->lookupInstanceMethod(MD->getSelector())
: ID->lookupClassMethod(MD->getSelector());
@@ -178,12 +178,12 @@
GenericNodeBuilder(GREndPathNodeBuilder &enb)
: SNB(0), S(0), tag(0), ENB(&enb) {}
-
+
ExplodedNode *MakeNode(const GRState *state, ExplodedNode *Pred) {
if (SNB)
- return SNB->generateNode(PostStmt(S, Pred->getLocationContext(), tag),
+ return SNB->generateNode(PostStmt(S, Pred->getLocationContext(), tag),
state, Pred);
-
+
assert(ENB);
return ENB->generateNode(state, Pred);
}
@@ -211,16 +211,16 @@
static bool hasPrefix(const char* s, const char* prefix) {
if (!prefix)
return true;
-
+
char c = *s;
char cP = *prefix;
-
+
while (c != '\0' && cP != '\0') {
if (c != cP) break;
c = *(++s);
cP = *(++prefix);
}
-
+
return cP == '\0';
}
@@ -231,14 +231,14 @@
static bool isRefType(QualType RetTy, const char* prefix,
ASTContext* Ctx = 0, const char* name = 0) {
-
+
// Recursively walk the typedef stack, allowing typedefs of reference types.
while (1) {
if (TypedefType* TD = dyn_cast<TypedefType>(RetTy.getTypePtr())) {
const char* TDName = TD->getDecl()->getIdentifier()->getName();
if (hasPrefix(TDName, prefix) && hasSuffix(TDName, "Ref"))
return true;
-
+
RetTy = TD->getDecl()->getUnderlyingType();
continue;
}
@@ -282,14 +282,14 @@
namespace {
/// RetEffect is used to summarize a function/method call's behavior with
-/// respect to its return value.
+/// respect to its return value.
class VISIBILITY_HIDDEN RetEffect {
public:
enum Kind { NoRet, Alias, OwnedSymbol, OwnedAllocatedSymbol,
NotOwnedSymbol, GCNotOwnedSymbol, ReceiverAlias,
OwnedWhenTrackedReceiver };
-
- enum ObjKind { CF, ObjC, AnyObj };
+
+ enum ObjKind { CF, ObjC, AnyObj };
private:
Kind K;
@@ -298,124 +298,124 @@
RetEffect(Kind k, unsigned idx = 0) : K(k), O(AnyObj), index(idx) {}
RetEffect(Kind k, ObjKind o) : K(k), O(o), index(0) {}
-
+
public:
Kind getKind() const { return K; }
ObjKind getObjKind() const { return O; }
-
- unsigned getIndex() const {
+
+ unsigned getIndex() const {
assert(getKind() == Alias);
return index;
}
-
+
bool isOwned() const {
return K == OwnedSymbol || K == OwnedAllocatedSymbol ||
K == OwnedWhenTrackedReceiver;
}
-
+
static RetEffect MakeOwnedWhenTrackedReceiver() {
return RetEffect(OwnedWhenTrackedReceiver, ObjC);
}
-
+
static RetEffect MakeAlias(unsigned Idx) {
return RetEffect(Alias, Idx);
}
static RetEffect MakeReceiverAlias() {
return RetEffect(ReceiverAlias);
- }
+ }
static RetEffect MakeOwned(ObjKind o, bool isAllocated = false) {
return RetEffect(isAllocated ? OwnedAllocatedSymbol : OwnedSymbol, o);
- }
+ }
static RetEffect MakeNotOwned(ObjKind o) {
return RetEffect(NotOwnedSymbol, o);
}
static RetEffect MakeGCNotOwned() {
return RetEffect(GCNotOwnedSymbol, ObjC);
}
-
+
static RetEffect MakeNoRet() {
return RetEffect(NoRet);
}
-
+
void Profile(llvm::FoldingSetNodeID& ID) const {
ID.AddInteger((unsigned)K);
ID.AddInteger((unsigned)O);
ID.AddInteger(index);
}
};
-
-
+
+
class VISIBILITY_HIDDEN RetainSummary {
/// Args - an ordered vector of (index, ArgEffect) pairs, where index
/// specifies the argument (starting from 0). This can be sparsely
/// populated; arguments with no entry in Args use 'DefaultArgEffect'.
ArgEffects Args;
-
+
/// DefaultArgEffect - The default ArgEffect to apply to arguments that
/// do not have an entry in Args.
ArgEffect DefaultArgEffect;
-
+
/// Receiver - If this summary applies to an Objective-C message expression,
/// this is the effect applied to the state of the receiver.
ArgEffect Receiver;
-
+
/// Ret - The effect on the return value. Used to indicate if the
/// function/method call returns a new tracked symbol, returns an
/// alias of one of the arguments in the call, and so on.
RetEffect Ret;
-
+
/// EndPath - Indicates that execution of this method/function should
/// terminate the simulation of a path.
bool EndPath;
-
+
public:
RetainSummary(ArgEffects A, RetEffect R, ArgEffect defaultEff,
ArgEffect ReceiverEff, bool endpath = false)
: Args(A), DefaultArgEffect(defaultEff), Receiver(ReceiverEff), Ret(R),
- EndPath(endpath) {}
-
+ EndPath(endpath) {}
+
/// getArg - Return the argument effect on the argument specified by
/// idx (starting from 0).
ArgEffect getArg(unsigned idx) const {
if (const ArgEffect *AE = Args.lookup(idx))
return *AE;
-
+
return DefaultArgEffect;
}
-
+
/// setDefaultArgEffect - Set the default argument effect.
void setDefaultArgEffect(ArgEffect E) {
DefaultArgEffect = E;
}
-
+
/// setArg - Set the argument effect on the argument specified by idx.
void setArgEffect(ArgEffects::Factory& AF, unsigned idx, ArgEffect E) {
Args = AF.Add(Args, idx, E);
}
-
+
/// getRetEffect - Returns the effect on the return value of the call.
RetEffect getRetEffect() const { return Ret; }
-
+
/// setRetEffect - Set the effect of the return value of the call.
void setRetEffect(RetEffect E) { Ret = E; }
-
+
/// isEndPath - Returns true if executing the given method/function should
/// terminate the path.
bool isEndPath() const { return EndPath; }
-
+
/// getReceiverEffect - Returns the effect on the receiver of the call.
/// This is only meaningful if the summary applies to an ObjCMessageExpr*.
ArgEffect getReceiverEffect() const { return Receiver; }
-
+
/// setReceiverEffect - Set the effect on the receiver of the call.
void setReceiverEffect(ArgEffect E) { Receiver = E; }
-
+
typedef ArgEffects::iterator ExprIterator;
-
+
ExprIterator begin_args() const { return Args.begin(); }
ExprIterator end_args() const { return Args.end(); }
-
+
static void Profile(llvm::FoldingSetNodeID& ID, ArgEffects A,
RetEffect RetEff, ArgEffect DefaultEff,
ArgEffect ReceiverEff, bool EndPath) {
@@ -425,7 +425,7 @@
ID.AddInteger((unsigned) ReceiverEff);
ID.AddInteger((unsigned) EndPath);
}
-
+
void Profile(llvm::FoldingSetNodeID& ID) const {
Profile(ID, Args, Ret, DefaultArgEffect, Receiver, EndPath);
}
@@ -440,7 +440,7 @@
class VISIBILITY_HIDDEN ObjCSummaryKey {
IdentifierInfo* II;
Selector S;
-public:
+public:
ObjCSummaryKey(IdentifierInfo* ii, Selector s)
: II(ii), S(s) {}
@@ -449,10 +449,10 @@
ObjCSummaryKey(const ObjCInterfaceDecl* d, IdentifierInfo *ii, Selector s)
: II(d ? d->getIdentifier() : ii), S(s) {}
-
+
ObjCSummaryKey(Selector s)
: II(0), S(s) {}
-
+
IdentifierInfo* getIdentifier() const { return II; }
Selector getSelector() const { return S; }
};
@@ -464,56 +464,56 @@
return ObjCSummaryKey(DenseMapInfo<IdentifierInfo*>::getEmptyKey(),
DenseMapInfo<Selector>::getEmptyKey());
}
-
+
static inline ObjCSummaryKey getTombstoneKey() {
return ObjCSummaryKey(DenseMapInfo<IdentifierInfo*>::getTombstoneKey(),
- DenseMapInfo<Selector>::getTombstoneKey());
+ DenseMapInfo<Selector>::getTombstoneKey());
}
-
+
static unsigned getHashValue(const ObjCSummaryKey &V) {
return (DenseMapInfo<IdentifierInfo*>::getHashValue(V.getIdentifier())
- & 0x88888888)
+ & 0x88888888)
| (DenseMapInfo<Selector>::getHashValue(V.getSelector())
& 0x55555555);
}
-
+
static bool isEqual(const ObjCSummaryKey& LHS, const ObjCSummaryKey& RHS) {
return DenseMapInfo<IdentifierInfo*>::isEqual(LHS.getIdentifier(),
RHS.getIdentifier()) &&
DenseMapInfo<Selector>::isEqual(LHS.getSelector(),
RHS.getSelector());
}
-
+
static bool isPod() {
return DenseMapInfo<ObjCInterfaceDecl*>::isPod() &&
DenseMapInfo<Selector>::isPod();
}
};
} // end llvm namespace
-
+
namespace {
class VISIBILITY_HIDDEN ObjCSummaryCache {
typedef llvm::DenseMap<ObjCSummaryKey, RetainSummary*> MapTy;
MapTy M;
public:
ObjCSummaryCache() {}
-
+
RetainSummary* find(const ObjCInterfaceDecl* D, IdentifierInfo *ClsName,
Selector S) {
// Lookup the method using the decl for the class @interface. If we
// have no decl, lookup using the class name.
return D ? find(D, S) : find(ClsName, S);
}
-
- RetainSummary* find(const ObjCInterfaceDecl* D, Selector S) {
+
+ RetainSummary* find(const ObjCInterfaceDecl* D, Selector S) {
// Do a lookup with the (D,S) pair. If we find a match return
// the iterator.
ObjCSummaryKey K(D, S);
MapTy::iterator I = M.find(K);
-
+
if (I != M.end() || !D)
return I->second;
-
+
// Walk the super chain. If we find a hit with a parent, we'll end
// up returning that summary. We actually allow that key (null,S), as
// we cache summaries for the null ObjCInterfaceDecl* to allow us to
@@ -523,62 +523,62 @@
for (ObjCInterfaceDecl* C=D->getSuperClass() ;; C=C->getSuperClass()) {
if ((I = M.find(ObjCSummaryKey(C, S))) != M.end())
break;
-
+
if (!C)
return NULL;
}
-
- // Cache the summary with original key to make the next lookup faster
+
+ // Cache the summary with original key to make the next lookup faster
// and return the iterator.
RetainSummary *Summ = I->second;
M[K] = Summ;
return Summ;
}
-
+
RetainSummary* find(Expr* Receiver, Selector S) {
return find(getReceiverDecl(Receiver), S);
}
-
+
RetainSummary* find(IdentifierInfo* II, Selector S) {
// FIXME: Class method lookup. Right now we don't have a good way
// of going between IdentifierInfo* and the class hierarchy.
MapTy::iterator I = M.find(ObjCSummaryKey(II, S));
-
+
if (I == M.end())
I = M.find(ObjCSummaryKey(S));
-
+
return I == M.end() ? NULL : I->second;
}
-
- const ObjCInterfaceDecl* getReceiverDecl(Expr* E) {
+
+ const ObjCInterfaceDecl* getReceiverDecl(Expr* E) {
if (const ObjCObjectPointerType* PT =
E->getType()->getAsObjCObjectPointerType())
return PT->getInterfaceDecl();
return NULL;
}
-
+
RetainSummary*& operator[](ObjCMessageExpr* ME) {
-
+
Selector S = ME->getSelector();
-
+
if (Expr* Receiver = ME->getReceiver()) {
const ObjCInterfaceDecl* OD = getReceiverDecl(Receiver);
return OD ? M[ObjCSummaryKey(OD->getIdentifier(), S)] : M[S];
}
-
+
return M[ObjCSummaryKey(ME->getClassName(), S)];
}
-
+
RetainSummary*& operator[](ObjCSummaryKey K) {
return M[K];
}
-
+
RetainSummary*& operator[](Selector S) {
return M[ ObjCSummaryKey(S) ];
}
-};
+};
} // end anonymous namespace
//===----------------------------------------------------------------------===//
@@ -591,29 +591,29 @@
//==-----------------------------------------------------------------==//
// Typedefs.
//==-----------------------------------------------------------------==//
-
+
typedef llvm::DenseMap<FunctionDecl*, RetainSummary*>
FuncSummariesTy;
-
+
typedef ObjCSummaryCache ObjCMethodSummariesTy;
-
+
//==-----------------------------------------------------------------==//
// Data.
//==-----------------------------------------------------------------==//
-
+
/// Ctx - The ASTContext object for the analyzed ASTs.
ASTContext& Ctx;
/// CFDictionaryCreateII - An IdentifierInfo* representing the identifier
/// "CFDictionaryCreate".
IdentifierInfo* CFDictionaryCreateII;
-
+
/// GCEnabled - Records whether or not the analyzed code runs in GC mode.
const bool GCEnabled;
-
+
/// FuncSummaries - A map from FunctionDecls to summaries.
- FuncSummariesTy FuncSummaries;
-
+ FuncSummariesTy FuncSummaries;
+
/// ObjCClassMethodSummaries - A map from selectors (for class methods)
/// to summaries.
ObjCMethodSummariesTy ObjCClassMethodSummaries;
@@ -624,34 +624,34 @@
/// BPAlloc - A BumpPtrAllocator used for allocating summaries, ArgEffects,
/// and all other data used by the checker.
llvm::BumpPtrAllocator BPAlloc;
-
+
/// AF - A factory for ArgEffects objects.
- ArgEffects::Factory AF;
-
+ ArgEffects::Factory AF;
+
/// ScratchArgs - A holding buffer for constructing ArgEffects.
ArgEffects ScratchArgs;
-
+
/// ObjCAllocRetE - Default return effect for methods returning Objective-C
/// objects.
RetEffect ObjCAllocRetE;
- /// ObjCInitRetE - Default return effect for init methods returning
+ /// ObjCInitRetE - Default return effect for init methods returning
/// Objective-C objects.
RetEffect ObjCInitRetE;
-
+
RetainSummary DefaultSummary;
RetainSummary* StopSummary;
-
+
//==-----------------------------------------------------------------==//
// Methods.
//==-----------------------------------------------------------------==//
-
+
/// getArgEffects - Returns a persistent ArgEffects object based on the
/// data in ScratchArgs.
ArgEffects getArgEffects();
- enum UnaryFuncKind { cfretain, cfrelease, cfmakecollectable };
-
+ enum UnaryFuncKind { cfretain, cfrelease, cfmakecollectable };
+
public:
RetEffect getObjAllocRetEffect() const { return ObjCAllocRetE; }
@@ -659,13 +659,13 @@
RetainSummary *Summ = (RetainSummary*) BPAlloc.Allocate<RetainSummary>();
return new (Summ) RetainSummary(DefaultSummary);
}
-
+
RetainSummary* getUnarySummary(const FunctionType* FT, UnaryFuncKind func);
-
+
RetainSummary* getCFSummaryCreateRule(FunctionDecl* FD);
- RetainSummary* getCFSummaryGetRule(FunctionDecl* FD);
+ RetainSummary* getCFSummaryGetRule(FunctionDecl* FD);
RetainSummary* getCFCreateGetRuleSummary(FunctionDecl* FD, const char* FName);
-
+
RetainSummary* getPersistentSummary(ArgEffects AE, RetEffect RetEff,
ArgEffect ReceiverEff = DoNothing,
ArgEffect DefaultEff = MayEscape,
@@ -676,36 +676,36 @@
ArgEffect DefaultEff = MayEscape) {
return getPersistentSummary(getArgEffects(), RE, ReceiverEff, DefaultEff);
}
-
+
RetainSummary *getPersistentStopSummary() {
if (StopSummary)
return StopSummary;
-
+
StopSummary = getPersistentSummary(RetEffect::MakeNoRet(),
StopTracking, StopTracking);
return StopSummary;
- }
+ }
RetainSummary *getInitMethodSummary(QualType RetTy);
void InitializeClassMethodSummaries();
void InitializeMethodSummaries();
-
+
bool isTrackedObjCObjectType(QualType T);
bool isTrackedCFObjectType(QualType T);
-
+
private:
-
+
void addClsMethSummary(IdentifierInfo* ClsII, Selector S,
RetainSummary* Summ) {
ObjCClassMethodSummaries[ObjCSummaryKey(ClsII, S)] = Summ;
}
-
+
void addNSObjectClsMethSummary(Selector S, RetainSummary *Summ) {
ObjCClassMethodSummaries[S] = Summ;
}
-
+
void addNSObjectMethSummary(Selector S, RetainSummary *Summ) {
ObjCMethodSummaries[S] = Summ;
}
@@ -716,43 +716,43 @@
Selector S = GetNullarySelector(nullaryName, Ctx);
ObjCClassMethodSummaries[ObjCSummaryKey(ClsII, S)] = Summ;
}
-
+
void addInstMethSummary(const char* Cls, const char* nullaryName,
RetainSummary *Summ) {
IdentifierInfo* ClsII = &Ctx.Idents.get(Cls);
Selector S = GetNullarySelector(nullaryName, Ctx);
ObjCMethodSummaries[ObjCSummaryKey(ClsII, S)] = Summ;
}
-
+
Selector generateSelector(va_list argp) {
llvm::SmallVector<IdentifierInfo*, 10> II;
while (const char* s = va_arg(argp, const char*))
II.push_back(&Ctx.Idents.get(s));
- return Ctx.Selectors.getSelector(II.size(), &II[0]);
+ return Ctx.Selectors.getSelector(II.size(), &II[0]);
}
-
+
void addMethodSummary(IdentifierInfo *ClsII, ObjCMethodSummariesTy& Summaries,
RetainSummary* Summ, va_list argp) {
Selector S = generateSelector(argp);
Summaries[ObjCSummaryKey(ClsII, S)] = Summ;
}
-
+
void addInstMethSummary(const char* Cls, RetainSummary* Summ, ...) {
va_list argp;
va_start(argp, Summ);
addMethodSummary(&Ctx.Idents.get(Cls), ObjCMethodSummaries, Summ, argp);
- va_end(argp);
+ va_end(argp);
}
-
+
void addClsMethSummary(const char* Cls, RetainSummary* Summ, ...) {
va_list argp;
va_start(argp, Summ);
addMethodSummary(&Ctx.Idents.get(Cls),ObjCClassMethodSummaries, Summ, argp);
va_end(argp);
}
-
+
void addClsMethSummary(IdentifierInfo *II, RetainSummary* Summ, ...) {
va_list argp;
va_start(argp, Summ);
@@ -769,9 +769,9 @@
addMethodSummary(&Ctx.Idents.get(Cls), ObjCMethodSummaries, Summ, argp);
va_end(argp);
}
-
+
public:
-
+
RetainSummaryManager(ASTContext& ctx, bool gcenabled)
: Ctx(ctx),
CFDictionaryCreateII(&ctx.Idents.get("CFDictionaryCreate")),
@@ -789,17 +789,17 @@
InitializeClassMethodSummaries();
InitializeMethodSummaries();
}
-
+
~RetainSummaryManager();
-
- RetainSummary* getSummary(FunctionDecl* FD);
-
+
+ RetainSummary* getSummary(FunctionDecl* FD);
+
RetainSummary* getInstanceMethodSummary(ObjCMessageExpr* ME,
const ObjCInterfaceDecl* ID) {
return getInstanceMethodSummary(ME->getSelector(), ME->getClassName(),
- ID, ME->getMethodDecl(), ME->getType());
+ ID, ME->getMethodDecl(), ME->getType());
}
-
+
RetainSummary* getInstanceMethodSummary(Selector S, IdentifierInfo *ClsName,
const ObjCInterfaceDecl* ID,
const ObjCMethodDecl *MD,
@@ -809,7 +809,7 @@
const ObjCInterfaceDecl *ID,
const ObjCMethodDecl *MD,
QualType RetTy);
-
+
RetainSummary *getClassMethodSummary(ObjCMessageExpr *ME) {
return getClassMethodSummary(ME->getSelector(), ME->getClassName(),
ME->getClassInfo().first,
@@ -824,17 +824,17 @@
Selector S = MD->getSelector();
IdentifierInfo *ClsName = ID->getIdentifier();
QualType ResultTy = MD->getResultType();
-
- // Resolve the method decl last.
+
+ // Resolve the method decl last.
if (const ObjCMethodDecl *InterfaceMD = ResolveToInterfaceMethodDecl(MD))
MD = InterfaceMD;
-
+
if (MD->isInstanceMethod())
return getInstanceMethodSummary(S, ClsName, ID, MD, ResultTy);
else
return getClassMethodSummary(S, ClsName, ID, MD, ResultTy);
}
-
+
RetainSummary* getCommonMethodSummary(const ObjCMethodDecl* MD,
Selector S, QualType RetTy);
@@ -845,14 +845,14 @@
const FunctionDecl *FD);
bool isGCEnabled() const { return GCEnabled; }
-
+
RetainSummary *copySummary(RetainSummary *OldSumm) {
RetainSummary *Summ = (RetainSummary*) BPAlloc.Allocate<RetainSummary>();
new (Summ) RetainSummary(*OldSumm);
return Summ;
- }
+ }
};
-
+
} // end anonymous namespace
//===----------------------------------------------------------------------===//
@@ -871,7 +871,7 @@
RetainSummaryManager::getPersistentSummary(ArgEffects AE, RetEffect RetEff,
ArgEffect ReceiverEff,
ArgEffect DefaultEff,
- bool isEndPath) {
+ bool isEndPath) {
// Create the summary and return it.
RetainSummary *Summ = (RetainSummary*) BPAlloc.Allocate<RetainSummary>();
new (Summ) RetainSummary(AE, RetEff, DefaultEff, ReceiverEff, isEndPath);
@@ -887,31 +887,31 @@
return false;
const ObjCObjectPointerType *PT = Ty->getAsObjCObjectPointerType();
-
+
// Can be true for objects with the 'NSObject' attribute.
if (!PT)
return true;
-
+
// We assume that id<..>, id, and "Class" all represent tracked objects.
if (PT->isObjCIdType() || PT->isObjCQualifiedIdType() ||
PT->isObjCClassType())
return true;
- // Does the interface subclass NSObject?
- // FIXME: We can memoize here if this gets too expensive.
- const ObjCInterfaceDecl *ID = PT->getInterfaceDecl();
+ // Does the interface subclass NSObject?
+ // FIXME: We can memoize here if this gets too expensive.
+ const ObjCInterfaceDecl *ID = PT->getInterfaceDecl();
// Assume that anything declared with a forward declaration and no
// @interface subclasses NSObject.
if (ID->isForwardDecl())
return true;
-
+
IdentifierInfo* NSObjectII = &Ctx.Idents.get("NSObject");
for ( ; ID ; ID = ID->getSuperClass())
if (ID->getIdentifier() == NSObjectII)
return true;
-
+
return false;
}
@@ -945,33 +945,33 @@
// No summary? Generate one.
RetainSummary *S = 0;
-
+
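// The do/while(0) below exists so that each case can 'break' out as soon
// as a summary has been chosen.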
do {
// We generate "stop" summaries for implicitly defined functions.
if (FD->isImplicit()) {
S = getPersistentStopSummary();
break;
}
-
+
// [PR 3337] Use 'getAsFunctionType' to strip away any typedefs on the
// function's type.
const FunctionType* FT = FD->getType()->getAsFunctionType();
const char* FName = FD->getIdentifier()->getName();
-
+
// Strip away preceding '_'. Doing this here will affect all the checks
// down below.
while (*FName == '_') ++FName;
-
+
// Inspect the result type.
QualType RetTy = FT->getResultType();
-
+
// FIXME: This should all be refactored into a chain of "summary lookup"
// filters.
assert (ScratchArgs.isEmpty());
-
+
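// Switch on the length of the function name so the memcmp checks in each
// case compare a fixed number of bytes.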
switch (strlen(FName)) {
default: break;
-
+
case 17:
// Handle: id NSMakeCollectable(CFTypeRef)
@@ -1003,10 +1003,10 @@
// Part of <rdar://problem/6961230>. (IOKit)
// This should be addressed using an API table.
ScratchArgs = AF.Add(ScratchArgs, 2, DecRef);
- S = getPersistentSummary(RetEffect::MakeNoRet(), DoNothing, DoNothing);
+ S = getPersistentSummary(RetEffect::MakeNoRet(), DoNothing, DoNothing);
}
break;
-
+
case 25:
if (!memcmp(FName, "IORegistryEntryIDMatching", 25)) {
// Part of <rdar://problem/6961230>. (IOKit)
@@ -1015,13 +1015,13 @@
DoNothing, DoNothing);
}
break;
-
+
case 26:
if (!memcmp(FName, "IOOpenFirmwarePathMatching", 26)) {
// Part of <rdar://problem/6961230>. (IOKit)
// This should be addressed using an API table.
S = getPersistentSummary(RetEffect::MakeOwned(RetEffect::CF, true),
- DoNothing, DoNothing);
+ DoNothing, DoNothing);
}
break;
@@ -1030,7 +1030,7 @@
// Part of <rdar://problem/6961230>.
// This should be addressed using an API table.
ScratchArgs = AF.Add(ScratchArgs, 1, DecRef);
- S = getPersistentSummary(RetEffect::MakeNoRet(), DoNothing, DoNothing);
+ S = getPersistentSummary(RetEffect::MakeNoRet(), DoNothing, DoNothing);
}
break;
@@ -1043,17 +1043,17 @@
S = getPersistentSummary(RetEffect::MakeNoRet(), DoNothing, DoNothing);
}
break;
-
+
case 32:
if (!memcmp(FName, "IOServiceAddMatchingNotification", 32)) {
// Part of <rdar://problem/6961230>.
// This should be addressed using an API table.
ScratchArgs = AF.Add(ScratchArgs, 2, DecRef);
- S = getPersistentSummary(RetEffect::MakeNoRet(), DoNothing, DoNothing);
+ S = getPersistentSummary(RetEffect::MakeNoRet(), DoNothing, DoNothing);
}
break;
}
-
+
// Did we get a summary?
if (S)
break;
@@ -1063,7 +1063,7 @@
#if 0
// Handle: NSDeallocateObject(id anObject);
// This method does allow 'nil' (although we don't check it now).
- if (strcmp(FName, "NSDeallocateObject") == 0) {
+ if (strcmp(FName, "NSDeallocateObject") == 0) {
return RetTy == Ctx.VoidTy
? getPersistentSummary(RetEffect::MakeNoRet(), DoNothing, Dealloc)
: getPersistentStopSummary();
@@ -1077,7 +1077,7 @@
S = getUnarySummary(FT, cfretain);
else if (strstr(FName, "MakeCollectable"))
S = getUnarySummary(FT, cfmakecollectable);
- else
+ else
S = getCFCreateGetRuleSummary(FD, FName);
break;
@@ -1100,7 +1100,7 @@
S = getCFCreateGetRuleSummary(FD, FName);
break;
}
-
+
break;
}
@@ -1112,7 +1112,7 @@
FName += 4;
else
FName += 2;
-
+
if (isRelease(FD, FName))
S = getUnarySummary(FT, cfrelease);
else {
@@ -1122,9 +1122,9 @@
// and that ownership cannot be transferred. While this is technically
// correct, many methods allow a tracked object to escape. For example:
//
- // CFMutableDictionaryRef x = CFDictionaryCreateMutable(...);
+ // CFMutableDictionaryRef x = CFDictionaryCreateMutable(...);
// CFDictionaryAddValue(y, key, x);
- // CFRelease(x);
+ // CFRelease(x);
// ... it is okay to use 'x' since 'y' has a reference to it
//
// We handle this and similar cases with the following heuristic. If the
@@ -1138,34 +1138,34 @@
CStrInCStrNoCase(FName, "AppendValue") ||
CStrInCStrNoCase(FName, "SetAttribute"))
? MayEscape : DoNothing;
-
+
S = getPersistentSummary(RetEffect::MakeNoRet(), DoNothing, E);
}
}
}
while (0);
-
+
if (!S)
S = getDefaultSummary();
// Annotations override defaults.
assert(S);
updateSummaryFromAnnotations(*S, FD);
-
+
FuncSummaries[FD] = S;
- return S;
+ return S;
}
RetainSummary*
RetainSummaryManager::getCFCreateGetRuleSummary(FunctionDecl* FD,
const char* FName) {
-
+
if (strstr(FName, "Create") || strstr(FName, "Copy"))
return getCFSummaryCreateRule(FD);
-
+
if (strstr(FName, "Get"))
return getCFSummaryGetRule(FD);
-
+
return getDefaultSummary();
}
@@ -1178,27 +1178,27 @@
const FunctionProtoType* FTP = dyn_cast<FunctionProtoType>(FT);
if (!FTP || FTP->getNumArgs() != 1)
return getPersistentStopSummary();
-
+
assert (ScratchArgs.isEmpty());
-
+
switch (func) {
case cfretain: {
ScratchArgs = AF.Add(ScratchArgs, 0, IncRef);
return getPersistentSummary(RetEffect::MakeAlias(0),
DoNothing, DoNothing);
}
-
+
case cfrelease: {
ScratchArgs = AF.Add(ScratchArgs, 0, DecRef);
return getPersistentSummary(RetEffect::MakeNoRet(),
DoNothing, DoNothing);
}
-
+
case cfmakecollectable: {
ScratchArgs = AF.Add(ScratchArgs, 0, MakeCollectable);
- return getPersistentSummary(RetEffect::MakeAlias(0),DoNothing, DoNothing);
+ return getPersistentSummary(RetEffect::MakeAlias(0), DoNothing, DoNothing);
}
-
+
default:
assert (false && "Not a supported unary function.");
return getDefaultSummary();
@@ -1207,17 +1207,17 @@
RetainSummary* RetainSummaryManager::getCFSummaryCreateRule(FunctionDecl* FD) {
assert (ScratchArgs.isEmpty());
-
+
if (FD->getIdentifier() == CFDictionaryCreateII) {
ScratchArgs = AF.Add(ScratchArgs, 1, DoNothingByRef);
ScratchArgs = AF.Add(ScratchArgs, 2, DoNothingByRef);
}
-
+
return getPersistentSummary(RetEffect::MakeOwned(RetEffect::CF, true));
}
RetainSummary* RetainSummaryManager::getCFSummaryGetRule(FunctionDecl* FD) {
- assert (ScratchArgs.isEmpty());
+ assert (ScratchArgs.isEmpty());
return getPersistentSummary(RetEffect::MakeNotOwned(RetEffect::CF),
DoNothing, DoNothing);
}
@@ -1228,12 +1228,12 @@
RetainSummary*
RetainSummaryManager::getInitMethodSummary(QualType RetTy) {
- assert(ScratchArgs.isEmpty());
+ assert(ScratchArgs.isEmpty());
// 'init' methods conceptually return a newly allocated object and claim
- // the receiver.
+ // the receiver.
if (isTrackedObjCObjectType(RetTy) || isTrackedCFObjectType(RetTy))
return getPersistentSummary(ObjCInitRetE, DecRefMsg);
-
+
return getDefaultSummary();
}
@@ -1244,7 +1244,7 @@
return;
QualType RetTy = FD->getResultType();
-
+
// Determine if there is a special return effect for this method.
if (isTrackedObjCObjectType(RetTy)) {
if (FD->getAttr<NSReturnsRetainedAttr>()) {
@@ -1268,20 +1268,20 @@
return;
bool isTrackedLoc = false;
-
+
// Determine if there is a special return effect for this method.
if (isTrackedObjCObjectType(MD->getResultType())) {
if (MD->getAttr<NSReturnsRetainedAttr>()) {
Summ.setRetEffect(ObjCAllocRetE);
return;
}
-
+
isTrackedLoc = true;
}
-
+
if (!isTrackedLoc)
isTrackedLoc = MD->getResultType()->getAs<PointerType>() != NULL;
-
+
if (isTrackedLoc && MD->getAttr<CFReturnsRetainedAttr>())
Summ.setRetEffect(RetEffect::MakeOwned(RetEffect::CF, true));
}
@@ -1304,10 +1304,10 @@
ScratchArgs = AF.Add(ScratchArgs, i, StopTracking);
}
}
-
+
// Any special effect for the receiver?
ArgEffect ReceiverEff = DoNothing;
-
+
// If one of the arguments in the selector has the keyword 'delegate' we
// should stop tracking the reference count for the receiver. This is
// because the reference count is quite possibly handled by a delegate
@@ -1317,29 +1317,29 @@
assert(!str.empty());
if (CStrInCStrNoCase(&str[0], "delegate:")) ReceiverEff = StopTracking;
}
-
+
// Look for methods that return an owned object.
- if (isTrackedObjCObjectType(RetTy)) {
+ if (isTrackedObjCObjectType(RetTy)) {
// EXPERIMENTAL: Assume the Cocoa conventions for all objects returned
// by instance methods.
RetEffect E = followsFundamentalRule(S)
? ObjCAllocRetE : RetEffect::MakeNotOwned(RetEffect::ObjC);
-
- return getPersistentSummary(E, ReceiverEff, MayEscape);
+
+ return getPersistentSummary(E, ReceiverEff, MayEscape);
}
-
+
// Look for methods that return an owned core foundation object.
if (isTrackedCFObjectType(RetTy)) {
RetEffect E = followsFundamentalRule(S)
? RetEffect::MakeOwned(RetEffect::CF, true)
: RetEffect::MakeNotOwned(RetEffect::CF);
-
+
return getPersistentSummary(E, ReceiverEff, MayEscape);
}
-
+
if (ScratchArgs.isEmpty() && ReceiverEff == DoNothing)
return getDefaultSummary();
-
+
return getPersistentSummary(RetEffect::MakeNoRet(), ReceiverEff, MayEscape);
}
@@ -1352,23 +1352,23 @@
// Look up a summary in our summary cache.
RetainSummary *Summ = ObjCMethodSummaries.find(ID, ClsName, S);
-
+
if (!Summ) {
assert(ScratchArgs.isEmpty());
-
+
// "initXXX": pass-through for receiver.
if (deriveNamingConvention(S) == InitRule)
Summ = getInitMethodSummary(RetTy);
else
Summ = getCommonMethodSummary(MD, S, RetTy);
-
+
// Annotations override defaults.
updateSummaryFromAnnotations(*Summ, MD);
-
+
// Memoize the summary.
ObjCMethodSummaries[ObjCSummaryKey(ID, ClsName, S)] = Summ;
}
-
+
return Summ;
}
@@ -1379,8 +1379,8 @@
QualType RetTy) {
assert(ClsName && "Class name must be specified.");
- RetainSummary *Summ = ObjCClassMethodSummaries.find(ID, ClsName, S);
-
+ RetainSummary *Summ = ObjCClassMethodSummaries.find(ID, ClsName, S);
+
if (!Summ) {
Summ = getCommonMethodSummary(MD, S, RetTy);
// Annotations override defaults.
@@ -1388,32 +1388,32 @@
// Memoize the summary.
ObjCClassMethodSummaries[ObjCSummaryKey(ID, ClsName, S)] = Summ;
}
-
+
return Summ;
}
-void RetainSummaryManager::InitializeClassMethodSummaries() {
+void RetainSummaryManager::InitializeClassMethodSummaries() {
assert(ScratchArgs.isEmpty());
RetainSummary* Summ = getPersistentSummary(ObjCAllocRetE);
-
+
// Create the summaries for "alloc", "new", and "allocWithZone:" for
// NSObject and its derivatives.
addNSObjectClsMethSummary(GetNullarySelector("alloc", Ctx), Summ);
addNSObjectClsMethSummary(GetNullarySelector("new", Ctx), Summ);
addNSObjectClsMethSummary(GetUnarySelector("allocWithZone", Ctx), Summ);
-
- // Create the [NSAssertionHandler currentHander] summary.
+
+ // Create the [NSAssertionHandler currentHandler] summary.
addClsMethSummary(&Ctx.Idents.get("NSAssertionHandler"),
GetNullarySelector("currentHandler", Ctx),
getPersistentSummary(RetEffect::MakeNotOwned(RetEffect::ObjC)));
-
+
// Create the [NSAutoreleasePool addObject:] summary.
ScratchArgs = AF.Add(ScratchArgs, 0, Autorelease);
addClsMethSummary(&Ctx.Idents.get("NSAutoreleasePool"),
GetUnarySelector("addObject", Ctx),
getPersistentSummary(RetEffect::MakeNoRet(),
DoNothing, Autorelease));
-
+
// Create the summaries for [NSObject performSelector...]. We treat
// these as 'stop tracking' for the arguments because they are often
// used for delegates that can release the object. When we have better
@@ -1435,7 +1435,7 @@
"withObject", "waitUntilDone", "modes", NULL);
addClsMethSummary(NSObjectII, Summ, "performSelectorInBackground",
"withObject", NULL);
-
+
// Specially handle NSData.
RetainSummary *dataWithBytesNoCopySumm =
getPersistentSummary(RetEffect::MakeNotOwned(RetEffect::ObjC), DoNothing,
@@ -1447,43 +1447,43 @@
}
void RetainSummaryManager::InitializeMethodSummaries() {
-
- assert (ScratchArgs.isEmpty());
-
+
+ assert (ScratchArgs.isEmpty());
+
// Create the "init" selector. It just acts as a pass-through for the
// receiver.
- RetainSummary *InitSumm = getPersistentSummary(ObjCInitRetE, DecRefMsg);
+ RetainSummary *InitSumm = getPersistentSummary(ObjCInitRetE, DecRefMsg);
addNSObjectMethSummary(GetNullarySelector("init", Ctx), InitSumm);
// awakeAfterUsingCoder: behaves basically like an 'init' method. It
// claims the receiver and returns a retained object.
addNSObjectMethSummary(GetUnarySelector("awakeAfterUsingCoder", Ctx),
InitSumm);
-
+
// The next methods are allocators.
RetainSummary *AllocSumm = getPersistentSummary(ObjCAllocRetE);
- RetainSummary *CFAllocSumm =
+ RetainSummary *CFAllocSumm =
getPersistentSummary(RetEffect::MakeOwned(RetEffect::CF, true));
-
- // Create the "copy" selector.
- addNSObjectMethSummary(GetNullarySelector("copy", Ctx), AllocSumm);
+
+ // Create the "copy" selector.
+ addNSObjectMethSummary(GetNullarySelector("copy", Ctx), AllocSumm);
// Create the "mutableCopy" selector.
addNSObjectMethSummary(GetNullarySelector("mutableCopy", Ctx), AllocSumm);
-
+
// Create the "retain" selector.
RetEffect E = RetEffect::MakeReceiverAlias();
RetainSummary *Summ = getPersistentSummary(E, IncRefMsg);
addNSObjectMethSummary(GetNullarySelector("retain", Ctx), Summ);
-
+
// Create the "release" selector.
Summ = getPersistentSummary(E, DecRefMsg);
addNSObjectMethSummary(GetNullarySelector("release", Ctx), Summ);
-
+
// Create the "drain" selector.
Summ = getPersistentSummary(E, isGCEnabled() ? DoNothing : DecRef);
addNSObjectMethSummary(GetNullarySelector("drain", Ctx), Summ);
-
+
// Create the -dealloc summary.
Summ = getPersistentSummary(RetEffect::MakeNoRet(), Dealloc);
addNSObjectMethSummary(GetNullarySelector("dealloc", Ctx), Summ);
@@ -1491,13 +1491,13 @@
// Create the "autorelease" selector.
Summ = getPersistentSummary(E, Autorelease);
addNSObjectMethSummary(GetNullarySelector("autorelease", Ctx), Summ);
-
+
// Specially handle NSAutoreleasePool.
addInstMethSummary("NSAutoreleasePool", "init",
getPersistentSummary(RetEffect::MakeReceiverAlias(),
NewAutoreleasePool));
-
- // For NSWindow, allocated objects are (initially) self-owned.
+
+ // For NSWindow, allocated objects are (initially) self-owned.
// FIXME: For now we opt for false negatives with NSWindow, as these objects
// self-own themselves. However, they only do this once they are displayed.
// Thus, we need to track an NSWindow's display status.
@@ -1506,42 +1506,42 @@
RetainSummary *NoTrackYet = getPersistentSummary(RetEffect::MakeNoRet(),
StopTracking,
StopTracking);
-
+
addClassMethSummary("NSWindow", "alloc", NoTrackYet);
#if 0
addInstMethSummary("NSWindow", NoTrackYet, "initWithContentRect",
"styleMask", "backing", "defer", NULL);
-
+
addInstMethSummary("NSWindow", NoTrackYet, "initWithContentRect",
"styleMask", "backing", "defer", "screen", NULL);
#endif
-
+
// For NSPanel (which subclasses NSWindow), allocated objects are not
// self-owned.
// FIXME: For now we don't track NSPanel objects for the same reason
// as for NSWindow objects.
addClassMethSummary("NSPanel", "alloc", NoTrackYet);
-
+
#if 0
addInstMethSummary("NSPanel", NoTrackYet, "initWithContentRect",
"styleMask", "backing", "defer", NULL);
-
+
addInstMethSummary("NSPanel", NoTrackYet, "initWithContentRect",
"styleMask", "backing", "defer", "screen", NULL);
#endif
-
+
// Don't track allocated autorelease pools yet, as it is okay to prematurely
// exit a method.
addClassMethSummary("NSAutoreleasePool", "alloc", NoTrackYet);
// Create NSAssertionHandler summaries.
addPanicSummary("NSAssertionHandler", "handleFailureInFunction", "file",
- "lineNumber", "description", NULL);
-
+ "lineNumber", "description", NULL);
+
addPanicSummary("NSAssertionHandler", "handleFailureInMethod", "object",
"file", "lineNumber", "description", NULL);
-
+
// Create summaries for QCRenderer/QCView -createSnapshotImageOfType:
addInstMethSummary("QCRenderer", AllocSumm,
"createSnapshotImageOfType", NULL);
@@ -1554,7 +1554,7 @@
addInstMethSummary("CIContext", CFAllocSumm,
"createCGImage", "fromRect", NULL);
addInstMethSummary("CIContext", CFAllocSumm,
- "createCGImage", "fromRect", "format", "colorSpace", NULL);
+ "createCGImage", "fromRect", "format", "colorSpace", NULL);
addInstMethSummary("CIContext", CFAllocSumm, "createCGLayerWithSize",
"info", NULL);
}
@@ -1564,19 +1564,19 @@
//===----------------------------------------------------------------------===//
namespace {
-
+
class VISIBILITY_HIDDEN RefVal {
-public:
+public:
enum Kind {
- Owned = 0, // Owning reference.
- NotOwned, // Reference is not owned by still valid (not freed).
+ Owned = 0, // Owning reference.
+ NotOwned, // Reference is not owned but still valid (not freed).
Released, // Object has been released.
ReturnedOwned, // Returned object passes ownership to caller.
ReturnedNotOwned, // Returned object does not pass ownership to caller.
ERROR_START,
ErrorDeallocNotOwned, // -dealloc called on non-owned object.
ErrorDeallocGC, // Calling -dealloc with GC enabled.
- ErrorUseAfterRelease, // Object used after released.
+ ErrorUseAfterRelease, // Object used after released.
ErrorReleaseNotOwned, // Release of an object that was not owned.
ERROR_LEAK_START,
ErrorLeak, // A memory leak due to excessive reference counts.
@@ -1587,7 +1587,7 @@
ErrorReturnedNotOwned
};
-private:
+private:
Kind kind;
RetEffect::ObjKind okind;
unsigned Cnt;
@@ -1600,9 +1600,9 @@
RefVal(Kind k, unsigned cnt = 0)
: kind(k), okind(RetEffect::AnyObj), Cnt(cnt), ACnt(0) {}
-public:
+public:
Kind getKind() const { return kind; }
-
+
RetEffect::ObjKind getObjKind() const { return okind; }
unsigned getCount() const { return Cnt; }
@@ -1611,72 +1611,72 @@
void clearCounts() { Cnt = 0; ACnt = 0; }
void setCount(unsigned i) { Cnt = i; }
void setAutoreleaseCount(unsigned i) { ACnt = i; }
-
+
QualType getType() const { return T; }
-
+
// Useful predicates.
-
+
static bool isError(Kind k) { return k >= ERROR_START; }
-
+
static bool isLeak(Kind k) { return k >= ERROR_LEAK_START; }
-
+
bool isOwned() const {
return getKind() == Owned;
}
-
+
bool isNotOwned() const {
return getKind() == NotOwned;
}
-
+
bool isReturnedOwned() const {
return getKind() == ReturnedOwned;
}
-
+
bool isReturnedNotOwned() const {
return getKind() == ReturnedNotOwned;
}
-
+
bool isNonLeakError() const {
Kind k = getKind();
return isError(k) && !isLeak(k);
}
-
+
static RefVal makeOwned(RetEffect::ObjKind o, QualType t,
unsigned Count = 1) {
return RefVal(Owned, o, Count, 0, t);
}
-
+
static RefVal makeNotOwned(RetEffect::ObjKind o, QualType t,
unsigned Count = 0) {
return RefVal(NotOwned, o, Count, 0, t);
}
-
+
// Comparison, profiling, and pretty-printing.
-
+
bool operator==(const RefVal& X) const {
return kind == X.kind && Cnt == X.Cnt && T == X.T && ACnt == X.ACnt;
}
-
+
RefVal operator-(size_t i) const {
return RefVal(getKind(), getObjKind(), getCount() - i,
getAutoreleaseCount(), getType());
}
-
+
RefVal operator+(size_t i) const {
return RefVal(getKind(), getObjKind(), getCount() + i,
getAutoreleaseCount(), getType());
}
-
+
RefVal operator^(Kind k) const {
return RefVal(k, getObjKind(), getCount(), getAutoreleaseCount(),
getType());
}
-
+
RefVal autorelease() const {
return RefVal(getKind(), getObjKind(), getCount(), getAutoreleaseCount()+1,
getType());
}
-
+
void Profile(llvm::FoldingSetNodeID& ID) const {
ID.AddInteger((unsigned) kind);
ID.AddInteger(Cnt);
@@ -1686,41 +1686,41 @@
void print(llvm::raw_ostream& Out) const;
};
-
+
void RefVal::print(llvm::raw_ostream& Out) const {
if (!T.isNull())
Out << "Tracked Type:" << T.getAsString() << '\n';
-
+
switch (getKind()) {
default: assert(false);
- case Owned: {
+ case Owned: {
Out << "Owned";
unsigned cnt = getCount();
if (cnt) Out << " (+ " << cnt << ")";
break;
}
-
+
case NotOwned: {
Out << "NotOwned";
unsigned cnt = getCount();
if (cnt) Out << " (+ " << cnt << ")";
break;
}
-
- case ReturnedOwned: {
+
+ case ReturnedOwned: {
Out << "ReturnedOwned";
unsigned cnt = getCount();
if (cnt) Out << " (+ " << cnt << ")";
break;
}
-
+
case ReturnedNotOwned: {
Out << "ReturnedNotOwned";
unsigned cnt = getCount();
if (cnt) Out << " (+ " << cnt << ")";
break;
}
-
+
case Released:
Out << "Released";
break;
@@ -1728,19 +1728,19 @@
case ErrorDeallocGC:
Out << "-dealloc (GC)";
break;
-
+
case ErrorDeallocNotOwned:
Out << "-dealloc (not-owned)";
break;
-
+
case ErrorLeak:
Out << "Leaked";
- break;
-
+ break;
+
case ErrorLeakReturned:
Out << "Leaked (Bad naming)";
break;
-
+
case ErrorGCLeakReturned:
Out << "Leaked (GC-ed at return)";
break;
@@ -1748,38 +1748,38 @@
case ErrorUseAfterRelease:
Out << "Use-After-Release [ERROR]";
break;
-
+
case ErrorReleaseNotOwned:
Out << "Release of Not-Owned [ERROR]";
break;
-
+
case RefVal::ErrorOverAutorelease:
Out << "Over autoreleased";
break;
-
+
case RefVal::ErrorReturnedNotOwned:
Out << "Non-owned object returned instead of owned";
break;
}
-
+
if (ACnt) {
Out << " [ARC +" << ACnt << ']';
}
}
-
+
} // end anonymous namespace
//===----------------------------------------------------------------------===//
// RefBindings - State used to track object reference counts.
//===----------------------------------------------------------------------===//
-
+
typedef llvm::ImmutableMap<SymbolRef, RefVal> RefBindings;
static int RefBIndex = 0;
namespace clang {
template<>
struct GRStateTrait<RefBindings> : public GRStatePartialTrait<RefBindings> {
- static inline void* GDMIndex() { return &RefBIndex; }
+ static inline void* GDMIndex() { return &RefBIndex; }
};
}
@@ -1800,12 +1800,12 @@
namespace clang {
template<> struct GRStateTrait<AutoreleaseStack>
: public GRStatePartialTrait<ARStack> {
- static inline void* GDMIndex() { return &AutoRBIndex; }
+ static inline void* GDMIndex() { return &AutoRBIndex; }
};
template<> struct GRStateTrait<AutoreleasePoolContents>
: public GRStatePartialTrait<ARPoolContents> {
- static inline void* GDMIndex() { return &AutoRCIndex; }
+ static inline void* GDMIndex() { return &AutoRCIndex; }
};
} // end clang namespace
@@ -1820,14 +1820,14 @@
SymbolRef pool = GetCurrentAutoreleasePool(state);
const ARCounts *cnts = state->get<AutoreleasePoolContents>(pool);
ARCounts newCnts(0);
-
+
if (cnts) {
const unsigned *cnt = (*cnts).lookup(sym);
newCnts = F.Add(*cnts, sym, cnt ? *cnt + 1 : 1);
}
else
newCnts = F.Add(F.GetEmptyMap(), sym, 1);
-
+
return state->set<AutoreleasePoolContents>(pool, newCnts);
}
@@ -1836,7 +1836,7 @@
//===----------------------------------------------------------------------===//
namespace {
-
+
class VISIBILITY_HIDDEN CFRefCount : public GRTransferFuncs {
public:
class BindingsPrinter : public GRState::Printer {
@@ -1847,9 +1847,9 @@
private:
typedef llvm::DenseMap<const ExplodedNode*, const RetainSummary*>
- SummaryLogTy;
+ SummaryLogTy;
- RetainSummaryManager Summaries;
+ RetainSummaryManager Summaries;
SummaryLogTy SummaryLog;
const LangOptions& LOpts;
ARCounts::Factory ARCountFactory;
@@ -1860,7 +1860,7 @@
BugType *overAutorelease;
BugType *returnNotOwnedForOwned;
BugReporter *BR;
-
+
const GRState * Update(const GRState * state, SymbolRef sym, RefVal V, ArgEffect E,
RefVal::Kind& hasErr);
@@ -1870,40 +1870,40 @@
ExplodedNode* Pred,
const GRState* St,
RefVal::Kind hasErr, SymbolRef Sym);
-
+
const GRState * HandleSymbolDeath(const GRState * state, SymbolRef sid, RefVal V,
llvm::SmallVectorImpl<SymbolRef> &Leaked);
-
+
ExplodedNode* ProcessLeaks(const GRState * state,
llvm::SmallVectorImpl<SymbolRef> &Leaked,
GenericNodeBuilder &Builder,
GRExprEngine &Eng,
ExplodedNode *Pred = 0);
-
-public:
+
+public:
CFRefCount(ASTContext& Ctx, bool gcenabled, const LangOptions& lopts)
: Summaries(Ctx, gcenabled),
LOpts(lopts), useAfterRelease(0), releaseNotOwned(0),
deallocGC(0), deallocNotOwned(0),
leakWithinFunction(0), leakAtReturn(0), overAutorelease(0),
returnNotOwnedForOwned(0), BR(0) {}
-
+
virtual ~CFRefCount() {}
-
+
void RegisterChecks(BugReporter &BR);
-
+
virtual void RegisterPrinters(std::vector<GRState::Printer*>& Printers) {
Printers.push_back(new BindingsPrinter());
}
-
+
bool isGCEnabled() const { return Summaries.isGCEnabled(); }
const LangOptions& getLangOptions() const { return LOpts; }
-
+
const RetainSummary *getSummaryOfNode(const ExplodedNode *N) const {
SummaryLogTy::const_iterator I = SummaryLog.find(N);
return I == SummaryLog.end() ? 0 : I->second;
}
-
+
// Calls.
void EvalSummary(ExplodedNodeSet& Dst,
@@ -1914,47 +1914,47 @@
const RetainSummary& Summ,
ExprIterator arg_beg, ExprIterator arg_end,
ExplodedNode* Pred);
-
+
virtual void EvalCall(ExplodedNodeSet& Dst,
GRExprEngine& Eng,
GRStmtNodeBuilder& Builder,
CallExpr* CE, SVal L,
- ExplodedNode* Pred);
-
-
+ ExplodedNode* Pred);
+
+
virtual void EvalObjCMessageExpr(ExplodedNodeSet& Dst,
GRExprEngine& Engine,
GRStmtNodeBuilder& Builder,
ObjCMessageExpr* ME,
ExplodedNode* Pred);
-
+
bool EvalObjCMessageExprAux(ExplodedNodeSet& Dst,
GRExprEngine& Engine,
GRStmtNodeBuilder& Builder,
ObjCMessageExpr* ME,
ExplodedNode* Pred);
- // Stores.
+ // Stores.
virtual void EvalBind(GRStmtNodeBuilderRef& B, SVal location, SVal val);
// End-of-path.
-
+
virtual void EvalEndPath(GRExprEngine& Engine,
GREndPathNodeBuilder& Builder);
-
+
virtual void EvalDeadSymbols(ExplodedNodeSet& Dst,
GRExprEngine& Engine,
GRStmtNodeBuilder& Builder,
ExplodedNode* Pred,
Stmt* S, const GRState* state,
SymbolReaper& SymReaper);
-
+
std::pair<ExplodedNode*, const GRState *>
HandleAutoreleaseCounts(const GRState * state, GenericNodeBuilder Bd,
ExplodedNode* Pred, GRExprEngine &Eng,
SymbolRef Sym, RefVal V, bool &stop);
// Return statements.
-
+
virtual void EvalReturn(ExplodedNodeSet& Dst,
GRExprEngine& Engine,
GRStmtNodeBuilder& Builder,
@@ -1977,34 +1977,34 @@
else
Out << "<pool>";
Out << ":{";
-
+
// Get the contents of the pool.
if (const ARCounts *cnts = state->get<AutoreleasePoolContents>(Sym))
for (ARCounts::iterator J=cnts->begin(), EJ=cnts->end(); J != EJ; ++J)
Out << '(' << J.getKey() << ',' << J.getData() << ')';
- Out << '}';
+ Out << '}';
}
void CFRefCount::BindingsPrinter::Print(llvm::raw_ostream& Out,
const GRState* state,
const char* nl, const char* sep) {
-
+
RefBindings B = state->get<RefBindings>();
-
+
if (!B.isEmpty())
Out << sep << nl;
-
+
for (RefBindings::iterator I=B.begin(), E=B.end(); I!=E; ++I) {
Out << (*I).first << " : ";
(*I).second.print(Out);
Out << nl;
}
-
+
// Print the autorelease stack.
Out << sep << nl << "AR pool stack:";
ARStack stack = state->get<AutoreleaseStack>();
-
+
PrintPool(Out, SymbolRef(), state); // Print the caller's pool.
for (ARStack::iterator I=stack.begin(), E=stack.end(); I!=E; ++I)
PrintPool(Out, *I, state);
@@ -2017,117 +2017,117 @@
//===----------------------------------------------------------------------===//
namespace {
-
+
//===-------------===//
// Bug Descriptions. //
- //===-------------===//
-
+ //===-------------===//
+
class VISIBILITY_HIDDEN CFRefBug : public BugType {
protected:
CFRefCount& TF;
-
- CFRefBug(CFRefCount* tf, const char* name)
- : BugType(name, "Memory (Core Foundation/Objective-C)"), TF(*tf) {}
+
+ CFRefBug(CFRefCount* tf, const char* name)
+ : BugType(name, "Memory (Core Foundation/Objective-C)"), TF(*tf) {}
public:
-
+
CFRefCount& getTF() { return TF; }
const CFRefCount& getTF() const { return TF; }
-
+
// FIXME: Eventually remove.
virtual const char* getDescription() const = 0;
-
+
virtual bool isLeak() const { return false; }
};
-
+
class VISIBILITY_HIDDEN UseAfterRelease : public CFRefBug {
public:
UseAfterRelease(CFRefCount* tf)
: CFRefBug(tf, "Use-after-release") {}
-
+
const char* getDescription() const {
return "Reference-counted object is used after it is released";
- }
+ }
};
-
+
class VISIBILITY_HIDDEN BadRelease : public CFRefBug {
public:
BadRelease(CFRefCount* tf) : CFRefBug(tf, "Bad release") {}
-
+
const char* getDescription() const {
return "Incorrect decrement of the reference count of an "
"object is not owned at this point by the caller";
}
};
-
+
class VISIBILITY_HIDDEN DeallocGC : public CFRefBug {
public:
DeallocGC(CFRefCount *tf)
: CFRefBug(tf, "-dealloc called while using garbage collection") {}
-
+
const char *getDescription() const {
return "-dealloc called while using garbage collection";
}
};
-
+
class VISIBILITY_HIDDEN DeallocNotOwned : public CFRefBug {
public:
DeallocNotOwned(CFRefCount *tf)
: CFRefBug(tf, "-dealloc sent to non-exclusively owned object") {}
-
+
const char *getDescription() const {
return "-dealloc sent to object that may be referenced elsewhere";
}
- };
-
+ };
+
class VISIBILITY_HIDDEN OverAutorelease : public CFRefBug {
public:
- OverAutorelease(CFRefCount *tf) :
+ OverAutorelease(CFRefCount *tf) :
CFRefBug(tf, "Object sent -autorelease too many times") {}
-
+
const char *getDescription() const {
return "Object sent -autorelease too many times";
}
};
-
+
class VISIBILITY_HIDDEN ReturnedNotOwnedForOwned : public CFRefBug {
public:
ReturnedNotOwnedForOwned(CFRefCount *tf) :
CFRefBug(tf, "Method should return an owned object") {}
-
+
const char *getDescription() const {
return "Object with +0 retain counts returned to caller where a +1 "
"(owning) retain count is expected";
}
};
-
+
class VISIBILITY_HIDDEN Leak : public CFRefBug {
const bool isReturn;
protected:
Leak(CFRefCount* tf, const char* name, bool isRet)
: CFRefBug(tf, name), isReturn(isRet) {}
public:
-
+
const char* getDescription() const { return ""; }
-
+
bool isLeak() const { return true; }
};
-
+
class VISIBILITY_HIDDEN LeakAtReturn : public Leak {
public:
LeakAtReturn(CFRefCount* tf, const char* name)
: Leak(tf, name, true) {}
};
-
+
class VISIBILITY_HIDDEN LeakWithinFunction : public Leak {
public:
LeakWithinFunction(CFRefCount* tf, const char* name)
: Leak(tf, name, false) {}
- };
-
+ };
+
//===---------===//
// Bug Reports. //
//===---------===//
-
+
class VISIBILITY_HIDDEN CFRefReport : public RangedBugReport {
protected:
SymbolRef Sym;
@@ -2140,30 +2140,30 @@
CFRefReport(CFRefBug& D, const CFRefCount &tf,
ExplodedNode *n, SymbolRef sym, const char* endText)
: RangedBugReport(D, D.getDescription(), endText, n), Sym(sym), TF(tf) {}
-
+
virtual ~CFRefReport() {}
-
+
CFRefBug& getBugType() {
return (CFRefBug&) RangedBugReport::getBugType();
}
const CFRefBug& getBugType() const {
return (const CFRefBug&) RangedBugReport::getBugType();
}
-
+
virtual void getRanges(const SourceRange*& beg, const SourceRange*& end) {
if (!getBugType().isLeak())
RangedBugReport::getRanges(beg, end);
else
beg = end = 0;
}
-
+
SymbolRef getSymbol() const { return Sym; }
-
+
PathDiagnosticPiece* getEndPath(BugReporterContext& BRC,
const ExplodedNode* N);
-
+
std::pair<const char**,const char**> getExtraDescriptiveText();
-
+
PathDiagnosticPiece* VisitNode(const ExplodedNode* N,
const ExplodedNode* PrevN,
BugReporterContext& BRC);
@@ -2176,36 +2176,36 @@
CFRefLeakReport(CFRefBug& D, const CFRefCount &tf,
ExplodedNode *n, SymbolRef sym,
GRExprEngine& Eng);
-
+
PathDiagnosticPiece* getEndPath(BugReporterContext& BRC,
const ExplodedNode* N);
-
+
SourceLocation getLocation() const { return AllocSite; }
- };
+ };
} // end anonymous namespace
void CFRefCount::RegisterChecks(BugReporter& BR) {
useAfterRelease = new UseAfterRelease(this);
BR.Register(useAfterRelease);
-
+
releaseNotOwned = new BadRelease(this);
BR.Register(releaseNotOwned);
-
+
deallocGC = new DeallocGC(this);
BR.Register(deallocGC);
-
+
deallocNotOwned = new DeallocNotOwned(this);
BR.Register(deallocNotOwned);
-
+
overAutorelease = new OverAutorelease(this);
BR.Register(overAutorelease);
-
+
returnNotOwnedForOwned = new ReturnedNotOwnedForOwned(this);
BR.Register(returnNotOwnedForOwned);
-
+
// First register "return" leaks.
const char* name = 0;
-
+
if (isGCEnabled())
name = "Leak of returned object when using garbage collection";
else if (getLangOptions().getGCMode() == LangOptions::HybridGC)
@@ -2215,13 +2215,13 @@
assert(getLangOptions().getGCMode() == LangOptions::NonGC);
name = "Leak of returned object";
}
-
+
leakAtReturn = new LeakAtReturn(this, name);
BR.Register(leakAtReturn);
-
+
// Second, register leaks within a function/method.
if (isGCEnabled())
- name = "Leak of object when using garbage collection";
+ name = "Leak of object when using garbage collection";
else if (getLangOptions().getGCMode() == LangOptions::HybridGC)
name = "Leak of object when not using garbage collection (GC) in "
"dual GC/non-GC code";
@@ -2229,22 +2229,22 @@
assert(getLangOptions().getGCMode() == LangOptions::NonGC);
name = "Leak";
}
-
+
leakWithinFunction = new LeakWithinFunction(this, name);
BR.Register(leakWithinFunction);
-
+
// Save the reference to the BugReporter.
this->BR = &BR;
}
static const char* Msgs[] = {
// GC only
- "Code is compiled to only use garbage collection",
+ "Code is compiled to only use garbage collection",
// No GC.
"Code is compiled to use reference counts",
// Hybrid, with GC.
"Code is compiled to use either garbage collection (GC) or reference counts"
- " (non-GC). The bug occurs with GC enabled",
+ " (non-GC). The bug occurs with GC enabled",
// Hybrid, without GC
"Code is compiled to use either garbage collection (GC) or reference counts"
" (non-GC). The bug occurs in non-GC mode"
@@ -2252,19 +2252,19 @@
std::pair<const char**,const char**> CFRefReport::getExtraDescriptiveText() {
CFRefCount& TF = static_cast<CFRefBug&>(getBugType()).getTF();
-
+
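// Return a [begin, end) range into Msgs selecting the note that matches the
// current GC mode.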
switch (TF.getLangOptions().getGCMode()) {
default:
assert(false);
-
+
case LangOptions::GCOnly:
assert (TF.isGCEnabled());
- return std::make_pair(&Msgs[0], &Msgs[0]+1);
-
+ return std::make_pair(&Msgs[0], &Msgs[0]+1);
+
case LangOptions::NonGC:
assert (!TF.isGCEnabled());
return std::make_pair(&Msgs[1], &Msgs[1]+1);
-
+
case LangOptions::HybridGC:
if (TF.isGCEnabled())
return std::make_pair(&Msgs[2], &Msgs[2]+1);
@@ -2278,50 +2278,50 @@
for (llvm::SmallVectorImpl<ArgEffect>::const_iterator I=V.begin(), E=V.end();
I!=E; ++I)
if (*I == X) return true;
-
+
return false;
}
PathDiagnosticPiece* CFRefReport::VisitNode(const ExplodedNode* N,
const ExplodedNode* PrevN,
BugReporterContext& BRC) {
-
+
if (!isa<PostStmt>(N->getLocation()))
return NULL;
-
+
// Check if the type state has changed.
const GRState *PrevSt = PrevN->getState();
const GRState *CurrSt = N->getState();
-
- const RefVal* CurrT = CurrSt->get<RefBindings>(Sym);
+
+ const RefVal* CurrT = CurrSt->get<RefBindings>(Sym);
if (!CurrT) return NULL;
-
+
const RefVal &CurrV = *CurrT;
const RefVal *PrevT = PrevSt->get<RefBindings>(Sym);
-
+
// Create a string buffer to contain all the useful things we want
// to tell the user.
std::string sbuf;
llvm::raw_string_ostream os(sbuf);
-
+
// This is the allocation site since the previous node had no bindings
// for this symbol.
if (!PrevT) {
const Stmt* S = cast<PostStmt>(N->getLocation()).getStmt();
-
+
if (const CallExpr *CE = dyn_cast<CallExpr>(S)) {
// Get the name of the callee (if it is available).
SVal X = CurrSt->getSValAsScalarOrLoc(CE->getCallee());
if (const FunctionDecl* FD = X.getAsFunctionDecl())
os << "Call to function '" << FD->getNameAsString() <<'\'';
else
- os << "function call";
- }
+ os << "function call";
+ }
else {
assert (isa<ObjCMessageExpr>(S));
os << "Method";
}
-
+
if (CurrV.getObjKind() == RetEffect::CF) {
os << " returns a Core Foundation object with a ";
}
@@ -2329,10 +2329,10 @@
assert (CurrV.getObjKind() == RetEffect::ObjC);
os << " returns an Objective-C object with a ";
}
-
+
if (CurrV.isOwned()) {
os << "+1 retain count (owning reference).";
-
+
if (static_cast<CFRefBug&>(getBugType()).getTF().isGCEnabled()) {
assert(CurrV.getObjKind() == RetEffect::CF);
os << " "
@@ -2343,39 +2343,39 @@
assert (CurrV.isNotOwned());
os << "+0 retain count (non-owning reference).";
}
-
+
PathDiagnosticLocation Pos(S, BRC.getSourceManager());
return new PathDiagnosticEventPiece(Pos, os.str());
}
-
+
// Gather up the effects that were performed on the object at this
// program point
llvm::SmallVector<ArgEffect, 2> AEffects;
-
+
if (const RetainSummary *Summ =
TF.getSummaryOfNode(BRC.getNodeResolver().getOriginalNode(N))) {
// We only have summaries attached to nodes after evaluating CallExpr and
// ObjCMessageExprs.
const Stmt* S = cast<PostStmt>(N->getLocation()).getStmt();
-
+
if (const CallExpr *CE = dyn_cast<CallExpr>(S)) {
// Iterate through the parameter expressions and see if the symbol
// was ever passed as an argument.
unsigned i = 0;
-
+
for (CallExpr::const_arg_iterator AI=CE->arg_begin(), AE=CE->arg_end();
AI!=AE; ++AI, ++i) {
-
+
// Retrieve the value of the argument. Is it the symbol
// we are interested in?
if (CurrSt->getSValAsScalarOrLoc(*AI).getAsLocSymbol() != Sym)
continue;
-
+
// We have an argument. Get the effect!
AEffects.push_back(Summ->getArg(i));
}
}
- else if (const ObjCMessageExpr *ME = dyn_cast<ObjCMessageExpr>(S)) {
+ else if (const ObjCMessageExpr *ME = dyn_cast<ObjCMessageExpr>(S)) {
if (const Expr *receiver = ME->getReceiver())
if (CurrSt->getSValAsScalarOrLoc(receiver).getAsLocSymbol() == Sym) {
// The symbol we are tracking is the receiver.
@@ -2383,11 +2383,11 @@
}
}
}
-
+
do {
// Get the previous type state.
RefVal PrevV = *PrevT;
-
+
// Specially handle -dealloc.
if (!TF.isGCEnabled() && contains(AEffects, Dealloc)) {
// Determine if the object's reference count was pushed to zero.
@@ -2400,7 +2400,7 @@
break;
}
}
-
+
// Specially handle CFMakeCollectable and friends.
if (contains(AEffects, MakeCollectable)) {
// Get the name of the function.
@@ -2408,15 +2408,15 @@
SVal X = CurrSt->getSValAsScalarOrLoc(cast<CallExpr>(S)->getCallee());
const FunctionDecl* FD = X.getAsFunctionDecl();
const std::string& FName = FD->getNameAsString();
-
+
if (TF.isGCEnabled()) {
// Determine if the object's reference count was pushed to zero.
assert(!(PrevV == CurrV) && "The typestate *must* have changed.");
-
+
os << "In GC mode a call to '" << FName
<< "' decrements an object's retain count and registers the "
"object with the garbage collector. ";
-
+
if (CurrV.getKind() == RefVal::Released) {
assert(CurrV.getCount() == 0);
os << "Since it now has a 0 retain count the object can be "
@@ -2427,67 +2427,67 @@
"After this call its retain count is +" << CurrV.getCount()
<< '.';
}
- else
+ else
os << "When GC is not enabled a call to '" << FName
<< "' has no effect on its argument.";
-
+
// Nothing more to say.
break;
}
-
- // Determine if the typestate has changed.
+
+ // Determine if the typestate has changed.
if (!(PrevV == CurrV))
switch (CurrV.getKind()) {
case RefVal::Owned:
case RefVal::NotOwned:
-
+
if (PrevV.getCount() == CurrV.getCount()) {
// Did an autorelease message get sent?
if (PrevV.getAutoreleaseCount() == CurrV.getAutoreleaseCount())
return 0;
-
+
assert(PrevV.getAutoreleaseCount() < CurrV.getAutoreleaseCount());
os << "Object sent -autorelease message";
break;
}
-
+
if (PrevV.getCount() > CurrV.getCount())
os << "Reference count decremented.";
else
os << "Reference count incremented.";
-
+
if (unsigned Count = CurrV.getCount())
os << " The object now has a +" << Count << " retain count.";
-
+
if (PrevV.getKind() == RefVal::Released) {
assert(TF.isGCEnabled() && CurrV.getCount() > 0);
os << " The object is not eligible for garbage collection until the "
"retain count reaches 0 again.";
}
-
+
break;
-
+
case RefVal::Released:
os << "Object released.";
break;
-
+
case RefVal::ReturnedOwned:
os << "Object returned to caller as an owning reference (single retain "
"count transferred to caller).";
break;
-
+
case RefVal::ReturnedNotOwned:
os << "Object returned to caller with a +0 (non-owning) retain count.";
break;
-
+
default:
return NULL;
}
-
+
// Emit any remaining diagnostics for the argument effects (if any).
for (llvm::SmallVectorImpl<ArgEffect>::iterator I=AEffects.begin(),
E=AEffects.end(); I != E; ++I) {
-
+
// A bunch of things have alternate behavior under GC.
if (TF.isGCEnabled())
switch (*I) {
@@ -2503,25 +2503,25 @@
continue;
}
}
- } while(0);
-
+ } while (0);
+
if (os.str().empty())
return 0; // We have nothing to say!
const Stmt* S = cast<PostStmt>(N->getLocation()).getStmt();
PathDiagnosticLocation Pos(S, BRC.getSourceManager());
PathDiagnosticPiece* P = new PathDiagnosticEventPiece(Pos, os.str());
-
+
// Add the range by scanning the children of the statement for any bindings
// to Sym.
- for (Stmt::const_child_iterator I = S->child_begin(), E = S->child_end();
+ for (Stmt::const_child_iterator I = S->child_begin(), E = S->child_end();
I!=E; ++I)
if (const Expr* Exp = dyn_cast_or_null<Expr>(*I))
if (CurrSt->getSValAsScalarOrLoc(Exp).getAsLocSymbol() == Sym) {
P->addRange(Exp->getSourceRange());
break;
}
-
+
return P;
}
@@ -2531,56 +2531,56 @@
SymbolRef Sym;
const MemRegion* Binding;
bool First;
-
+
public:
FindUniqueBinding(SymbolRef sym) : Sym(sym), Binding(0), First(true) {}
-
+
bool HandleBinding(StoreManager& SMgr, Store store, const MemRegion* R,
SVal val) {
-
- SymbolRef SymV = val.getAsSymbol();
+
+ SymbolRef SymV = val.getAsSymbol();
if (!SymV || SymV != Sym)
return true;
-
+
if (Binding) {
First = false;
return false;
}
else
Binding = R;
-
- return true;
+
+ return true;
}
-
+
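// True only if the symbol was bound to exactly one region.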
operator bool() { return First && Binding; }
const MemRegion* getRegion() { return Binding; }
- };
+ };
}
static std::pair<const ExplodedNode*,const MemRegion*>
GetAllocationSite(GRStateManager& StateMgr, const ExplodedNode* N,
SymbolRef Sym) {
-
+
// Find both the first node that referred to the tracked symbol and the
// memory location that the value was stored to.
const ExplodedNode* Last = N;
- const MemRegion* FirstBinding = 0;
-
+ const MemRegion* FirstBinding = 0;
+
while (N) {
const GRState* St = N->getState();
RefBindings B = St->get<RefBindings>();
-
+
if (!B.lookup(Sym))
break;
-
+
FindUniqueBinding FB(Sym);
- StateMgr.iterBindings(St, FB);
- if (FB) FirstBinding = FB.getRegion();
-
+ StateMgr.iterBindings(St, FB);
+ if (FB) FirstBinding = FB.getRegion();
+
Last = N;
- N = N->pred_empty() ? NULL : *(N->pred_begin());
+ N = N->pred_empty() ? NULL : *(N->pred_begin());
}
-
+
return std::make_pair(Last, FirstBinding);
}
@@ -2596,36 +2596,36 @@
PathDiagnosticPiece*
CFRefLeakReport::getEndPath(BugReporterContext& BRC,
const ExplodedNode* EndN){
-
+
// Tell the BugReporterContext to report cases when the tracked symbol is
// assigned to different variables, etc.
BRC.addNotableSymbol(Sym);
-
+
// We are reporting a leak. Walk up the graph to get to the first node where
// the symbol appeared, and also get the first VarDecl that the tracked object
// is stored to.
const ExplodedNode* AllocNode = 0;
const MemRegion* FirstBinding = 0;
-
+
llvm::tie(AllocNode, FirstBinding) =
GetAllocationSite(BRC.getStateManager(), EndN, Sym);
-
- // Get the allocate site.
+
+ // Get the allocation site.
assert(AllocNode);
const Stmt* FirstStmt = cast<PostStmt>(AllocNode->getLocation()).getStmt();
-
+
SourceManager& SMgr = BRC.getSourceManager();
unsigned AllocLine =SMgr.getInstantiationLineNumber(FirstStmt->getLocStart());
-
+
// Compute an actual location for the leak. Sometimes a leak doesn't
// occur at an actual statement (e.g., transition between blocks; end
// of function) so we need to walk the graph and compute a real location.
const ExplodedNode* LeakN = EndN;
PathDiagnosticLocation L;
-
+
while (LeakN) {
ProgramPoint P = LeakN->getLocation();
-
+
if (const PostStmt *PS = dyn_cast<PostStmt>(&P)) {
L = PathDiagnosticLocation(PS->getStmt()->getLocStart(), SMgr);
break;
@@ -2636,26 +2636,26 @@
break;
}
}
-
+
LeakN = LeakN->succ_empty() ? 0 : *(LeakN->succ_begin());
}
-
+
if (!L.isValid()) {
const Decl &D = BRC.getCodeDecl();
L = PathDiagnosticLocation(D.getBodyRBrace(), SMgr);
}
-
+
std::string sbuf;
llvm::raw_string_ostream os(sbuf);
-
+
os << "Object allocated on line " << AllocLine;
-
+
if (FirstBinding)
- os << " and stored into '" << FirstBinding->getString() << '\'';
-
+ os << " and stored into '" << FirstBinding->getString() << '\'';
+
// Get the retain count.
const RefVal* RV = EndN->getState()->get<RefBindings>(Sym);
-
+
if (RV->getKind() == RefVal::ErrorLeakReturned) {
// FIXME: Per comments in rdar://6320065, "create" only applies to CF
// objects. Only "copy", "alloc", "retain" and "new" transfer ownership
@@ -2678,16 +2678,15 @@
else
os << " is no longer referenced after this point and has a retain count of"
" +" << RV->getCount() << " (object leaked)";
-
+
return new PathDiagnosticEventPiece(L, os.str());
}
CFRefLeakReport::CFRefLeakReport(CFRefBug& D, const CFRefCount &tf,
ExplodedNode *n,
SymbolRef sym, GRExprEngine& Eng)
-: CFRefReport(D, tf, n, sym)
-{
-
+: CFRefReport(D, tf, n, sym) {
+
// Most bug reports are cached at the location where they occurred.
// With leaks, we want to unique them by the location where they were
// allocated, and only report a single path. To do this, we need to find
@@ -2697,14 +2696,14 @@
// that all ancestor nodes that represent the allocation site have the
// same SourceLocation.
const ExplodedNode* AllocNode = 0;
-
+
llvm::tie(AllocNode, AllocBinding) = // Set AllocBinding.
GetAllocationSite(Eng.getStateManager(), getEndNode(), getSymbol());
-
+
// Get the SourceLocation for the allocation site.
ProgramPoint P = AllocNode->getLocation();
AllocSite = cast<PostStmt>(P).getStmt()->getLocStart();
-
+
// Fill in the description of the bug.
Description.clear();
llvm::raw_string_ostream os(Description);
@@ -2713,9 +2712,9 @@
os << "Potential leak ";
if (tf.isGCEnabled()) {
os << "(when using garbage collection) ";
- }
+ }
os << "of an object allocated on line " << AllocLine;
-
+
// FIXME: AllocBinding doesn't get populated for RegionStore yet.
if (AllocBinding)
os << " and stored into '" << AllocBinding->getString() << '\'';
@@ -2737,16 +2736,16 @@
/// more specific than id.
if (const ObjCMessageExpr *ME = dyn_cast<ObjCMessageExpr>(RetE))
if (const ObjCObjectPointerType *PT = RetTy->getAsObjCObjectPointerType())
- if (PT->isObjCQualifiedIdType() || PT->isObjCIdType() ||
+ if (PT->isObjCQualifiedIdType() || PT->isObjCIdType() ||
PT->isObjCClassType()) {
// At this point we know the return type of the message expression is
// id, id<...>, or Class. If we have an ObjCInterfaceDecl, we know this
// is a call to a class method whose type we can resolve. In such
// cases, promote the return type to XXX* (where XXX is the class).
- const ObjCInterfaceDecl *D = ME->getClassInfo().first;
+ const ObjCInterfaceDecl *D = ME->getClassInfo().first;
return !D ? RetTy : Ctx.getPointerType(Ctx.getObjCInterfaceType(D));
}
-
+
return RetTy;
}
@@ -2758,7 +2757,7 @@
const RetainSummary& Summ,
ExprIterator arg_beg, ExprIterator arg_end,
ExplodedNode* Pred) {
-
+
// Get the state.
const GRState *state = Builder.GetState(Pred);
@@ -2766,10 +2765,10 @@
RefVal::Kind hasErr = (RefVal::Kind) 0;
unsigned idx = 0;
Expr* ErrorExpr = NULL;
- SymbolRef ErrorSym = 0;
-
- for (ExprIterator I = arg_beg; I != arg_end; ++I, ++idx) {
- SVal V = state->getSValAsScalarOrLoc(*I);
+ SymbolRef ErrorSym = 0;
+
+ for (ExprIterator I = arg_beg; I != arg_end; ++I, ++idx) {
+ SVal V = state->getSValAsScalarOrLoc(*I);
SymbolRef Sym = V.getAsLocSymbol();
if (Sym)
@@ -2779,7 +2778,7 @@
ErrorExpr = *I;
ErrorSym = Sym;
break;
- }
+ }
continue;
}
@@ -2787,14 +2786,14 @@
if (loc::MemRegionVal* MR = dyn_cast<loc::MemRegionVal>(&V)) {
if (Summ.getArg(idx) == DoNothingByRef)
continue;
-
- // Invalidate the value of the variable passed by reference.
-
+
+ // Invalidate the value of the variable passed by reference.
+
// FIXME: We can have collisions on the conjured symbol if the
// expression *I also creates conjured symbols. We probably want
// to identify conjured symbols by an expression pair: the enclosing
// expression (the context) and the expression itself. This should
- // disambiguate conjured symbols.
+ // disambiguate conjured symbols.
unsigned Count = Builder.getCurrentBlockCount();
StoreManager& StoreMgr = Eng.getStateManager().getStoreManager();
@@ -2825,9 +2824,9 @@
// Is the invalidated variable something that we were tracking?
SymbolRef Sym = state->getSValAsScalarOrLoc(R).getAsLocSymbol();
-
+
// Remove any existing reference-count binding.
- if (Sym)
+ if (Sym)
state = state->remove<RefBindings>(Sym);
state = StoreMgr.InvalidateRegion(state, R, *I, Count);
@@ -2845,9 +2844,9 @@
// We should bind it to UnknownVal explicitly. Otherwise default value
// may be loaded.
state = state->unbindLoc(cast<nonloc::LocAsInteger>(V).getLoc());
- }
-
- // Evaluate the effect on the message receiver.
+ }
+
+ // Evaluate the effect on the message receiver.
if (!ErrorExpr && Receiver) {
SymbolRef Sym = state->getSValAsScalarOrLoc(Receiver).getAsLocSymbol();
if (Sym) {
@@ -2860,17 +2859,17 @@
}
}
}
-
- // Process any errors.
+
+ // Process any errors.
if (hasErr) {
ProcessNonLeakError(Dst, Builder, Ex, ErrorExpr, Pred, state,
hasErr, ErrorSym);
return;
}
-
- // Consult the summary for the return value.
+
+ // Consult the summary for the return value.
RetEffect RE = Summ.getRetEffect();
-
+
if (RE.getKind() == RetEffect::OwnedWhenTrackedReceiver) {
assert(Receiver);
SVal V = state->getSValAsScalarOrLoc(Receiver);
@@ -2883,32 +2882,32 @@
if (!found)
RE = RetEffect::MakeNoRet();
- }
-
+ }
+
switch (RE.getKind()) {
default:
assert (false && "Unhandled RetEffect."); break;
-
- case RetEffect::NoRet: {
+
+ case RetEffect::NoRet: {
// Make up a symbol for the return value (not reference counted).
// FIXME: Most of this logic is not specific to the retain/release
// checker.
-
+
// FIXME: We eventually should handle structs and other compound types
// that are returned by value.
-
+
QualType T = Ex->getType();
-
+
if (Loc::IsLocType(T) || (T->isIntegerType() && T->isScalarType())) {
unsigned Count = Builder.getCurrentBlockCount();
ValueManager &ValMgr = Eng.getValueManager();
SVal X = ValMgr.getConjuredSymbolVal(Ex, T, Count);
state = state->BindExpr(Ex, X, false);
- }
-
+ }
+
break;
}
-
+
case RetEffect::Alias: {
unsigned idx = RE.getIndex();
assert (arg_end >= arg_beg);
@@ -2917,20 +2916,20 @@
state = state->BindExpr(Ex, V, false);
break;
}
-
+
case RetEffect::ReceiverAlias: {
assert (Receiver);
SVal V = state->getSValAsScalarOrLoc(Receiver);
state = state->BindExpr(Ex, V, false);
break;
}
-
+
case RetEffect::OwnedAllocatedSymbol:
case RetEffect::OwnedSymbol: {
unsigned Count = Builder.getCurrentBlockCount();
- ValueManager &ValMgr = Eng.getValueManager();
+ ValueManager &ValMgr = Eng.getValueManager();
SymbolRef Sym = ValMgr.getConjuredSymbol(Ex, Count);
- QualType RetT = GetReturnType(Ex, ValMgr.getContext());
+ QualType RetT = GetReturnType(Ex, ValMgr.getContext());
state = state->set<RefBindings>(Sym, RefVal::makeOwned(RE.getObjKind(),
RetT));
state = state->BindExpr(Ex, ValMgr.makeLoc(Sym), false);
@@ -2941,31 +2940,31 @@
if (RE.getKind() == RetEffect::OwnedAllocatedSymbol) {
bool isFeasible;
state = state.Assume(loc::SymbolVal(Sym), true, isFeasible);
- assert(isFeasible && "Cannot assume fresh symbol is non-null.");
+ assert(isFeasible && "Cannot assume fresh symbol is non-null.");
}
#endif
-
+
break;
}
-
+
case RetEffect::GCNotOwnedSymbol:
case RetEffect::NotOwnedSymbol: {
unsigned Count = Builder.getCurrentBlockCount();
ValueManager &ValMgr = Eng.getValueManager();
SymbolRef Sym = ValMgr.getConjuredSymbol(Ex, Count);
- QualType RetT = GetReturnType(Ex, ValMgr.getContext());
+ QualType RetT = GetReturnType(Ex, ValMgr.getContext());
state = state->set<RefBindings>(Sym, RefVal::makeNotOwned(RE.getObjKind(),
RetT));
state = state->BindExpr(Ex, ValMgr.makeLoc(Sym), false);
break;
}
}
-
+
// Generate a sink node if we are at the end of a path.
ExplodedNode *NewNode =
Summ.isEndPath() ? Builder.MakeSinkNode(Dst, Ex, Pred, state)
: Builder.MakeNode(Dst, Ex, Pred, state);
-
+
// Annotate the edge with the summary we used.
if (NewNode) SummaryLog[NewNode] = &Summ;
}
@@ -2977,9 +2976,9 @@
CallExpr* CE, SVal L,
ExplodedNode* Pred) {
const FunctionDecl* FD = L.getAsFunctionDecl();
- RetainSummary* Summ = !FD ? Summaries.getDefaultSummary()
+ RetainSummary* Summ = !FD ? Summaries.getDefaultSummary()
: Summaries.getSummary(const_cast<FunctionDecl*>(FD));
-
+
assert(Summ);
EvalSummary(Dst, Eng, Builder, CE, 0, *Summ,
CE->arg_begin(), CE->arg_end(), Pred);
@@ -2989,9 +2988,9 @@
GRExprEngine& Eng,
GRStmtNodeBuilder& Builder,
ObjCMessageExpr* ME,
- ExplodedNode* Pred) {
+ ExplodedNode* Pred) {
RetainSummary* Summ = 0;
-
+
if (Expr* Receiver = ME->getReceiver()) {
// We need the type information of the tracked receiver object.
// Retrieve it from the state.
@@ -3005,7 +3004,7 @@
SVal V = St->getSValAsScalarOrLoc(Receiver);
SymbolRef Sym = V.getAsLocSymbol();
-
+
if (Sym) {
if (const RefVal* T = St->get<RefBindings>(Sym)) {
if (const ObjCObjectPointerType* PT =
@@ -3028,21 +3027,21 @@
// Special-case: are we sending a message to "self"?
// This is a hack. When we have full-IP this should be removed.
- if (isa<ObjCMethodDecl>(Pred->getLocationContext()->getDecl())) {
+ if (isa<ObjCMethodDecl>(Pred->getLocationContext()->getDecl())) {
if (Expr* Receiver = ME->getReceiver()) {
SVal X = St->getSValAsScalarOrLoc(Receiver);
- if (loc::MemRegionVal* L = dyn_cast<loc::MemRegionVal>(&X)) {
+ if (loc::MemRegionVal* L = dyn_cast<loc::MemRegionVal>(&X)) {
// Get the region associated with 'self'.
- const LocationContext *LC = Pred->getLocationContext();
+ const LocationContext *LC = Pred->getLocationContext();
if (const ImplicitParamDecl *SelfDecl = LC->getSelfDecl()) {
- SVal SelfVal = St->getSVal(St->getRegion(SelfDecl, LC));
+ SVal SelfVal = St->getSVal(St->getRegion(SelfDecl, LC));
if (L->getBaseRegion() == SelfVal.getAsRegion()) {
// Update the summary to make the default argument effect
// 'StopTracking'.
Summ = Summaries.copySummary(Summ);
Summ->setDefaultArgEffect(StopTracking);
}
- }
+ }
}
}
}
@@ -3070,18 +3069,18 @@
}
};
} // end anonymous namespace
-
-void CFRefCount::EvalBind(GRStmtNodeBuilderRef& B, SVal location, SVal val) {
- // Are we storing to something that causes the value to "escape"?
+
+void CFRefCount::EvalBind(GRStmtNodeBuilderRef& B, SVal location, SVal val) {
+ // Are we storing to something that causes the value to "escape"?
bool escapes = false;
-
+
// A value escapes in three possible cases (this may change):
//
// (1) we are binding to something that is not a memory region.
// (2) we are binding to a memregion that does not have stack storage
// (3) we are binding to a memregion with stack storage that the store
- // does not understand.
+ // does not understand.
const GRState *state = B.getState();
if (!isa<loc::MemRegionVal>(location))
@@ -3089,7 +3088,7 @@
else {
const MemRegion* R = cast<loc::MemRegionVal>(location).getRegion();
escapes = !R->hasStackStorage();
-
+
if (!escapes) {
// To test (3), generate a new state with the binding removed. If it is
// the same state, then it escapes (since the store cannot represent
@@ -3116,35 +3115,35 @@
GRStmtNodeBuilder& Builder,
ReturnStmt* S,
ExplodedNode* Pred) {
-
+
Expr* RetE = S->getRetValue();
if (!RetE)
return;
-
+
const GRState *state = Builder.GetState(Pred);
SymbolRef Sym = state->getSValAsScalarOrLoc(RetE).getAsLocSymbol();
-
+
if (!Sym)
return;
-
+
// Get the reference count binding (if any).
const RefVal* T = state->get<RefBindings>(Sym);
-
+
if (!T)
return;
-
- // Change the reference count.
- RefVal X = *T;
-
- switch (X.getKind()) {
- case RefVal::Owned: {
+
+ // Change the reference count.
+ RefVal X = *T;
+
+ switch (X.getKind()) {
+ case RefVal::Owned: {
unsigned cnt = X.getCount();
assert (cnt > 0);
X.setCount(cnt - 1);
X = X ^ RefVal::ReturnedOwned;
break;
}
-
+
case RefVal::NotOwned: {
unsigned cnt = X.getCount();
if (cnt) {
@@ -3156,39 +3155,39 @@
}
break;
}
-
- default:
+
+ default:
return;
}
-
+
// Update the binding.
state = state->set<RefBindings>(Sym, X);
Pred = Builder.MakeNode(Dst, S, Pred, state);
-
+
// Did we cache out?
if (!Pred)
return;
-
+
// Update the autorelease counts.
static unsigned autoreleasetag = 0;
GenericNodeBuilder Bd(Builder, S, &autoreleasetag);
bool stop = false;
llvm::tie(Pred, state) = HandleAutoreleaseCounts(state , Bd, Pred, Eng, Sym,
X, stop);
-
+
// Did we cache out?
if (!Pred || stop)
return;
-
+
// Get the updated binding.
T = state->get<RefBindings>(Sym);
assert(T);
X = *T;
-
+
// Any leaks or other errors?
if (X.isReturnedOwned() && X.getCount() == 0) {
- const Decl *CD = Eng.getAnalysisManager().getCodeDecl();
- if (const ObjCMethodDecl* MD = dyn_cast<ObjCMethodDecl>(CD)) {
+ const Decl *CD = Eng.getAnalysisManager().getCodeDecl();
+ if (const ObjCMethodDecl* MD = dyn_cast<ObjCMethodDecl>(CD)) {
const RetainSummary &Summ = *Summaries.getMethodSummary(MD);
RetEffect RE = Summ.getRetEffect();
bool hasError = false;
@@ -3200,20 +3199,20 @@
// a leak (as the caller expects a GC'ed object) because no
// method should return ownership unless it returns a CF object.
X = X ^ RefVal::ErrorGCLeakReturned;
-
+
// Keep this false until this is properly tested.
hasError = true;
}
else if (!RE.isOwned()) {
// Either we are using GC and the returned object is a CF type
// or we aren't using GC. In either case, we expect that the
- // enclosing method is expected to return ownership.
+ // enclosing method is expected to return ownership.
hasError = true;
X = X ^ RefVal::ErrorLeakReturned;
}
}
-
- if (hasError) {
+
+ if (hasError) {
// Generate an error node.
static int ReturnOwnLeakTag = 0;
state = state->set<RefBindings>(Sym, X);
@@ -3227,16 +3226,16 @@
BR->EmitReport(report);
}
}
- }
+ }
}
else if (X.isReturnedNotOwned()) {
- const Decl *CD = Eng.getAnalysisManager().getCodeDecl();
+ const Decl *CD = Eng.getAnalysisManager().getCodeDecl();
if (const ObjCMethodDecl* MD = dyn_cast<ObjCMethodDecl>(CD)) {
const RetainSummary &Summ = *Summaries.getMethodSummary(MD);
if (Summ.getRetEffect().isOwned()) {
// Trying to return a not owned object to a caller expecting an
// owned object.
-
+
static int ReturnNotOwnedForOwnedTag = 0;
state = state->set<RefBindings>(Sym, X ^ RefVal::ErrorReturnedNotOwned);
if (ExplodedNode *N =
@@ -3261,18 +3260,18 @@
// FIXME: We may add to the interface of EvalAssume the list of symbols
// whose assumptions have changed. For now we just iterate through the
// bindings and check if any of the tracked symbols are NULL. This isn't
- // too bad since the number of symbols we will track in practice are
+ // too bad since the number of symbols we will track in practice are
// probably small and EvalAssume is only called at branches and a few
// other places.
RefBindings B = state->get<RefBindings>();
-
+
if (B.isEmpty())
return state;
-
- bool changed = false;
+
+ bool changed = false;
RefBindings::Factory& RefBFactory = state->get_context<RefBindings>();
- for (RefBindings::iterator I=B.begin(), E=B.end(); I!=E; ++I) {
+ for (RefBindings::iterator I=B.begin(), E=B.end(); I!=E; ++I) {
// Check if the symbol is null (or equal to any constant).
// If this is the case, stop tracking the symbol.
if (state->getSymVal(I.getKey())) {
@@ -3280,10 +3279,10 @@
B = RefBFactory.Remove(B, I.getKey());
}
}
-
+
if (changed)
state = state->set<RefBindings>(B);
-
+
return state;
}
@@ -3297,21 +3296,21 @@
case IncRefMsg: E = isGCEnabled() ? DoNothing : IncRef; break;
case DecRefMsg: E = isGCEnabled() ? DoNothing : DecRef; break;
case MakeCollectable: E = isGCEnabled() ? DecRef : DoNothing; break;
- case NewAutoreleasePool: E = isGCEnabled() ? DoNothing :
+ case NewAutoreleasePool: E = isGCEnabled() ? DoNothing :
NewAutoreleasePool; break;
}
-
+
// Handle all use-after-releases.
if (!isGCEnabled() && V.getKind() == RefVal::Released) {
V = V ^ RefVal::ErrorUseAfterRelease;
hasErr = V.getKind();
return state->set<RefBindings>(sym, V);
- }
-
+ }
+
switch (E) {
default:
assert (false && "Unhandled CFRef transition.");
-
+
case Dealloc:
// Any use of -dealloc in GC is *bad*.
if (isGCEnabled()) {
@@ -3319,7 +3318,7 @@
hasErr = V.getKind();
break;
}
-
+
switch (V.getKind()) {
default:
assert(false && "Invalid case.");
@@ -3332,13 +3331,13 @@
V = V ^ RefVal::ErrorDeallocNotOwned;
hasErr = V.getKind();
break;
- }
+ }
break;
case NewAutoreleasePool:
assert(!isGCEnabled());
return state->add<AutoreleaseStack>(sym);
-
+
case MayEscape:
if (V.getKind() == RefVal::Owned) {
V = V ^ RefVal::NotOwned;
@@ -3346,7 +3345,7 @@
}
// Fall-through.
-
+
case DoNothingByRef:
case DoNothing:
return state;
@@ -3354,7 +3353,7 @@
case Autorelease:
if (isGCEnabled())
return state;
-
+
// Update the autorelease counts.
state = SendAutorelease(state, ARCountFactory, sym);
V = V.autorelease();
@@ -3363,7 +3362,7 @@
case StopTracking:
return state->remove<RefBindings>(sym);
- case IncRef:
+ case IncRef:
switch (V.getKind()) {
default:
assert(false);
@@ -3371,15 +3370,15 @@
case RefVal::Owned:
case RefVal::NotOwned:
V = V + 1;
- break;
+ break;
case RefVal::Released:
// Non-GC cases are handled above.
assert(isGCEnabled());
V = (V ^ RefVal::Owned) + 1;
break;
- }
+ }
break;
-
+
case SelfOwn:
V = V ^ RefVal::NotOwned;
// Fall-through.
@@ -3394,23 +3393,23 @@
if (V.getCount() == 1) V = V ^ RefVal::Released;
V = V - 1;
break;
-
+
case RefVal::NotOwned:
if (V.getCount() > 0)
V = V - 1;
else {
V = V ^ RefVal::ErrorReleaseNotOwned;
hasErr = V.getKind();
- }
+ }
break;
-
+
case RefVal::Released:
// Non-GC cases are handled above.
assert(isGCEnabled());
V = V ^ RefVal::ErrorUseAfterRelease;
hasErr = V.getKind();
- break;
- }
+ break;
+ }
break;
}
return state->set<RefBindings>(sym, V);
@@ -3425,22 +3424,22 @@
ExplodedNode* Pred,
GRExprEngine &Eng,
SymbolRef Sym, RefVal V, bool &stop) {
-
+
unsigned ACnt = V.getAutoreleaseCount();
stop = false;
// No autorelease counts? Nothing to be done.
if (!ACnt)
return std::make_pair(Pred, state);
-
- assert(!isGCEnabled() && "Autorelease counts in GC mode?");
+
+ assert(!isGCEnabled() && "Autorelease counts in GC mode?");
unsigned Cnt = V.getCount();
-
+
// FIXME: Handle sending 'autorelease' to already released object.
if (V.getKind() == RefVal::ReturnedOwned)
++Cnt;
-
+
if (ACnt <= Cnt) {
if (ACnt == Cnt) {
V.clearCounts();
@@ -3457,7 +3456,7 @@
ExplodedNode *N = Bd.MakeNode(state, Pred);
stop = (N == 0);
return std::make_pair(N, state);
- }
+ }
// Whoa! More autorelease counts than retain counts left.
// Emit hard error.
@@ -3467,7 +3466,7 @@
if (ExplodedNode *N = Bd.MakeNode(state, Pred)) {
N->markAsSink();
-
+
std::string sbuf;
llvm::raw_string_ostream os(sbuf);
os << "Object over-autoreleased: object was sent -autorelease";
@@ -3479,26 +3478,26 @@
else
os << "+" << V.getCount();
os << " retain counts";
-
+
CFRefReport *report =
new CFRefReport(*static_cast<CFRefBug*>(overAutorelease),
*this, N, Sym, os.str().c_str());
BR->EmitReport(report);
}
-
+
return std::make_pair((ExplodedNode*)0, state);
}
const GRState *
CFRefCount::HandleSymbolDeath(const GRState * state, SymbolRef sid, RefVal V,
llvm::SmallVectorImpl<SymbolRef> &Leaked) {
-
- bool hasLeak = V.isOwned() ||
+
+ bool hasLeak = V.isOwned() ||
((V.isNotOwned() || V.isReturnedOwned()) && V.getCount() > 0);
-
+
if (!hasLeak)
return state->remove<RefBindings>(sid);
-
+
Leaked.push_back(sid);
return state->set<RefBindings>(sid, V ^ RefVal::ErrorLeak);
}
@@ -3509,49 +3508,49 @@
GenericNodeBuilder &Builder,
GRExprEngine& Eng,
ExplodedNode *Pred) {
-
+
if (Leaked.empty())
return Pred;
-
+
// Generate an intermediate node representing the leak point.
ExplodedNode *N = Builder.MakeNode(state, Pred);
-
+
if (N) {
for (llvm::SmallVectorImpl<SymbolRef>::iterator
I = Leaked.begin(), E = Leaked.end(); I != E; ++I) {
-
- CFRefBug *BT = static_cast<CFRefBug*>(Pred ? leakWithinFunction
+
+ CFRefBug *BT = static_cast<CFRefBug*>(Pred ? leakWithinFunction
: leakAtReturn);
assert(BT && "BugType not initialized.");
CFRefLeakReport* report = new CFRefLeakReport(*BT, *this, N, *I, Eng);
BR->EmitReport(report);
}
}
-
+
return N;
}
void CFRefCount::EvalEndPath(GRExprEngine& Eng,
GREndPathNodeBuilder& Builder) {
-
+
const GRState *state = Builder.getState();
GenericNodeBuilder Bd(Builder);
- RefBindings B = state->get<RefBindings>();
+ RefBindings B = state->get<RefBindings>();
ExplodedNode *Pred = 0;
for (RefBindings::iterator I = B.begin(), E = B.end(); I != E; ++I) {
bool stop = false;
llvm::tie(Pred, state) = HandleAutoreleaseCounts(state, Bd, Pred, Eng,
(*I).first,
- (*I).second, stop);
+ (*I).second, stop);
if (stop)
return;
}
-
- B = state->get<RefBindings>();
- llvm::SmallVector<SymbolRef, 10> Leaked;
-
+
+ B = state->get<RefBindings>();
+ llvm::SmallVector<SymbolRef, 10> Leaked;
+
for (RefBindings::iterator I = B.begin(), E = B.end(); I != E; ++I)
state = HandleSymbolDeath(state, (*I).first, (*I).second, Leaked);
@@ -3567,7 +3566,7 @@
SymbolReaper& SymReaper) {
RefBindings B = state->get<RefBindings>();
-
+
// Update counts from autorelease pools
for (SymbolReaper::dead_iterator I = SymReaper.dead_begin(),
E = SymReaper.dead_end(); I != E; ++I) {
@@ -3583,32 +3582,32 @@
return;
}
}
-
+
B = state->get<RefBindings>();
llvm::SmallVector<SymbolRef, 10> Leaked;
-
+
for (SymbolReaper::dead_iterator I = SymReaper.dead_begin(),
- E = SymReaper.dead_end(); I != E; ++I) {
+ E = SymReaper.dead_end(); I != E; ++I) {
if (const RefVal* T = B.lookup(*I))
state = HandleSymbolDeath(state, *I, *T, Leaked);
- }
-
+ }
+
static unsigned LeakPPTag = 0;
{
GenericNodeBuilder Bd(Builder, S, &LeakPPTag);
Pred = ProcessLeaks(state, Leaked, Bd, Eng, Pred);
}
-
+
// Did we cache out?
if (!Pred)
return;
-
+
// Now generate a new node that nukes the old bindings.
RefBindings::Factory& F = state->get_context<RefBindings>();
-
+
for (SymbolReaper::dead_iterator I = SymReaper.dead_begin(),
E = SymReaper.dead_end(); I!=E; ++I) B = F.Remove(B, *I);
-
+
state = state->set<RefBindings>(B);
Builder.MakeNode(Dst, S, Pred, state);
}
@@ -3621,19 +3620,19 @@
RefVal::Kind hasErr, SymbolRef Sym) {
Builder.BuildSinks = true;
ExplodedNode *N = Builder.MakeNode(Dst, NodeExpr, Pred, St);
-
+
if (!N)
return;
-
+
CFRefBug *BT = 0;
-
+
switch (hasErr) {
default:
assert(false && "Unhandled error.");
return;
case RefVal::ErrorUseAfterRelease:
BT = static_cast<CFRefBug*>(useAfterRelease);
- break;
+ break;
case RefVal::ErrorReleaseNotOwned:
BT = static_cast<CFRefBug*>(releaseNotOwned);
break;
@@ -3644,7 +3643,7 @@
BT = static_cast<CFRefBug*>(deallocNotOwned);
break;
}
-
+
CFRefReport *report = new CFRefReport(*BT, *this, N, Sym);
report->addRange(ErrorExpr->getSourceRange());
BR->EmitReport(report);
@@ -3657,4 +3656,4 @@
GRTransferFuncs* clang::MakeCFRefCountTF(ASTContext& Ctx, bool GCEnabled,
const LangOptions& lopts) {
return new CFRefCount(Ctx, GCEnabled, lopts);
-}
+}
diff --git a/lib/Analysis/CallGraph.cpp b/lib/Analysis/CallGraph.cpp
index 1e28411..fdca1dc 100644
--- a/lib/Analysis/CallGraph.cpp
+++ b/lib/Analysis/CallGraph.cpp
@@ -41,8 +41,8 @@
void VisitChildren(Stmt *S) {
for (Stmt::child_iterator I=S->child_begin(), E=S->child_end(); I != E;++I)
- if (*I)
- static_cast<CGBuilder*>(this)->Visit(*I);
+ if (*I)
+ static_cast<CGBuilder*>(this)->Visit(*I);
}
};
}
@@ -53,7 +53,7 @@
CallGraphNode *CalleeNode = G.getOrInsertFunction(Ent);
Decl *Parent = ASTLocation::FindImmediateParent(FD, CE);
-
+
CallerNode->addCallee(ASTLocation(Parent, CE), CalleeNode);
}
}
@@ -92,7 +92,7 @@
// Set root node to 'main' function.
if (FD->getNameAsString() == "main")
Root = Node;
-
+
CGBuilder builder(*this, FD, Ent, Node);
builder.Visit(FD->getBody());
}
@@ -118,9 +118,9 @@
void CallGraph::print(llvm::raw_ostream &os) {
for (iterator I = begin(), E = end(); I != E; ++I) {
if (I->second->hasCallee()) {
- os << "function: " << I->first.getPrintableName()
+ os << "function: " << I->first.getPrintableName()
<< " calls:\n";
- for (CallGraphNode::iterator CI = I->second->begin(),
+ for (CallGraphNode::iterator CI = I->second->begin(),
CE = I->second->end(); CI != CE; ++CI) {
os << " " << CI->second->getName().c_str();
}
@@ -139,13 +139,13 @@
namespace llvm {
-template <>
+template <>
struct DOTGraphTraits<CallGraph> : public DefaultDOTGraphTraits {
- static std::string getNodeLabel(const CallGraphNode *Node,
+ static std::string getNodeLabel(const CallGraphNode *Node,
const CallGraph &CG, bool ShortNames) {
return Node->getName();
-
+
}
};
diff --git a/lib/Analysis/CheckDeadStores.cpp b/lib/Analysis/CheckDeadStores.cpp
index 69433d6..716affb 100644
--- a/lib/Analysis/CheckDeadStores.cpp
+++ b/lib/Analysis/CheckDeadStores.cpp
@@ -33,14 +33,14 @@
BugReporter& BR;
ParentMap& Parents;
llvm::SmallPtrSet<VarDecl*, 20> Escaped;
-
+
enum DeadStoreKind { Standard, Enclosing, DeadIncrement, DeadInit };
-
+
public:
DeadStoreObs(ASTContext &ctx, BugReporter& br, ParentMap& parents,
llvm::SmallPtrSet<VarDecl*, 20> &escaped)
: Ctx(ctx), BR(br), Parents(parents), Escaped(escaped) {}
-
+
virtual ~DeadStoreObs() {}
void Report(VarDecl* V, DeadStoreKind dsk, SourceLocation L, SourceRange R) {
@@ -48,27 +48,27 @@
return;
std::string name = V->getNameAsString();
-
+
const char* BugType = 0;
std::string msg;
-
+
switch (dsk) {
default:
assert(false && "Impossible dead store type.");
-
+
case DeadInit:
BugType = "Dead initialization";
msg = "Value stored to '" + name +
"' during its initialization is never read";
break;
-
+
case DeadIncrement:
BugType = "Dead increment";
case Standard:
if (!BugType) BugType = "Dead assignment";
msg = "Value stored to '" + name + "' is never read";
break;
-
+
case Enclosing:
BugType = "Dead nested assignment";
msg = "Although the value stored to '" + name +
@@ -76,10 +76,10 @@
" read from '" + name + "'";
break;
}
-
- BR.EmitBasicReport(BugType, "Dead store", msg.c_str(), L, R);
+
+ BR.EmitBasicReport(BugType, "Dead store", msg.c_str(), L, R);
}
-
+
void CheckVarDecl(VarDecl* VD, Expr* Ex, Expr* Val,
DeadStoreKind dsk,
const LiveVariables::AnalysisDataTy& AD,
@@ -87,60 +87,60 @@
if (VD->hasLocalStorage() && !Live(VD, AD) && !VD->getAttr<UnusedAttr>())
Report(VD, dsk, Ex->getSourceRange().getBegin(),
- Val->getSourceRange());
+ Val->getSourceRange());
}
-
+
void CheckDeclRef(DeclRefExpr* DR, Expr* Val, DeadStoreKind dsk,
const LiveVariables::AnalysisDataTy& AD,
const LiveVariables::ValTy& Live) {
-
+
if (VarDecl* VD = dyn_cast<VarDecl>(DR->getDecl()))
CheckVarDecl(VD, DR, Val, dsk, AD, Live);
}
-
+
bool isIncrement(VarDecl* VD, BinaryOperator* B) {
if (B->isCompoundAssignmentOp())
return true;
-
+
Expr* RHS = B->getRHS()->IgnoreParenCasts();
BinaryOperator* BRHS = dyn_cast<BinaryOperator>(RHS);
-
+
if (!BRHS)
return false;
-
+
DeclRefExpr *DR;
-
+
if ((DR = dyn_cast<DeclRefExpr>(BRHS->getLHS()->IgnoreParenCasts())))
if (DR->getDecl() == VD)
return true;
-
+
if ((DR = dyn_cast<DeclRefExpr>(BRHS->getRHS()->IgnoreParenCasts())))
if (DR->getDecl() == VD)
return true;
-
+
return false;
}
-
+
virtual void ObserveStmt(Stmt* S,
const LiveVariables::AnalysisDataTy& AD,
const LiveVariables::ValTy& Live) {
-
+
// Skip statements in macros.
if (S->getLocStart().isMacroID())
return;
-
- if (BinaryOperator* B = dyn_cast<BinaryOperator>(S)) {
+
+ if (BinaryOperator* B = dyn_cast<BinaryOperator>(S)) {
if (!B->isAssignmentOp()) return; // Skip non-assignments.
-
+
if (DeclRefExpr* DR = dyn_cast<DeclRefExpr>(B->getLHS()))
if (VarDecl *VD = dyn_cast<VarDecl>(DR->getDecl())) {
Expr* RHS = B->getRHS()->IgnoreParenCasts();
-
+
// Special case: check for assigning null to a pointer.
- // This is a common form of defensive programming.
+ // This is a common form of defensive programming.
if (VD->getType()->isPointerType()) {
if (IntegerLiteral* L = dyn_cast<IntegerLiteral>(RHS))
- // FIXME: Probably should have an Expr::isNullPointerConstant.
+ // FIXME: Probably should have an Expr::isNullPointerConstant.
if (L->getValue() == 0)
return;
}
@@ -149,19 +149,19 @@
if (DeclRefExpr* RhsDR = dyn_cast<DeclRefExpr>(RHS))
if (VD == dyn_cast<VarDecl>(RhsDR->getDecl()))
return;
-
+
// Otherwise, issue a warning.
DeadStoreKind dsk = Parents.isConsumedExpr(B)
- ? Enclosing
+ ? Enclosing
: (isIncrement(VD,B) ? DeadIncrement : Standard);
-
+
CheckVarDecl(VD, DR, B->getRHS(), dsk, AD, Live);
- }
+ }
}
else if (UnaryOperator* U = dyn_cast<UnaryOperator>(S)) {
if (!U->isIncrementOp())
return;
-
+
// Handle: ++x within a subexpression. The solution is to not warn
// about preincrements to dead variables when the preincrement occurs
// as a subexpression. This can lead to false negatives, e.g. "(++x);"
@@ -170,21 +170,21 @@
return;
Expr *Ex = U->getSubExpr()->IgnoreParenCasts();
-
+
if (DeclRefExpr* DR = dyn_cast<DeclRefExpr>(Ex))
CheckDeclRef(DR, U, DeadIncrement, AD, Live);
- }
+ }
else if (DeclStmt* DS = dyn_cast<DeclStmt>(S))
// Iterate through the decls. Warn if any initializers are complex
// expressions that are not live (never used).
for (DeclStmt::decl_iterator DI=DS->decl_begin(), DE=DS->decl_end();
DI != DE; ++DI) {
-
+
VarDecl* V = dyn_cast<VarDecl>(*DI);
if (!V)
continue;
-
+
if (V->hasLocalStorage())
if (Expr* E = V->getInit()) {
// A dead initialization is a variable that is dead after it
@@ -200,7 +200,7 @@
// due to defensive programming.
if (E->isConstantInitializer(Ctx))
return;
-
+
// Special case: check for initializations from constant
// variables.
//
@@ -211,14 +211,14 @@
if (VarDecl *VD = dyn_cast<VarDecl>(DRE->getDecl()))
if (VD->hasGlobalStorage() &&
VD->getType().isConstQualified()) return;
-
+
Report(V, DeadInit, V->getLocation(), E->getSourceRange());
}
}
}
}
};
-
+
} // end anonymous namespace
//===----------------------------------------------------------------------===//
@@ -230,9 +230,9 @@
CFG *cfg;
public:
FindEscaped(CFG *c) : cfg(c) {}
-
+
CFG& getCFG() { return *cfg; }
-
+
llvm::SmallPtrSet<VarDecl*, 20> Escaped;
void VisitUnaryOperator(UnaryOperator* U) {
@@ -249,11 +249,11 @@
}
};
} // end anonymous namespace
-
+
void clang::CheckDeadStores(LiveVariables& L, BugReporter& BR) {
FindEscaped FS(BR.getCFG());
- FS.getCFG().VisitBlockStmts(FS);
+ FS.getCFG().VisitBlockStmts(FS);
DeadStoreObs A(BR.getContext(), BR, BR.getParentMap(), FS.Escaped);
L.runOnAllBlocks(*BR.getCFG(), &A);
}
diff --git a/lib/Analysis/CheckNSError.cpp b/lib/Analysis/CheckNSError.cpp
index 0b9ae04..7e59643 100644
--- a/lib/Analysis/CheckNSError.cpp
+++ b/lib/Analysis/CheckNSError.cpp
@@ -32,37 +32,37 @@
const bool isNSErrorWarning;
IdentifierInfo * const II;
GRExprEngine &Eng;
-
+
void CheckSignature(const ObjCMethodDecl& MD, QualType& ResultTy,
llvm::SmallVectorImpl<VarDecl*>& ErrorParams);
-
+
void CheckSignature(const FunctionDecl& MD, QualType& ResultTy,
llvm::SmallVectorImpl<VarDecl*>& ErrorParams);
bool CheckNSErrorArgument(QualType ArgTy);
bool CheckCFErrorArgument(QualType ArgTy);
-
+
void CheckParamDeref(const VarDecl *V, const LocationContext *LC,
const GRState *state, BugReporter& BR);
-
+
void EmitRetTyWarning(BugReporter& BR, const Decl& CodeDecl);
-
+
public:
NSErrorCheck(const Decl &D, bool isNSError, GRExprEngine& eng)
- : BugType(isNSError ? "NSError** null dereference"
+ : BugType(isNSError ? "NSError** null dereference"
: "CFErrorRef* null dereference",
"Coding conventions (Apple)"),
CodeDecl(D),
- isNSErrorWarning(isNSError),
+ isNSErrorWarning(isNSError),
II(&eng.getContext().Idents.get(isNSErrorWarning ? "NSError":"CFErrorRef")),
Eng(eng) {}
-
+
void FlushReports(BugReporter& BR);
-};
-
+};
+
} // end anonymous namespace
-void clang::RegisterNSErrorChecks(BugReporter& BR, GRExprEngine &Eng,
+void clang::RegisterNSErrorChecks(BugReporter& BR, GRExprEngine &Eng,
const Decl &D) {
BR.Register(new NSErrorCheck(D, true, Eng));
BR.Register(new NSErrorCheck(D, false, Eng));
@@ -71,7 +71,7 @@
void NSErrorCheck::FlushReports(BugReporter& BR) {
// Get the analysis engine and the exploded analysis graph.
ExplodedGraph& G = Eng.getGraph();
-
+
// Get the ASTContext, which is useful for querying type information.
ASTContext &Ctx = BR.getContext();
@@ -84,17 +84,17 @@
CheckSignature(*FD, ResultTy, ErrorParams);
else
return;
-
+
if (ErrorParams.empty())
return;
-
+
if (ResultTy == Ctx.VoidTy) EmitRetTyWarning(BR, CodeDecl);
-
- for (ExplodedGraph::roots_iterator RI=G.roots_begin(), RE=G.roots_end();
+
+ for (ExplodedGraph::roots_iterator RI=G.roots_begin(), RE=G.roots_end();
RI!=RE; ++RI) {
// Scan the parameters for an implicit null dereference.
for (llvm::SmallVectorImpl<VarDecl*>::iterator I=ErrorParams.begin(),
- E=ErrorParams.end(); I!=E; ++I)
+ E=ErrorParams.end(); I!=E; ++I)
CheckParamDeref(*I, (*RI)->getLocationContext(), (*RI)->getState(), BR);
}
}
@@ -102,17 +102,17 @@
void NSErrorCheck::EmitRetTyWarning(BugReporter& BR, const Decl& CodeDecl) {
std::string sbuf;
llvm::raw_string_ostream os(sbuf);
-
+
if (isa<ObjCMethodDecl>(CodeDecl))
os << "Method";
else
- os << "Function";
-
+ os << "Function";
+
os << " accepting ";
os << (isNSErrorWarning ? "NSError**" : "CFErrorRef*");
os << " should have a non-void return value to indicate whether or not an "
"error occurred";
-
+
BR.EmitBasicReport(isNSErrorWarning
? "Bad return type when passing NSError**"
: "Bad return type when passing CFError*",
@@ -125,11 +125,11 @@
llvm::SmallVectorImpl<VarDecl*>& ErrorParams) {
ResultTy = M.getResultType();
-
- for (ObjCMethodDecl::param_iterator I=M.param_begin(),
+
+ for (ObjCMethodDecl::param_iterator I=M.param_begin(),
E=M.param_end(); I!=E; ++I) {
- QualType T = (*I)->getType();
+ QualType T = (*I)->getType();
if (isNSErrorWarning) {
if (CheckNSErrorArgument(T)) ErrorParams.push_back(*I);
@@ -142,14 +142,14 @@
void
NSErrorCheck::CheckSignature(const FunctionDecl& F, QualType& ResultTy,
llvm::SmallVectorImpl<VarDecl*>& ErrorParams) {
-
+
ResultTy = F.getResultType();
-
- for (FunctionDecl::param_const_iterator I = F.param_begin(),
+
+ for (FunctionDecl::param_const_iterator I = F.param_begin(),
E = F.param_end(); I != E; ++I) {
-
- QualType T = (*I)->getType();
-
+
+ QualType T = (*I)->getType();
+
if (isNSErrorWarning) {
if (CheckNSErrorArgument(T)) ErrorParams.push_back(*I);
}
@@ -160,31 +160,31 @@
bool NSErrorCheck::CheckNSErrorArgument(QualType ArgTy) {
-
+
const PointerType* PPT = ArgTy->getAs<PointerType>();
if (!PPT)
return false;
-
+
const ObjCObjectPointerType* PT =
PPT->getPointeeType()->getAsObjCObjectPointerType();
if (!PT)
return false;
-
+
const ObjCInterfaceDecl *ID = PT->getInterfaceDecl();
-
+
// FIXME: Can ID ever be NULL?
if (ID)
return II == ID->getIdentifier();
-
+
return false;
}
bool NSErrorCheck::CheckCFErrorArgument(QualType ArgTy) {
-
+
const PointerType* PPT = ArgTy->getAs<PointerType>();
if (!PPT) return false;
-
+
const TypedefType* TT = PPT->getPointeeType()->getAsTypedefType();
if (!TT) return false;
@@ -195,24 +195,24 @@
const LocationContext *LC,
const GRState *rootState,
BugReporter& BR) {
-
+
SVal ParamL = rootState->getLValue(Param, LC);
const MemRegion* ParamR = cast<loc::MemRegionVal>(ParamL).getRegionAs<VarRegion>();
assert (ParamR && "Parameters always have VarRegions.");
SVal ParamSVal = rootState->getSVal(ParamR);
-
+
// FIXME: For now assume that ParamSVal is symbolic. We need to generalize
// this later.
SymbolRef ParamSym = ParamSVal.getAsLocSymbol();
if (!ParamSym)
return;
-
+
// Iterate over the implicit-null dereferences.
for (GRExprEngine::null_deref_iterator I=Eng.implicit_null_derefs_begin(),
E=Eng.implicit_null_derefs_end(); I!=E; ++I) {
-
+
const GRState *state = (*I)->getState();
- const SVal* X = state->get<GRState::NullDerefTag>();
+ const SVal* X = state->get<GRState::NullDerefTag>();
if (!X || X->getAsSymbol() != ParamSym)
continue;
@@ -221,14 +221,14 @@
std::string sbuf;
llvm::raw_string_ostream os(sbuf);
os << "Potential null dereference. According to coding standards ";
-
+
if (isNSErrorWarning)
os << "in 'Creating and Returning NSError Objects' the parameter '";
else
os << "documented in CoreFoundation/CFError.h the parameter '";
-
+
os << Param->getNameAsString() << "' may be null.";
-
+
BugReport *report = new BugReport(*this, os.str().c_str(), *I);
// FIXME: Notable symbols are now part of the report. We should
// add support for notable symbols in BugReport.
diff --git a/lib/Analysis/CheckObjCDealloc.cpp b/lib/Analysis/CheckObjCDealloc.cpp
index 3392fcf..d89edff 100644
--- a/lib/Analysis/CheckObjCDealloc.cpp
+++ b/lib/Analysis/CheckObjCDealloc.cpp
@@ -24,11 +24,11 @@
using namespace clang;
-static bool scan_dealloc(Stmt* S, Selector Dealloc) {
-
+static bool scan_dealloc(Stmt* S, Selector Dealloc) {
+
if (ObjCMessageExpr* ME = dyn_cast<ObjCMessageExpr>(S))
if (ME->getSelector() == Dealloc)
- if(ME->getReceiver())
+ if (ME->getReceiver())
if (Expr* Receiver = ME->getReceiver()->IgnoreParenCasts())
return isa<ObjCSuperExpr>(Receiver);
@@ -37,20 +37,20 @@
for (Stmt::child_iterator I = S->child_begin(), E= S->child_end(); I!=E; ++I)
if (*I && scan_dealloc(*I, Dealloc))
return true;
-
+
return false;
}
-static bool scan_ivar_release(Stmt* S, ObjCIvarDecl* ID,
- const ObjCPropertyDecl* PD,
- Selector Release,
+static bool scan_ivar_release(Stmt* S, ObjCIvarDecl* ID,
+ const ObjCPropertyDecl* PD,
+ Selector Release,
IdentifierInfo* SelfII,
- ASTContext& Ctx) {
-
+ ASTContext& Ctx) {
+
// [mMyIvar release]
if (ObjCMessageExpr* ME = dyn_cast<ObjCMessageExpr>(S))
if (ME->getSelector() == Release)
- if(ME->getReceiver())
+ if (ME->getReceiver())
if (Expr* Receiver = ME->getReceiver()->IgnoreParenCasts())
if (ObjCIvarRefExpr* E = dyn_cast<ObjCIvarRefExpr>(Receiver))
if (E->getDecl() == ID)
@@ -58,7 +58,7 @@
// [self setMyIvar:nil];
if (ObjCMessageExpr* ME = dyn_cast<ObjCMessageExpr>(S))
- if(ME->getReceiver())
+ if (ME->getReceiver())
if (Expr* Receiver = ME->getReceiver()->IgnoreParenCasts())
if (DeclRefExpr* E = dyn_cast<DeclRefExpr>(Receiver))
if (E->getDecl()->getIdentifier() == SelfII)
@@ -66,19 +66,19 @@
ME->getNumArgs() == 1 &&
ME->getArg(0)->isNullPointerConstant(Ctx))
return true;
-
+
// self.myIvar = nil;
if (BinaryOperator* BO = dyn_cast<BinaryOperator>(S))
if (BO->isAssignmentOp())
- if(ObjCPropertyRefExpr* PRE =
+ if (ObjCPropertyRefExpr* PRE =
dyn_cast<ObjCPropertyRefExpr>(BO->getLHS()->IgnoreParenCasts()))
- if(PRE->getProperty() == PD)
- if(BO->getRHS()->isNullPointerConstant(Ctx)) {
+ if (PRE->getProperty() == PD)
+ if (BO->getRHS()->isNullPointerConstant(Ctx)) {
// This is only a 'release' if the property kind is not
// 'assign'.
return PD->getSetterKind() != ObjCPropertyDecl::Assign;
}
-
+
// Recurse to children.
for (Stmt::child_iterator I = S->child_begin(), E= S->child_end(); I!=E; ++I)
if (*I && scan_ivar_release(*I, ID, PD, Release, SelfII, Ctx))
@@ -91,39 +91,39 @@
const LangOptions& LOpts, BugReporter& BR) {
assert (LOpts.getGCMode() != LangOptions::GCOnly);
-
+
ASTContext& Ctx = BR.getContext();
const ObjCInterfaceDecl* ID = D->getClassInterface();
-
+
// Does the class contain any ivars that are pointers (or id<...>)?
// If not, skip the check entirely.
// NOTE: This is motivated by PR 2517:
// http://llvm.org/bugs/show_bug.cgi?id=2517
-
+
bool containsPointerIvar = false;
-
+
for (ObjCInterfaceDecl::ivar_iterator I=ID->ivar_begin(), E=ID->ivar_end();
I!=E; ++I) {
-
+
ObjCIvarDecl* ID = *I;
QualType T = ID->getType();
-
+
if (!T->isObjCObjectPointerType() ||
ID->getAttr<IBOutletAttr>()) // Skip IBOutlets.
continue;
-
+
containsPointerIvar = true;
break;
}
-
+
if (!containsPointerIvar)
return;
-
+
// Determine if the class subclasses NSObject.
IdentifierInfo* NSObjectII = &Ctx.Idents.get("NSObject");
IdentifierInfo* SenTestCaseII = &Ctx.Idents.get("SenTestCase");
-
+
for ( ; ID ; ID = ID->getSuperClass()) {
IdentifierInfo *II = ID->getIdentifier();
@@ -137,118 +137,118 @@
if (II == SenTestCaseII)
return;
}
-
+
if (!ID)
return;
-
+
// Get the "dealloc" selector.
IdentifierInfo* II = &Ctx.Idents.get("dealloc");
- Selector S = Ctx.Selectors.getSelector(0, &II);
+ Selector S = Ctx.Selectors.getSelector(0, &II);
ObjCMethodDecl* MD = 0;
-
+
// Scan the instance methods for "dealloc".
for (ObjCImplementationDecl::instmeth_iterator I = D->instmeth_begin(),
E = D->instmeth_end(); I!=E; ++I) {
-
+
if ((*I)->getSelector() == S) {
MD = *I;
break;
- }
+ }
}
-
+
if (!MD) { // No dealloc found.
-
- const char* name = LOpts.getGCMode() == LangOptions::NonGC
- ? "missing -dealloc"
+
+ const char* name = LOpts.getGCMode() == LangOptions::NonGC
+ ? "missing -dealloc"
: "missing -dealloc (Hybrid MM, non-GC)";
-
+
std::string buf;
llvm::raw_string_ostream os(buf);
os << "Objective-C class '" << D->getNameAsString()
<< "' lacks a 'dealloc' instance method";
-
+
BR.EmitBasicReport(name, os.str().c_str(), D->getLocStart());
return;
}
-
+
// dealloc found. Scan for missing [super dealloc].
if (MD->getBody() && !scan_dealloc(MD->getBody(), S)) {
-
+
const char* name = LOpts.getGCMode() == LangOptions::NonGC
? "missing [super dealloc]"
: "missing [super dealloc] (Hybrid MM, non-GC)";
-
+
std::string buf;
llvm::raw_string_ostream os(buf);
os << "The 'dealloc' instance method in Objective-C class '"
<< D->getNameAsString()
<< "' does not send a 'dealloc' message to its super class"
" (missing [super dealloc])";
-
+
BR.EmitBasicReport(name, os.str().c_str(), D->getLocStart());
return;
- }
-
+ }
+
// Get the "release" selector.
IdentifierInfo* RII = &Ctx.Idents.get("release");
- Selector RS = Ctx.Selectors.getSelector(0, &RII);
-
+ Selector RS = Ctx.Selectors.getSelector(0, &RII);
+
// Get the "self" identifier
IdentifierInfo* SelfII = &Ctx.Idents.get("self");
-
+
// Scan for missing and extra releases of ivars used by implementations
// of synthesized properties
for (ObjCImplementationDecl::propimpl_iterator I = D->propimpl_begin(),
E = D->propimpl_end(); I!=E; ++I) {
// We can only check the synthesized properties
- if((*I)->getPropertyImplementation() != ObjCPropertyImplDecl::Synthesize)
+ if ((*I)->getPropertyImplementation() != ObjCPropertyImplDecl::Synthesize)
continue;
-
+
ObjCIvarDecl* ID = (*I)->getPropertyIvarDecl();
if (!ID)
continue;
-
+
QualType T = ID->getType();
if (!T->isObjCObjectPointerType()) // Skip non-pointer ivars
continue;
const ObjCPropertyDecl* PD = (*I)->getPropertyDecl();
- if(!PD)
+ if (!PD)
continue;
-
+
// ivars cannot be set via read-only properties, so we'll skip them
- if(PD->isReadOnly())
+ if (PD->isReadOnly())
continue;
-
+
// ivar must be released if and only if the kind of setter was not 'assign'
bool requiresRelease = PD->getSetterKind() != ObjCPropertyDecl::Assign;
- if(scan_ivar_release(MD->getBody(), ID, PD, RS, SelfII, Ctx)
+ if (scan_ivar_release(MD->getBody(), ID, PD, RS, SelfII, Ctx)
!= requiresRelease) {
const char *name;
const char* category = "Memory (Core Foundation/Objective-C)";
-
+
std::string buf;
llvm::raw_string_ostream os(buf);
- if(requiresRelease) {
+ if (requiresRelease) {
name = LOpts.getGCMode() == LangOptions::NonGC
? "missing ivar release (leak)"
: "missing ivar release (Hybrid MM, non-GC)";
-
+
os << "The '" << ID->getNameAsString()
<< "' instance variable was retained by a synthesized property but "
- "wasn't released in 'dealloc'";
+ "wasn't released in 'dealloc'";
} else {
name = LOpts.getGCMode() == LangOptions::NonGC
? "extra ivar release (use-after-release)"
: "extra ivar release (Hybrid MM, non-GC)";
-
+
os << "The '" << ID->getNameAsString()
<< "' instance variable was not retained by a synthesized property "
"but was released in 'dealloc'";
}
-
+
BR.EmitBasicReport(name, category,
os.str().c_str(), (*I)->getLocation());
}
diff --git a/lib/Analysis/CheckObjCInstMethSignature.cpp b/lib/Analysis/CheckObjCInstMethSignature.cpp
index aae1e1d..8c0d396 100644
--- a/lib/Analysis/CheckObjCInstMethSignature.cpp
+++ b/lib/Analysis/CheckObjCInstMethSignature.cpp
@@ -40,14 +40,14 @@
const ObjCMethodDecl *MethAncestor,
BugReporter &BR, ASTContext &Ctx,
const ObjCImplementationDecl *ID) {
-
+
QualType ResDerived = MethDerived->getResultType();
- QualType ResAncestor = MethAncestor->getResultType();
-
+ QualType ResAncestor = MethAncestor->getResultType();
+
if (!AreTypesCompatible(ResDerived, ResAncestor, Ctx)) {
std::string sbuf;
llvm::raw_string_ostream os(sbuf);
-
+
os << "The Objective-C class '"
<< MethDerived->getClassInterface()->getNameAsString()
<< "', which is derived from class '"
@@ -63,7 +63,7 @@
<< ResAncestor.getAsString()
<< "'. These two types are incompatible, and may result in undefined "
"behavior for clients of these classes.";
-
+
BR.EmitBasicReport("Incompatible instance method return type",
os.str().c_str(), MethDerived->getLocStart());
}
@@ -71,23 +71,23 @@
void clang::CheckObjCInstMethSignature(const ObjCImplementationDecl* ID,
BugReporter& BR) {
-
+
const ObjCInterfaceDecl* D = ID->getClassInterface();
const ObjCInterfaceDecl* C = D->getSuperClass();
if (!C)
return;
-
+
ASTContext& Ctx = BR.getContext();
-
+
// Build a DenseMap of the methods for quick querying.
typedef llvm::DenseMap<Selector,ObjCMethodDecl*> MapTy;
MapTy IMeths;
unsigned NumMethods = 0;
-
+
for (ObjCImplementationDecl::instmeth_iterator I=ID->instmeth_begin(),
- E=ID->instmeth_end(); I!=E; ++I) {
-
+ E=ID->instmeth_end(); I!=E; ++I) {
+
ObjCMethodDecl* M = *I;
IMeths[M->getSelector()] = M;
++NumMethods;
@@ -101,19 +101,19 @@
ObjCMethodDecl* M = *I;
Selector S = M->getSelector();
-
+
MapTy::iterator MI = IMeths.find(S);
if (MI == IMeths.end() || MI->second == 0)
continue;
-
+
--NumMethods;
ObjCMethodDecl* MethDerived = MI->second;
MI->second = 0;
-
+
CompareReturnTypes(MethDerived, M, BR, Ctx, ID);
}
-
+
C = C->getSuperClass();
}
}
diff --git a/lib/Analysis/CheckObjCUnusedIVars.cpp b/lib/Analysis/CheckObjCUnusedIVars.cpp
index 7547097..1a900f8 100644
--- a/lib/Analysis/CheckObjCUnusedIVars.cpp
+++ b/lib/Analysis/CheckObjCUnusedIVars.cpp
@@ -29,7 +29,7 @@
static void Scan(IvarUsageMap& M, const Stmt* S) {
if (!S)
return;
-
+
if (const ObjCIvarRefExpr *Ex = dyn_cast<ObjCIvarRefExpr>(S)) {
const ObjCIvarDecl *D = Ex->getDecl();
IvarUsageMap::iterator I = M.find(D);
@@ -37,7 +37,7 @@
I->second = Used;
return;
}
-
+
// Blocks can reference an instance variable of a class.
if (const BlockExpr *BE = dyn_cast<BlockExpr>(S)) {
Scan(M, BE->getBody());
@@ -51,12 +51,12 @@
static void Scan(IvarUsageMap& M, const ObjCPropertyImplDecl* D) {
if (!D)
return;
-
+
const ObjCIvarDecl* ID = D->getPropertyIvarDecl();
if (!ID)
return;
-
+
IvarUsageMap::iterator I = M.find(ID);
if (I != M.end())
I->second = Used;
@@ -71,9 +71,9 @@
// Iterate over the ivars.
for (ObjCInterfaceDecl::ivar_iterator I=ID->ivar_begin(),
E=ID->ivar_end(); I!=E; ++I) {
-
+
const ObjCIvarDecl* ID = *I;
-
+
// Ignore ivars that aren't private.
if (ID->getAccessControl() != ObjCIvarDecl::Private)
continue;
@@ -81,31 +81,31 @@
// Skip IB Outlets.
if (ID->getAttr<IBOutletAttr>())
continue;
-
+
M[ID] = Unused;
}
if (M.empty())
return;
-
+
// Now scan the methods for accesses.
for (ObjCImplementationDecl::instmeth_iterator I = D->instmeth_begin(),
E = D->instmeth_end(); I!=E; ++I)
Scan(M, (*I)->getBody());
-
+
// Scan for @synthesized property methods that act as setters/getters
// to an ivar.
for (ObjCImplementationDecl::propimpl_iterator I = D->propimpl_begin(),
E = D->propimpl_end(); I!=E; ++I)
- Scan(M, *I);
-
+ Scan(M, *I);
+
// Find ivars that are unused.
for (IvarUsageMap::iterator I = M.begin(), E = M.end(); I!=E; ++I)
if (I->second == Unused) {
std::string sbuf;
llvm::raw_string_ostream os(sbuf);
os << "Instance variable '" << I->first->getNameAsString()
- << "' in class '" << ID->getNameAsString()
+ << "' in class '" << ID->getNameAsString()
<< "' is never used by the methods in its @implementation "
"(although it may be used by category methods).";
diff --git a/lib/Analysis/CheckSecuritySyntaxOnly.cpp b/lib/Analysis/CheckSecuritySyntaxOnly.cpp
index 1bed9d1..9f0d059 100644
--- a/lib/Analysis/CheckSecuritySyntaxOnly.cpp
+++ b/lib/Analysis/CheckSecuritySyntaxOnly.cpp
@@ -21,18 +21,18 @@
namespace {
class VISIBILITY_HIDDEN WalkAST : public StmtVisitor<WalkAST> {
- BugReporter &BR;
+ BugReporter &BR;
IdentifierInfo *II_gets;
enum { num_rands = 9 };
IdentifierInfo *II_rand[num_rands];
IdentifierInfo *II_random;
enum { num_setids = 6 };
IdentifierInfo *II_setid[num_setids];
-
+
public:
WalkAST(BugReporter &br) : BR(br),
II_gets(0), II_rand(), II_random(0), II_setid() {}
-
+
// Statement visitor methods.
void VisitCallExpr(CallExpr *CE);
void VisitForStmt(ForStmt *S);
@@ -40,10 +40,10 @@
void VisitStmt(Stmt *S) { VisitChildren(S); }
void VisitChildren(Stmt *S);
-
+
// Helpers.
IdentifierInfo *GetIdentifier(IdentifierInfo *& II, const char *str);
-
+
// Checker-specific methods.
void CheckLoopConditionForFloat(const ForStmt *FS);
void CheckCall_gets(const CallExpr *CE, const FunctionDecl *FD);
@@ -60,8 +60,8 @@
IdentifierInfo *WalkAST::GetIdentifier(IdentifierInfo *& II, const char *str) {
if (!II)
II = &BR.getContext().Idents.get(str);
-
- return II;
+
+ return II;
}
//===----------------------------------------------------------------------===//
@@ -80,23 +80,22 @@
CheckCall_rand(CE, FD);
CheckCall_random(CE, FD);
}
-
+
// Recurse and check children.
VisitChildren(CE);
}
void WalkAST::VisitCompoundStmt(CompoundStmt *S) {
for (Stmt::child_iterator I = S->child_begin(), E = S->child_end(); I!=E; ++I)
- if (Stmt *child = *I)
- {
- if (CallExpr *CE = dyn_cast<CallExpr>(child))
- CheckUncheckedReturnValue(CE);
- Visit(child);
- }
+ if (Stmt *child = *I) {
+ if (CallExpr *CE = dyn_cast<CallExpr>(child))
+ CheckUncheckedReturnValue(CE);
+ Visit(child);
+ }
}
void WalkAST::VisitForStmt(ForStmt *FS) {
- CheckLoopConditionForFloat(FS);
+ CheckLoopConditionForFloat(FS);
// Recurse and check children.
VisitChildren(FS);
@@ -111,26 +110,26 @@
static const DeclRefExpr*
GetIncrementedVar(const Expr *expr, const VarDecl *x, const VarDecl *y) {
expr = expr->IgnoreParenCasts();
-
- if (const BinaryOperator *B = dyn_cast<BinaryOperator>(expr)) {
+
+ if (const BinaryOperator *B = dyn_cast<BinaryOperator>(expr)) {
if (!(B->isAssignmentOp() || B->isCompoundAssignmentOp() ||
B->getOpcode() == BinaryOperator::Comma))
return NULL;
-
+
if (const DeclRefExpr *lhs = GetIncrementedVar(B->getLHS(), x, y))
return lhs;
-
+
if (const DeclRefExpr *rhs = GetIncrementedVar(B->getRHS(), x, y))
return rhs;
-
+
return NULL;
}
-
+
if (const DeclRefExpr *DR = dyn_cast<DeclRefExpr>(expr)) {
const NamedDecl *ND = DR->getDecl();
return ND == x || ND == y ? DR : NULL;
}
-
+
if (const UnaryOperator *U = dyn_cast<UnaryOperator>(expr))
return U->isIncrementDecrementOp()
? GetIncrementedVar(U->getSubExpr(), x, y) : NULL;
@@ -145,68 +144,68 @@
void WalkAST::CheckLoopConditionForFloat(const ForStmt *FS) {
// Does the loop have a condition?
const Expr *condition = FS->getCond();
-
+
if (!condition)
return;
// Does the loop have an increment?
const Expr *increment = FS->getInc();
-
+
if (!increment)
return;
-
+
// Strip away '()' and casts.
condition = condition->IgnoreParenCasts();
increment = increment->IgnoreParenCasts();
-
+
// Is the loop condition a comparison?
const BinaryOperator *B = dyn_cast<BinaryOperator>(condition);
if (!B)
return;
-
+
// Is this a comparison?
if (!(B->isRelationalOp() || B->isEqualityOp()))
return;
-
+
// Are we comparing variables?
const DeclRefExpr *drLHS = dyn_cast<DeclRefExpr>(B->getLHS()->IgnoreParens());
const DeclRefExpr *drRHS = dyn_cast<DeclRefExpr>(B->getRHS()->IgnoreParens());
-
+
// Does at least one of the variables have a floating point type?
drLHS = drLHS && drLHS->getType()->isFloatingType() ? drLHS : NULL;
drRHS = drRHS && drRHS->getType()->isFloatingType() ? drRHS : NULL;
-
+
if (!drLHS && !drRHS)
return;
const VarDecl *vdLHS = drLHS ? dyn_cast<VarDecl>(drLHS->getDecl()) : NULL;
const VarDecl *vdRHS = drRHS ? dyn_cast<VarDecl>(drRHS->getDecl()) : NULL;
-
+
if (!vdLHS && !vdRHS)
- return;
-
+ return;
+
// Does either variable appear in increment?
const DeclRefExpr *drInc = GetIncrementedVar(increment, vdLHS, vdRHS);
-
+
if (!drInc)
return;
-
+
// Emit the error. First figure out which DeclRefExpr in the condition
// referenced the compared variable.
const DeclRefExpr *drCond = vdLHS == drInc->getDecl() ? drLHS : drRHS;
- llvm::SmallVector<SourceRange, 2> ranges;
+ llvm::SmallVector<SourceRange, 2> ranges;
std::string sbuf;
llvm::raw_string_ostream os(sbuf);
-
+
os << "Variable '" << drCond->getDecl()->getNameAsCString()
<< "' with floating point type '" << drCond->getType().getAsString()
<< "' should not be used as a loop counter";
ranges.push_back(drCond->getSourceRange());
ranges.push_back(drInc->getSourceRange());
-
+
const char *bugType = "Floating point variable used as loop counter";
BR.EmitBasicReport(bugType, "Security", os.str().c_str(),
FS->getLocStart(), ranges.data(), ranges.size());
@@ -221,11 +220,11 @@
void WalkAST::CheckCall_gets(const CallExpr *CE, const FunctionDecl *FD) {
if (FD->getIdentifier() != GetIdentifier(II_gets, "gets"))
return;
-
+
const FunctionProtoType *FTP = dyn_cast<FunctionProtoType>(FD->getType());
if (!FTP)
return;
-
+
// Verify that the function takes a single argument.
if (FTP->getNumArgs() != 1)
return;
@@ -234,10 +233,10 @@
const PointerType *PT = dyn_cast<PointerType>(FTP->getArgType(0));
if (!PT)
return;
-
+
if (PT->getPointeeType().getUnqualifiedType() != BR.getContext().CharTy)
return;
-
+
// Issue a warning.
SourceRange R = CE->getCallee()->getSourceRange();
BR.EmitBasicReport("Potential buffer overflow in call to 'gets'",
@@ -261,11 +260,11 @@
"lcong48",
"rand", "rand_r"
};
-
+
for (size_t i = 0; i < num_rands; i++)
- II_rand[i] = &BR.getContext().Idents.get(identifiers[i]);
+ II_rand[i] = &BR.getContext().Idents.get(identifiers[i]);
}
-
+
const IdentifierInfo *id = FD->getIdentifier();
size_t identifierid;
@@ -275,24 +274,24 @@
if (identifierid >= num_rands)
return;
-
+
const FunctionProtoType *FTP = dyn_cast<FunctionProtoType>(FD->getType());
if (!FTP)
return;
-
+
if (FTP->getNumArgs() == 1) {
// Is the argument an 'unsigned short *'?
// (Actually any integer type is allowed.)
const PointerType *PT = dyn_cast<PointerType>(FTP->getArgType(0));
if (!PT)
return;
-
+
if (! PT->getPointeeType()->isIntegerType())
return;
}
- else if (FTP->getNumArgs() != 0)
+ else if (FTP->getNumArgs() != 0)
return;
-
+
// Issue a warning.
std::string buf1;
llvm::raw_string_ostream os1(buf1);
@@ -305,7 +304,7 @@
<< " Use 'arc4random' instead";
SourceRange R = CE->getCallee()->getSourceRange();
-
+
BR.EmitBasicReport(os1.str().c_str(), "Security", os2.str().c_str(),
CE->getLocStart(), &R, 1);
}
@@ -318,15 +317,15 @@
void WalkAST::CheckCall_random(const CallExpr *CE, const FunctionDecl *FD) {
if (FD->getIdentifier() != GetIdentifier(II_random, "random"))
return;
-
+
const FunctionProtoType *FTP = dyn_cast<FunctionProtoType>(FD->getType());
if (!FTP)
return;
-
+
// Verify that the function takes no argument.
if (FTP->getNumArgs() != 0)
return;
-
+
// Issue a warning.
SourceRange R = CE->getCallee()->getSourceRange();
BR.EmitBasicReport("'random' is not a secure random number generator",
@@ -352,11 +351,11 @@
"setuid", "setgid", "seteuid", "setegid",
"setreuid", "setregid"
};
-
+
for (size_t i = 0; i < num_setids; i++)
- II_setid[i] = &BR.getContext().Idents.get(identifiers[i]);
+ II_setid[i] = &BR.getContext().Idents.get(identifiers[i]);
}
-
+
const IdentifierInfo *id = FD->getIdentifier();
size_t identifierid;
@@ -366,11 +365,11 @@
if (identifierid >= num_setids)
return;
-
+
const FunctionProtoType *FTP = dyn_cast<FunctionProtoType>(FD->getType());
if (!FTP)
return;
-
+
// Verify that the function takes one or two arguments (depending on
// the function).
if (FTP->getNumArgs() != (identifierid < 4 ? 1 : 2))
@@ -395,7 +394,7 @@
<< "', the following code may execute with unexpected privileges";
SourceRange R = CE->getCallee()->getSourceRange();
-
+
BR.EmitBasicReport(os1.str().c_str(), "Security", os2.str().c_str(),
CE->getLocStart(), &R, 1);
}
@@ -404,7 +403,7 @@
// Entry point for check.
//===----------------------------------------------------------------------===//
-void clang::CheckSecuritySyntaxOnly(const Decl *D, BugReporter &BR) {
+void clang::CheckSecuritySyntaxOnly(const Decl *D, BugReporter &BR) {
WalkAST walker(BR);
- walker.Visit(D->getBody());
+ walker.Visit(D->getBody());
}
diff --git a/lib/Analysis/Environment.cpp b/lib/Analysis/Environment.cpp
index 0b8ee66..1610ad4 100644
--- a/lib/Analysis/Environment.cpp
+++ b/lib/Analysis/Environment.cpp
@@ -18,61 +18,61 @@
using namespace clang;
SVal Environment::GetSVal(const Stmt *E, ValueManager& ValMgr) const {
-
+
for (;;) {
-
+
switch (E->getStmtClass()) {
-
- case Stmt::AddrLabelExprClass:
+
+ case Stmt::AddrLabelExprClass:
return ValMgr.makeLoc(cast<AddrLabelExpr>(E));
-
+
// ParenExprs are no-ops.
-
- case Stmt::ParenExprClass:
+
+ case Stmt::ParenExprClass:
E = cast<ParenExpr>(E)->getSubExpr();
continue;
-
+
case Stmt::CharacterLiteralClass: {
const CharacterLiteral* C = cast<CharacterLiteral>(E);
return ValMgr.makeIntVal(C->getValue(), C->getType());
}
-
+
case Stmt::IntegerLiteralClass: {
return ValMgr.makeIntVal(cast<IntegerLiteral>(E));
}
-
+
// Casts where the source and target type are the same
// are no-ops. We blast through these to get the descendant
// subexpression that has a value.
-
+
case Stmt::ImplicitCastExprClass:
case Stmt::CStyleCastExprClass: {
const CastExpr* C = cast<CastExpr>(E);
QualType CT = C->getType();
-
+
if (CT->isVoidType())
return UnknownVal();
-
+
break;
}
-
+
// Handle all other Stmt* using a lookup.
-
+
default:
break;
};
-
+
break;
}
-
+
return LookupExpr(E);
}
Environment EnvironmentManager::BindExpr(Environment Env, const Stmt *S,
- SVal V, bool Invalidate) {
+ SVal V, bool Invalidate) {
assert(S);
-
- if (V.isUnknown()) {
+
+ if (V.isUnknown()) {
if (Invalidate)
return Environment(F.Remove(Env.ExprBindings, S), Env.ACtx);
else
@@ -86,7 +86,7 @@
class VISIBILITY_HIDDEN MarkLiveCallback : public SymbolVisitor {
SymbolReaper &SymReaper;
public:
- MarkLiveCallback(SymbolReaper &symreaper) : SymReaper(symreaper) {}
+ MarkLiveCallback(SymbolReaper &symreaper) : SymReaper(symreaper) {}
bool VisitSymbol(SymbolRef sym) { SymReaper.markLive(sym); return true; }
};
} // end anonymous namespace
@@ -95,45 +95,45 @@
// - Remove subexpression bindings.
// - Remove dead block expression bindings.
// - Keep live block expression bindings:
-// - Mark their reachable symbols live in SymbolReaper,
+// - Mark their reachable symbols live in SymbolReaper,
// see ScanReachableSymbols.
// - Mark the region in DRoots if the binding is a loc::MemRegionVal.
-Environment
+Environment
EnvironmentManager::RemoveDeadBindings(Environment Env, const Stmt *S,
SymbolReaper &SymReaper,
const GRState *ST,
llvm::SmallVectorImpl<const MemRegion*> &DRoots) {
-
+
CFG &C = *Env.getAnalysisContext().getCFG();
-
+
// We construct a new Environment object entirely, as this is cheaper than
// individually removing all the subexpression bindings (which will greatly
// outnumber block-level expression bindings).
Environment NewEnv = getInitialEnvironment(&Env.getAnalysisContext());
-
+
// Iterate over the block-expr bindings.
- for (Environment::iterator I = Env.begin(), E = Env.end();
+ for (Environment::iterator I = Env.begin(), E = Env.end();
I != E; ++I) {
-
+
const Stmt *BlkExpr = I.getKey();
-
+
// Not a block-level expression?
if (!C.isBlkExpr(BlkExpr))
continue;
-
+
const SVal &X = I.getData();
-
+
if (SymReaper.isLive(S, BlkExpr)) {
// Copy the binding to the new map.
NewEnv.ExprBindings = F.Add(NewEnv.ExprBindings, BlkExpr, X);
-
+
// If the block expr's value is a memory region, then mark that region.
if (isa<loc::MemRegionVal>(X)) {
const MemRegion* R = cast<loc::MemRegionVal>(X).getRegion();
DRoots.push_back(R);
// Mark the super region of the RX as live.
- // e.g.: int x; char *y = (char*) &x; if (*y) ...
+ // e.g.: int x; char *y = (char*) &x; if (*y) ...
// 'y' => element region. 'x' is its super region.
// We only add one level super region for now.
diff --git a/lib/Analysis/ExplodedGraph.cpp b/lib/Analysis/ExplodedGraph.cpp
index 88bb120..463b171 100644
--- a/lib/Analysis/ExplodedGraph.cpp
+++ b/lib/Analysis/ExplodedGraph.cpp
@@ -64,10 +64,10 @@
}
void ExplodedNode::NodeGroup::addNode(ExplodedNode* N) {
-
+
assert ((reinterpret_cast<uintptr_t>(N) & Mask) == 0x0);
assert (!getFlag());
-
+
if (getKind() == Size1) {
if (ExplodedNode* NOld = getNode()) {
std::vector<ExplodedNode*>* V = new std::vector<ExplodedNode*>();
@@ -93,7 +93,7 @@
unsigned ExplodedNode::NodeGroup::size() const {
if (getFlag())
return 0;
-
+
if (getKind() == Size1)
return getNode() ? 1 : 0;
else
@@ -103,7 +103,7 @@
ExplodedNode** ExplodedNode::NodeGroup::begin() const {
if (getFlag())
return NULL;
-
+
if (getKind() == Size1)
return (ExplodedNode**) (getPtr() ? &P : NULL);
else
@@ -113,7 +113,7 @@
ExplodedNode** ExplodedNode::NodeGroup::end() const {
if (getFlag())
return NULL;
-
+
if (getKind() == Size1)
return (ExplodedNode**) (getPtr() ? &P+1 : NULL);
else {
@@ -127,47 +127,47 @@
if (getKind() == SizeOther) delete &getVector(getPtr());
}
-ExplodedNode *ExplodedGraph::getNode(const ProgramPoint& L,
+ExplodedNode *ExplodedGraph::getNode(const ProgramPoint& L,
const GRState* State, bool* IsNew) {
// Profile 'State' to determine if we already have an existing node.
- llvm::FoldingSetNodeID profile;
+ llvm::FoldingSetNodeID profile;
void* InsertPos = 0;
-
+
NodeTy::Profile(profile, L, State);
NodeTy* V = Nodes.FindNodeOrInsertPos(profile, InsertPos);
-
+
if (!V) {
// Allocate a new node.
V = (NodeTy*) Allocator.Allocate<NodeTy>();
new (V) NodeTy(L, State);
-
+
// Insert the node into the node set and return it.
Nodes.InsertNode(V, InsertPos);
-
+
++NumNodes;
-
+
if (IsNew) *IsNew = true;
}
else
if (IsNew) *IsNew = false;
-
+
return V;
}
std::pair<ExplodedGraph*, InterExplodedGraphMap*>
ExplodedGraph::Trim(const NodeTy* const* NBeg, const NodeTy* const* NEnd,
llvm::DenseMap<const void*, const void*> *InverseMap) const {
-
+
if (NBeg == NEnd)
return std::make_pair((ExplodedGraph*) 0,
(InterExplodedGraphMap*) 0);
-
+
assert (NBeg < NEnd);
llvm::OwningPtr<InterExplodedGraphMap> M(new InterExplodedGraphMap());
-
+
ExplodedGraph* G = TrimInternal(NBeg, NEnd, M.get(), InverseMap);
-
+
return std::make_pair(static_cast<ExplodedGraph*>(G), M.take());
}
@@ -179,10 +179,10 @@
typedef llvm::DenseSet<const ExplodedNode*> Pass1Ty;
Pass1Ty Pass1;
-
+
typedef llvm::DenseMap<const ExplodedNode*, ExplodedNode*> Pass2Ty;
Pass2Ty& Pass2 = M->M;
-
+
llvm::SmallVector<const ExplodedNode*, 10> WL1, WL2;
// ===- Pass 1 (reverse DFS) -===
@@ -190,59 +190,59 @@
assert(*I);
WL1.push_back(*I);
}
-
+
// Process the first worklist until it is empty. Because it is a SmallVector
// it acts like a LIFO stack.
while (!WL1.empty()) {
const ExplodedNode *N = WL1.back();
WL1.pop_back();
-
+
// Have we already visited this node? If so, continue to the next one.
if (Pass1.count(N))
continue;
// Otherwise, mark this node as visited.
Pass1.insert(N);
-
+
// If this is a root enqueue it to the second worklist.
if (N->Preds.empty()) {
WL2.push_back(N);
continue;
}
-
+
// Visit our predecessors and enqueue them.
for (ExplodedNode** I=N->Preds.begin(), **E=N->Preds.end(); I!=E; ++I)
WL1.push_back(*I);
}
-
+
// We didn't hit a root? Return with a null pointer for the new graph.
if (WL2.empty())
return 0;
// Create an empty graph.
ExplodedGraph* G = MakeEmptyGraph();
-
- // ===- Pass 2 (forward DFS to construct the new graph) -===
+
+ // ===- Pass 2 (forward DFS to construct the new graph) -===
while (!WL2.empty()) {
const ExplodedNode* N = WL2.back();
WL2.pop_back();
-
+
// Skip this node if we have already processed it.
if (Pass2.find(N) != Pass2.end())
continue;
-
+
// Create the corresponding node in the new graph and record the mapping
// from the old node to the new node.
ExplodedNode* NewN = G->getNode(N->getLocation(), N->State, NULL);
Pass2[N] = NewN;
-
+
// Also record the reverse mapping from the new node to the old node.
if (InverseMap) (*InverseMap)[NewN] = N;
-
+
// If this node is a root, designate it as such in the graph.
if (N->Preds.empty())
G->addRoot(NewN);
-
+
// In the case that some of the intended predecessors of NewN have already
// been created, we should hook them up as predecessors.
@@ -252,7 +252,7 @@
Pass2Ty::iterator PI = Pass2.find(*I);
if (PI == Pass2.end())
continue;
-
+
NewN->addPredecessor(PI->second);
}
@@ -261,7 +261,7 @@
// the new nodes from the original graph that should have nodes created
// in the new graph.
for (ExplodedNode **I=N->Succs.begin(), **E=N->Succs.end(); I!=E; ++I) {
- Pass2Ty::iterator PI = Pass2.find(*I);
+ Pass2Ty::iterator PI = Pass2.find(*I);
if (PI != Pass2.end()) {
PI->second->addPredecessor(NewN);
continue;
@@ -271,12 +271,12 @@
if (Pass1.count(*I))
WL2.push_back(*I);
}
-
+
// Finally, explictly mark all nodes without any successors as sinks.
if (N->isSink())
NewN->markAsSink();
}
-
+
return G;
}
diff --git a/lib/Analysis/GRBlockCounter.cpp b/lib/Analysis/GRBlockCounter.cpp
index f69a16d..4f4103a 100644
--- a/lib/Analysis/GRBlockCounter.cpp
+++ b/lib/Analysis/GRBlockCounter.cpp
@@ -1,5 +1,5 @@
//==- GRBlockCounter.h - ADT for counting block visits -------------*- C++ -*-//
-//
+//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
diff --git a/lib/Analysis/GRCoreEngine.cpp b/lib/Analysis/GRCoreEngine.cpp
index 7983dd8..909f619 100644
--- a/lib/Analysis/GRCoreEngine.cpp
+++ b/lib/Analysis/GRCoreEngine.cpp
@@ -1,5 +1,5 @@
//==- GRCoreEngine.cpp - Path-Sensitive Dataflow Engine ------------*- C++ -*-//
-//
+//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
@@ -48,27 +48,27 @@
return U;
}
};
-
+
class VISIBILITY_HIDDEN BFS : public GRWorkList {
std::queue<GRWorkListUnit> Queue;
public:
virtual bool hasWork() const {
return !Queue.empty();
}
-
+
virtual void Enqueue(const GRWorkListUnit& U) {
Queue.push(U);
}
-
+
virtual GRWorkListUnit Dequeue() {
// Don't use const reference. The subsequent pop_back() might make it
// unsafe.
- GRWorkListUnit U = Queue.front();
+ GRWorkListUnit U = Queue.front();
Queue.pop();
return U;
}
};
-
+
} // end anonymous namespace
// Place the dstor for GRWorkList here because it contains virtual member
@@ -86,14 +86,14 @@
virtual bool hasWork() const {
return !Queue.empty() || !Stack.empty();
}
-
+
virtual void Enqueue(const GRWorkListUnit& U) {
if (isa<BlockEntrance>(U.getNode()->getLocation()))
Queue.push(U);
else
Stack.push_back(U);
}
-
+
virtual GRWorkListUnit Dequeue() {
// Process all basic blocks to completion.
if (!Stack.empty()) {
@@ -101,13 +101,13 @@
Stack.pop_back(); // This technically "invalidates" U, but we are fine.
return U;
}
-
+
assert(!Queue.empty());
// Don't use const reference. The subsequent pop() might make it
// unsafe.
- GRWorkListUnit U = Queue.front();
+ GRWorkListUnit U = Queue.front();
Queue.pop();
- return U;
+ return U;
}
};
} // end anonymous namespace
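
The two worklists above differ only in ordering policy: plain BFS keeps one FIFO queue, while the hybrid variant queues block entrances FIFO and drains everything else LIFO, giving DFS within a block and BFS across blocks. Below is a small sketch of that policy, with a hypothetical WorkItem standing in for GRWorkListUnit; it is not the real worklist API.

#include <cassert>
#include <cstdio>
#include <deque>
#include <vector>

struct WorkItem { int Id; bool IsBlockEntrance; };

class HybridWorkList {
  std::deque<WorkItem> Queue;   // block entrances, processed FIFO
  std::vector<WorkItem> Stack;  // intra-block work, processed LIFO
public:
  bool hasWork() const { return !Queue.empty() || !Stack.empty(); }

  void Enqueue(const WorkItem& U) {
    if (U.IsBlockEntrance)
      Queue.push_back(U);
    else
      Stack.push_back(U);
  }

  WorkItem Dequeue() {
    // Finish the current block before starting a new one.
    if (!Stack.empty()) {
      WorkItem U = Stack.back();
      Stack.pop_back();
      return U;
    }
    assert(!Queue.empty() && "Dequeue called on an empty worklist");
    WorkItem U = Queue.front();  // copy, since pop_front invalidates front()
    Queue.pop_front();
    return U;
  }
};

int main() {
  HybridWorkList WL;
  WL.Enqueue({1, true});   // entrance of the next block
  WL.Enqueue({2, false});  // statements inside the current block
  WL.Enqueue({3, false});
  while (WL.hasWork())
    std::printf("process %d\n", WL.Dequeue().Id);  // 3, 2, then 1
  return 0;
}
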
@@ -128,13 +128,13 @@
}
bool GRCoreEngine::ProcessBlockEntrance(CFGBlock* Blk, const GRState* State,
- GRBlockCounter BC) {
+ GRBlockCounter BC) {
return SubEngine.ProcessBlockEntrance(Blk, State, BC);
}
void GRCoreEngine::ProcessBranch(Stmt* Condition, Stmt* Terminator,
GRBranchNodeBuilder& Builder) {
- SubEngine.ProcessBranch(Condition, Terminator, Builder);
+ SubEngine.ProcessBranch(Condition, Terminator, Builder);
}
void GRCoreEngine::ProcessIndirectGoto(GRIndirectGotoNodeBuilder& Builder) {
@@ -147,52 +147,52 @@
/// ExecuteWorkList - Run the worklist algorithm for a maximum number of steps.
bool GRCoreEngine::ExecuteWorkList(const LocationContext *L, unsigned Steps) {
-
+
if (G->num_roots() == 0) { // Initialize the analysis by constructing
// the root if none exists.
-
+
CFGBlock* Entry = &(L->getCFG()->getEntry());
-
- assert (Entry->empty() &&
+
+ assert (Entry->empty() &&
"Entry block must be empty.");
-
+
assert (Entry->succ_size() == 1 &&
"Entry block must have 1 successor.");
-
+
// Get the solitary successor.
- CFGBlock* Succ = *(Entry->succ_begin());
-
+ CFGBlock* Succ = *(Entry->succ_begin());
+
// Construct an edge representing the
// starting location in the function.
BlockEdge StartLoc(Entry, Succ, L);
-
+
// Set the current block counter to being empty.
WList->setBlockCounter(BCounterFactory.GetEmptyCounter());
-
+
// Generate the root.
GenerateNode(StartLoc, getInitialState(L), 0);
}
-
+
while (Steps && WList->hasWork()) {
--Steps;
const GRWorkListUnit& WU = WList->Dequeue();
-
+
// Set the current block counter.
WList->setBlockCounter(WU.getBlockCounter());
// Retrieve the node.
ExplodedNode* Node = WU.getNode();
-
+
// Dispatch on the location type.
switch (Node->getLocation().getKind()) {
case ProgramPoint::BlockEdgeKind:
HandleBlockEdge(cast<BlockEdge>(Node->getLocation()), Node);
break;
-
+
case ProgramPoint::BlockEntranceKind:
HandleBlockEntrance(cast<BlockEntrance>(Node->getLocation()), Node);
break;
-
+
case ProgramPoint::BlockExitKind:
assert (false && "BlockExit location never occurs in forward analysis.");
break;
@@ -201,22 +201,22 @@
assert(isa<PostStmt>(Node->getLocation()));
HandlePostStmt(cast<PostStmt>(Node->getLocation()), WU.getBlock(),
WU.getIndex(), Node);
- break;
+ break;
}
}
-
+
return WList->hasWork();
}
void GRCoreEngine::HandleBlockEdge(const BlockEdge& L, ExplodedNode* Pred) {
-
+
CFGBlock* Blk = L.getDst();
-
- // Check if we are entering the EXIT block.
+
+ // Check if we are entering the EXIT block.
if (Blk == &(Pred->getLocationContext()->getCFG()->getExit())) {
-
- assert (Pred->getLocationContext()->getCFG()->getExit().size() == 0
+
+ assert (Pred->getLocationContext()->getCFG()->getExit().size() == 0
&& "EXIT block cannot contain Stmts.");
// Process the final state transition.
@@ -228,81 +228,81 @@
}
// FIXME: Should we allow ProcessBlockEntrance to also manipulate state?
-
+
if (ProcessBlockEntrance(Blk, Pred->State, WList->getBlockCounter()))
GenerateNode(BlockEntrance(Blk, Pred->getLocationContext()), Pred->State, Pred);
}
void GRCoreEngine::HandleBlockEntrance(const BlockEntrance& L,
ExplodedNode* Pred) {
-
+
// Increment the block counter.
GRBlockCounter Counter = WList->getBlockCounter();
Counter = BCounterFactory.IncrementCount(Counter, L.getBlock()->getBlockID());
WList->setBlockCounter(Counter);
-
- // Process the entrance of the block.
+
+ // Process the entrance of the block.
if (Stmt* S = L.getFirstStmt()) {
- GRStmtNodeBuilder Builder(L.getBlock(), 0, Pred, this,
+ GRStmtNodeBuilder Builder(L.getBlock(), 0, Pred, this,
SubEngine.getStateManager());
ProcessStmt(S, Builder);
}
- else
+ else
HandleBlockExit(L.getBlock(), Pred);
}
void GRCoreEngine::HandleBlockExit(CFGBlock * B, ExplodedNode* Pred) {
-
+
if (Stmt* Term = B->getTerminator()) {
switch (Term->getStmtClass()) {
default:
assert(false && "Analysis for this terminator not implemented.");
break;
-
+
case Stmt::BinaryOperatorClass: // '&&' and '||'
HandleBranch(cast<BinaryOperator>(Term)->getLHS(), Term, B, Pred);
return;
-
+
case Stmt::ConditionalOperatorClass:
HandleBranch(cast<ConditionalOperator>(Term)->getCond(), Term, B, Pred);
return;
-
+
// FIXME: Use constant-folding in CFG construction to simplify this
// case.
-
+
case Stmt::ChooseExprClass:
HandleBranch(cast<ChooseExpr>(Term)->getCond(), Term, B, Pred);
return;
-
+
case Stmt::DoStmtClass:
HandleBranch(cast<DoStmt>(Term)->getCond(), Term, B, Pred);
return;
-
+
case Stmt::ForStmtClass:
HandleBranch(cast<ForStmt>(Term)->getCond(), Term, B, Pred);
return;
-
+
case Stmt::ContinueStmtClass:
case Stmt::BreakStmtClass:
- case Stmt::GotoStmtClass:
+ case Stmt::GotoStmtClass:
break;
-
+
case Stmt::IfStmtClass:
HandleBranch(cast<IfStmt>(Term)->getCond(), Term, B, Pred);
return;
-
+
case Stmt::IndirectGotoStmtClass: {
// Only 1 successor: the indirect goto dispatch block.
assert (B->succ_size() == 1);
-
+
GRIndirectGotoNodeBuilder
builder(Pred, B, cast<IndirectGotoStmt>(Term)->getTarget(),
*(B->succ_begin()), this);
-
+
ProcessIndirectGoto(builder);
return;
}
-
+
case Stmt::ObjCForCollectionStmtClass: {
// In the case of ObjCForCollectionStmt, it appears twice in a CFG:
//
@@ -317,15 +317,15 @@
HandleBranch(Term, Term, B, Pred);
return;
}
-
+
case Stmt::SwitchStmtClass: {
GRSwitchNodeBuilder builder(Pred, B, cast<SwitchStmt>(Term)->getCond(),
this);
-
+
ProcessSwitch(builder);
return;
}
-
+
case Stmt::WhileStmtClass:
HandleBranch(cast<WhileStmt>(Term)->getCond(), Term, B, Pred);
return;
@@ -334,8 +334,8 @@
assert (B->succ_size() == 1 &&
"Blocks with no terminator should have at most 1 successor.");
-
- GenerateNode(BlockEdge(B, *(B->succ_begin()), Pred->getLocationContext()),
+
+ GenerateNode(BlockEdge(B, *(B->succ_begin()), Pred->getLocationContext()),
Pred->State, Pred);
}
@@ -345,19 +345,19 @@
GRBranchNodeBuilder Builder(B, *(B->succ_begin()), *(B->succ_begin()+1),
Pred, this);
-
+
ProcessBranch(Cond, Term, Builder);
}
void GRCoreEngine::HandlePostStmt(const PostStmt& L, CFGBlock* B,
unsigned StmtIdx, ExplodedNode* Pred) {
-
+
assert (!B->empty());
if (StmtIdx == B->size())
HandleBlockExit(B, Pred);
else {
- GRStmtNodeBuilder Builder(B, StmtIdx, Pred, this,
+ GRStmtNodeBuilder Builder(B, StmtIdx, Pred, this,
SubEngine.getStateManager());
ProcessStmt((*B)[StmtIdx], Builder);
}
@@ -365,19 +365,19 @@
/// GenerateNode - Utility method to generate nodes, hook up successors,
/// and add nodes to the worklist.
-void GRCoreEngine::GenerateNode(const ProgramPoint& Loc,
+void GRCoreEngine::GenerateNode(const ProgramPoint& Loc,
const GRState* State, ExplodedNode* Pred) {
-
+
bool IsNew;
ExplodedNode* Node = G->getNode(Loc, State, &IsNew);
-
- if (Pred)
+
+ if (Pred)
Node->addPredecessor(Pred); // Link 'Node' with its predecessor.
else {
assert (IsNew);
G->addRoot(Node); // 'Node' has no predecessor. Make it a root.
}
-
+
// Only add 'Node' to the worklist if it was freshly generated.
if (IsNew) WList->Enqueue(Node);
}
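
GenerateNode above leans on ExplodedGraph::getNode folding identical (ProgramPoint, state) pairs into one node and reporting whether it is fresh; only fresh nodes are enqueued, which is what keeps the exploration finite on cyclic state spaces. Below is a toy sketch of that caching discipline, with a std::map in place of the FoldingSet and plain ints in place of real program points and states; the names are invented for the example.

#include <cstdio>
#include <map>
#include <utility>
#include <vector>

struct Node { int Loc, State; std::vector<Node*> Preds; };

class Graph {
  std::map<std::pair<int, int>, Node*> Unique;  // (Loc, State) -> node
  std::vector<Node*> Storage;
public:
  Node* getNode(int Loc, int State, bool* IsNew) {
    Node*& Slot = Unique[{Loc, State}];
    *IsNew = (Slot == nullptr);
    if (*IsNew) {
      Slot = new Node{Loc, State};
      Storage.push_back(Slot);
    }
    return Slot;
  }
  ~Graph() { for (Node* N : Storage) delete N; }
};

int main() {
  Graph G;
  std::vector<Node*> WorkList;
  bool IsNew;

  Node* A = G.getNode(/*Loc=*/1, /*State=*/7, &IsNew);
  if (IsNew) WorkList.push_back(A);     // fresh: explore it later

  Node* B = G.getNode(1, 7, &IsNew);    // same point, same state
  if (IsNew) WorkList.push_back(B);     // not fresh: nothing enqueued

  std::printf("A == B: %d, worklist size: %zu\n", A == B, WorkList.size());
  return 0;
}
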
@@ -385,7 +385,7 @@
GRStmtNodeBuilder::GRStmtNodeBuilder(CFGBlock* b, unsigned idx,
ExplodedNode* N, GRCoreEngine* e,
GRStateManager &mgr)
- : Eng(*e), B(*b), Idx(idx), Pred(N), LastNode(N), Mgr(mgr), Auditor(0),
+ : Eng(*e), B(*b), Idx(idx), Pred(N), LastNode(N), Mgr(mgr), Auditor(0),
PurgingDeadSymbols(false), BuildSinks(false), HasGeneratedNode(false),
PointKind(ProgramPoint::PostStmtKind), Tag(0) {
Deferred.insert(N);
@@ -400,16 +400,16 @@
void GRStmtNodeBuilder::GenerateAutoTransition(ExplodedNode* N) {
assert (!N->isSink());
-
+
PostStmt Loc(getStmt(), N->getLocationContext());
-
+
if (Loc == N->getLocation()) {
// Note: 'N' should be a fresh node because otherwise it shouldn't be
// a member of Deferred.
Eng.WList->Enqueue(N, B, Idx+1);
return;
}
-
+
bool IsNew;
ExplodedNode* Succ = Eng.G->getNode(Loc, N->State, &IsNew);
Succ->addPredecessor(N);
@@ -423,10 +423,10 @@
switch (K) {
default:
assert(false && "Invalid PostXXXKind.");
-
+
case ProgramPoint::PostStmtKind:
return PostStmt(S, L, tag);
-
+
case ProgramPoint::PostLoadKind:
return PostLoad(S, L, tag);
@@ -435,19 +435,19 @@
case ProgramPoint::PostLocationChecksSucceedKind:
return PostLocationChecksSucceed(S, L, tag);
-
+
case ProgramPoint::PostOutOfBoundsCheckFailedKind:
return PostOutOfBoundsCheckFailed(S, L, tag);
-
+
case ProgramPoint::PostNullCheckFailedKind:
return PostNullCheckFailed(S, L, tag);
-
+
case ProgramPoint::PostStoreKind:
return PostStore(S, L, tag);
-
+
case ProgramPoint::PostLValueKind:
return PostLValue(S, L, tag);
-
+
case ProgramPoint::PostPurgeDeadSymbolsKind:
return PostPurgeDeadSymbols(S, L, tag);
}
@@ -459,10 +459,10 @@
ProgramPoint::Kind K,
const void *tag) {
return K == ProgramPoint::PreStmtKind
- ? generateNodeInternal(PreStmt(S, Pred->getLocationContext(),tag),
+ ? generateNodeInternal(PreStmt(S, Pred->getLocationContext(),tag),
State, Pred)
: generateNodeInternal(GetPostLoc(S, K, Pred->getLocationContext(), tag),
- State, Pred);
+ State, Pred);
}
ExplodedNode*
@@ -473,49 +473,49 @@
ExplodedNode* N = Eng.G->getNode(Loc, State, &IsNew);
N->addPredecessor(Pred);
Deferred.erase(Pred);
-
+
if (IsNew) {
Deferred.insert(N);
LastNode = N;
return N;
}
-
+
LastNode = NULL;
- return NULL;
+ return NULL;
}
ExplodedNode* GRBranchNodeBuilder::generateNode(const GRState* State,
bool branch) {
-
+
// If the branch has been marked infeasible we should not generate a node.
if (!isFeasible(branch))
return NULL;
-
+
bool IsNew;
-
+
ExplodedNode* Succ =
Eng.G->getNode(BlockEdge(Src,branch ? DstT:DstF,Pred->getLocationContext()),
State, &IsNew);
-
+
Succ->addPredecessor(Pred);
-
+
if (branch)
GeneratedTrue = true;
else
- GeneratedFalse = true;
-
+ GeneratedFalse = true;
+
if (IsNew) {
Deferred.push_back(Succ);
return Succ;
}
-
+
return NULL;
}
GRBranchNodeBuilder::~GRBranchNodeBuilder() {
if (!GeneratedTrue) generateNode(Pred->State, true);
if (!GeneratedFalse) generateNode(Pred->State, false);
-
+
for (DeferredTy::iterator I=Deferred.begin(), E=Deferred.end(); I!=E; ++I)
if (!(*I)->isSink()) Eng.WList->Enqueue(*I);
}
@@ -525,22 +525,22 @@
GRIndirectGotoNodeBuilder::generateNode(const iterator& I, const GRState* St,
bool isSink) {
bool IsNew;
-
- ExplodedNode* Succ = Eng.G->getNode(BlockEdge(Src, I.getBlock(),
+
+ ExplodedNode* Succ = Eng.G->getNode(BlockEdge(Src, I.getBlock(),
Pred->getLocationContext()), St, &IsNew);
-
+
Succ->addPredecessor(Pred);
-
+
if (IsNew) {
-
+
if (isSink)
Succ->markAsSink();
else
Eng.WList->Enqueue(Succ);
-
+
return Succ;
}
-
+
return NULL;
}
@@ -549,42 +549,42 @@
GRSwitchNodeBuilder::generateCaseStmtNode(const iterator& I, const GRState* St){
bool IsNew;
-
+
ExplodedNode* Succ = Eng.G->getNode(BlockEdge(Src, I.getBlock(),
Pred->getLocationContext()), St, &IsNew);
Succ->addPredecessor(Pred);
-
+
if (IsNew) {
Eng.WList->Enqueue(Succ);
return Succ;
}
-
+
return NULL;
}
ExplodedNode*
GRSwitchNodeBuilder::generateDefaultCaseNode(const GRState* St, bool isSink) {
-
+
// Get the block for the default case.
assert (Src->succ_rbegin() != Src->succ_rend());
CFGBlock* DefaultBlock = *Src->succ_rbegin();
-
+
bool IsNew;
-
+
ExplodedNode* Succ = Eng.G->getNode(BlockEdge(Src, DefaultBlock,
Pred->getLocationContext()), St, &IsNew);
Succ->addPredecessor(Pred);
-
+
if (IsNew) {
if (isSink)
Succ->markAsSink();
else
Eng.WList->Enqueue(Succ);
-
+
return Succ;
}
-
+
return NULL;
}
@@ -596,18 +596,18 @@
ExplodedNode*
GREndPathNodeBuilder::generateNode(const GRState* State, const void *tag,
ExplodedNode* P) {
- HasGeneratedNode = true;
+ HasGeneratedNode = true;
bool IsNew;
-
- ExplodedNode* Node = Eng.G->getNode(BlockEntrance(&B,
+
+ ExplodedNode* Node = Eng.G->getNode(BlockEntrance(&B,
Pred->getLocationContext(), tag), State, &IsNew);
-
+
Node->addPredecessor(P ? P : Pred);
-
+
if (IsNew) {
Eng.G->addEndOfPath(Node);
return Node;
}
-
+
return NULL;
}
diff --git a/lib/Analysis/GRExprEngine.cpp b/lib/Analysis/GRExprEngine.cpp
index 053da67..b4b69cd 100644
--- a/lib/Analysis/GRExprEngine.cpp
+++ b/lib/Analysis/GRExprEngine.cpp
@@ -44,7 +44,7 @@
class VISIBILITY_HIDDEN MappedBatchAuditor : public GRSimpleAPICheck {
typedef llvm::ImmutableList<GRSimpleAPICheck*> Checks;
typedef llvm::DenseMap<void*,Checks> MapTy;
-
+
MapTy M;
Checks::Factory F;
Checks AllStmts;
@@ -52,18 +52,18 @@
public:
MappedBatchAuditor(llvm::BumpPtrAllocator& Alloc) :
F(Alloc), AllStmts(F.GetEmptyList()) {}
-
+
virtual ~MappedBatchAuditor() {
llvm::DenseSet<GRSimpleAPICheck*> AlreadyVisited;
-
+
for (MapTy::iterator MI = M.begin(), ME = M.end(); MI != ME; ++MI)
for (Checks::iterator I=MI->second.begin(), E=MI->second.end(); I!=E;++I){
GRSimpleAPICheck* check = *I;
-
+
if (AlreadyVisited.count(check))
continue;
-
+
AlreadyVisited.insert(check);
delete check;
}
@@ -75,10 +75,10 @@
MapTy::iterator I = M.find(key);
M[key] = F.Concat(A, I == M.end() ? F.GetEmptyList() : I->second);
}
-
+
void AddCheck(GRSimpleAPICheck *A) {
assert (A && "Check cannot be null.");
- AllStmts = F.Concat(A, AllStmts);
+ AllStmts = F.Concat(A, AllStmts);
}
virtual bool Audit(ExplodedNode* N, GRStateManager& VMgr) {
@@ -86,17 +86,17 @@
bool isSink = false;
for (Checks::iterator I = AllStmts.begin(), E = AllStmts.end(); I!=E; ++I)
isSink |= (*I)->Audit(N, VMgr);
-
+
// Next handle the auditors that accept only specific statements.
const Stmt* S = cast<PostStmt>(N->getLocation()).getStmt();
void* key = reinterpret_cast<void*>((uintptr_t) S->getStmtClass());
MapTy::iterator MI = M.find(key);
- if (MI != M.end()) {
+ if (MI != M.end()) {
for (Checks::iterator I=MI->second.begin(), E=MI->second.end(); I!=E; ++I)
isSink |= (*I)->Audit(N, VMgr);
}
-
- return isSink;
+
+ return isSink;
}
};
@@ -105,30 +105,30 @@
//===----------------------------------------------------------------------===//
// Checker worklist routines.
//===----------------------------------------------------------------------===//
-
-void GRExprEngine::CheckerVisit(Stmt *S, ExplodedNodeSet &Dst,
+
+void GRExprEngine::CheckerVisit(Stmt *S, ExplodedNodeSet &Dst,
ExplodedNodeSet &Src, bool isPrevisit) {
-
+
if (Checkers.empty()) {
Dst = Src;
return;
}
-
+
ExplodedNodeSet Tmp;
ExplodedNodeSet *PrevSet = &Src;
-
+
for (std::vector<Checker*>::iterator I = Checkers.begin(), E = Checkers.end();
I != E; ++I) {
- ExplodedNodeSet *CurrSet = (I+1 == E) ? &Dst
+ ExplodedNodeSet *CurrSet = (I+1 == E) ? &Dst
: (PrevSet == &Tmp) ? &Src : &Tmp;
CurrSet->clear();
Checker *checker = *I;
-
+
for (ExplodedNodeSet::iterator NI = PrevSet->begin(), NE = PrevSet->end();
NI != NE; ++NI)
checker->GR_Visit(*CurrSet, *Builder, *this, S, *NI, isPrevisit);
-
+
// Update which NodeSet is the current one.
PrevSet = CurrSet;
}
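
The loop above threads one set of exploded nodes through every registered checker, alternating between two scratch sets so that one checker's output becomes the next checker's input, with the final round landing in Dst. Below is a simplified sketch of that rotation, using plain vectors and std::function checkers; the real code also reuses Src as a scratch buffer, which the sketch leaves out.

#include <cstdio>
#include <functional>
#include <string>
#include <vector>

typedef std::string Node;
typedef std::vector<Node> NodeSet;
// A "checker" maps one input node to zero or more output nodes.
typedef std::function<void(const Node&, NodeSet&)> Checker;

static void CheckerVisit(const NodeSet& Src, NodeSet& Dst,
                         const std::vector<Checker>& Checkers) {
  if (Checkers.empty()) { Dst = Src; return; }
  NodeSet Prev = Src, Curr;
  for (const Checker& C : Checkers) {
    Curr.clear();
    for (const Node& N : Prev)
      C(N, Curr);          // each checker may split or drop nodes
    Prev.swap(Curr);       // output of this checker feeds the next one
  }
  Dst = Prev;
}

int main() {
  std::vector<Checker> Checkers = {
    // A checker that splits every node into two successor states.
    [](const Node& N, NodeSet& Out) {
      Out.push_back(N + ".a");
      Out.push_back(N + ".b");
    },
    // A checker that drops (sinks) the ".b" states.
    [](const Node& N, NodeSet& Out) {
      if (N.find(".b") == Node::npos)
        Out.push_back(N);
    },
  };
  NodeSet Dst;
  CheckerVisit({"n0"}, Dst, Checkers);
  for (const Node& N : Dst)
    std::printf("surviving node: %s\n", N.c_str());  // just "n0.a"
  return 0;
}
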
@@ -149,20 +149,20 @@
GRExprEngine::GRExprEngine(AnalysisManager &mgr)
: AMgr(mgr),
- CoreEngine(mgr.getASTContext(), *this),
+ CoreEngine(mgr.getASTContext(), *this),
G(CoreEngine.getGraph()),
Builder(NULL),
- StateMgr(G.getContext(), mgr.getStoreManagerCreator(),
+ StateMgr(G.getContext(), mgr.getStoreManagerCreator(),
mgr.getConstraintManagerCreator(), G.getAllocator()),
SymMgr(StateMgr.getSymbolManager()),
ValMgr(StateMgr.getValueManager()),
SVator(ValMgr.getSValuator()),
CurrentStmt(NULL),
NSExceptionII(NULL), NSExceptionInstanceRaiseSelectors(NULL),
- RaiseSel(GetNullarySelector("raise", G.getContext())),
+ RaiseSel(GetNullarySelector("raise", G.getContext())),
BR(mgr, *this) {}
-GRExprEngine::~GRExprEngine() {
+GRExprEngine::~GRExprEngine() {
BR.FlushReports();
delete [] NSExceptionInstanceRaiseSelectors;
for (std::vector<Checker*>::iterator I=Checkers.begin(), E=Checkers.end();
@@ -184,7 +184,7 @@
void GRExprEngine::AddCheck(GRSimpleAPICheck* A, Stmt::StmtClass C) {
if (!BatchAuditor)
BatchAuditor.reset(new MappedBatchAuditor(getGraph().getAllocator()));
-
+
((MappedBatchAuditor*) BatchAuditor.get())->AddCheck(A, C);
}
@@ -197,7 +197,7 @@
const GRState* GRExprEngine::getInitialState(const LocationContext *InitLoc) {
const GRState *state = StateMgr.getInitialState(InitLoc);
-
+
// Precondition: the first argument of 'main' is an integer guaranteed
// to be > 0.
// FIXME: It would be nice if we had a more general mechanism to add
@@ -212,13 +212,13 @@
SVal V = state->getSVal(loc::MemRegionVal(R));
SVal Constraint = EvalBinOp(state, BinaryOperator::GT, V,
ValMgr.makeZeroVal(T),
- getContext().IntTy);
+ getContext().IntTy);
if (const GRState *newState = state->assume(Constraint, true))
state = newState;
}
}
-
+
return state;
}
@@ -227,31 +227,31 @@
//===----------------------------------------------------------------------===//
void GRExprEngine::ProcessStmt(Stmt* S, GRStmtNodeBuilder& builder) {
-
+
PrettyStackTraceLoc CrashInfo(getContext().getSourceManager(),
S->getLocStart(),
"Error evaluating statement");
-
+
Builder = &builder;
EntryNode = builder.getLastNode();
-
+
// FIXME: Consolidate.
CurrentStmt = S;
StateMgr.CurrentStmt = S;
-
+
// Set up our simple checks.
if (BatchAuditor)
Builder->setAuditor(BatchAuditor.get());
-
- // Create the cleaned state.
- SymbolReaper SymReaper(*AMgr.getLiveVariables(), SymMgr);
+
+ // Create the cleaned state.
+ SymbolReaper SymReaper(*AMgr.getLiveVariables(), SymMgr);
CleanedState = AMgr.shouldPurgeDead()
? StateMgr.RemoveDeadBindings(EntryNode->getState(), CurrentStmt, SymReaper)
: EntryNode->getState();
// Process any special transfer function for dead symbols.
ExplodedNodeSet Tmp;
-
+
if (!SymReaper.hasDeadSymbols())
Tmp.Add(EntryNode);
else {
@@ -260,36 +260,36 @@
SaveAndRestore<bool> OldPurgeDeadSymbols(Builder->PurgingDeadSymbols);
Builder->PurgingDeadSymbols = true;
-
- getTF().EvalDeadSymbols(Tmp, *this, *Builder, EntryNode, S,
+
+ getTF().EvalDeadSymbols(Tmp, *this, *Builder, EntryNode, S,
CleanedState, SymReaper);
if (!Builder->BuildSinks && !Builder->HasGeneratedNode)
Tmp.Add(EntryNode);
}
-
+
bool HasAutoGenerated = false;
for (ExplodedNodeSet::iterator I=Tmp.begin(), E=Tmp.end(); I!=E; ++I) {
ExplodedNodeSet Dst;
-
- // Set the cleaned state.
+
+ // Set the cleaned state.
Builder->SetCleanedState(*I == EntryNode ? CleanedState : GetState(*I));
-
- // Visit the statement.
+
+ // Visit the statement.
Visit(S, *I, Dst);
// Do we need to auto-generate a node? We only need to do this to generate
// a node with a "cleaned" state; GRCoreEngine will actually handle
- // auto-transitions for other cases.
+ // auto-transitions for other cases.
if (Dst.size() == 1 && *Dst.begin() == EntryNode
&& !Builder->HasGeneratedNode && !HasAutoGenerated) {
HasAutoGenerated = true;
builder.generateNode(S, GetState(EntryNode), *I);
}
}
-
+
// NULL out these variables to cleanup.
CleanedState = NULL;
EntryNode = NULL;
@@ -297,11 +297,11 @@
// FIXME: Consolidate.
StateMgr.CurrentStmt = 0;
CurrentStmt = 0;
-
+
Builder = NULL;
}
-void GRExprEngine::Visit(Stmt* S, ExplodedNode* Pred, ExplodedNodeSet& Dst) {
+void GRExprEngine::Visit(Stmt* S, ExplodedNode* Pred, ExplodedNodeSet& Dst) {
PrettyStackTraceLoc CrashInfo(getContext().getSourceManager(),
S->getLocStart(),
"Error evaluating statement");
@@ -309,32 +309,32 @@
// FIXME: add metadata to the CFG so that we can disable
// this check when we KNOW that there is no block-level subexpression.
// The motivation is that this check requires a hashtable lookup.
-
+
if (S != CurrentStmt && Pred->getLocationContext()->getCFG()->isBlkExpr(S)) {
Dst.Add(Pred);
return;
}
-
+
switch (S->getStmtClass()) {
-
+
default:
// Cases we intentionally have "default" handle:
// AddrLabelExpr, IntegerLiteral, CharacterLiteral
-
+
Dst.Add(Pred); // No-op. Simply propagate the current state unchanged.
break;
-
+
case Stmt::ArraySubscriptExprClass:
VisitArraySubscriptExpr(cast<ArraySubscriptExpr>(S), Pred, Dst, false);
break;
-
+
case Stmt::AsmStmtClass:
VisitAsmStmt(cast<AsmStmt>(S), Pred, Dst);
break;
-
+
case Stmt::BinaryOperatorClass: {
BinaryOperator* B = cast<BinaryOperator>(S);
-
+
if (B->isLogicalOp()) {
VisitLogicalExpr(B, Pred, Dst);
break;
@@ -348,7 +348,7 @@
if (AMgr.shouldEagerlyAssume() && (B->isRelationalOp() || B->isEqualityOp())) {
ExplodedNodeSet Tmp;
VisitBinaryOperator(cast<BinaryOperator>(S), Pred, Tmp);
- EvalEagerlyAssume(Dst, Tmp, cast<Expr>(S));
+ EvalEagerlyAssume(Dst, Tmp, cast<Expr>(S));
}
else
VisitBinaryOperator(cast<BinaryOperator>(S), Pred, Dst);
@@ -365,13 +365,13 @@
// FIXME: ChooseExpr is really a constant. We need to fix
// the CFG so it does not model them as explicit control-flow.
-
+
case Stmt::ChooseExprClass: { // __builtin_choose_expr
ChooseExpr* C = cast<ChooseExpr>(S);
VisitGuardedExpr(C, C->getLHS(), C->getRHS(), Pred, Dst);
break;
}
-
+
case Stmt::CompoundAssignOperatorClass:
VisitBinaryOperator(cast<BinaryOperator>(S), Pred, Dst);
break;
@@ -379,22 +379,22 @@
case Stmt::CompoundLiteralExprClass:
VisitCompoundLiteralExpr(cast<CompoundLiteralExpr>(S), Pred, Dst, false);
break;
-
+
case Stmt::ConditionalOperatorClass: { // '?' operator
ConditionalOperator* C = cast<ConditionalOperator>(S);
VisitGuardedExpr(C, C->getLHS(), C->getRHS(), Pred, Dst);
break;
}
-
+
case Stmt::DeclRefExprClass:
case Stmt::QualifiedDeclRefExprClass:
VisitDeclRefExpr(cast<DeclRefExpr>(S), Pred, Dst, false);
break;
-
+
case Stmt::DeclStmtClass:
VisitDeclStmt(cast<DeclStmt>(S), Pred, Dst);
break;
-
+
case Stmt::ImplicitCastExprClass:
case Stmt::CStyleCastExprClass: {
CastExpr* C = cast<CastExpr>(S);
@@ -405,11 +405,11 @@
case Stmt::InitListExprClass:
VisitInitListExpr(cast<InitListExpr>(S), Pred, Dst);
break;
-
+
case Stmt::MemberExprClass:
VisitMemberExpr(cast<MemberExpr>(S), Pred, Dst, false);
break;
-
+
case Stmt::ObjCIvarRefExprClass:
VisitObjCIvarRefExpr(cast<ObjCIvarRefExpr>(S), Pred, Dst, false);
break;
@@ -417,12 +417,12 @@
case Stmt::ObjCForCollectionStmtClass:
VisitObjCForCollectionStmt(cast<ObjCForCollectionStmt>(S), Pred, Dst);
break;
-
+
case Stmt::ObjCMessageExprClass: {
VisitObjCMessageExpr(cast<ObjCMessageExpr>(S), Pred, Dst);
break;
}
-
+
case Stmt::ObjCAtThrowStmtClass: {
// FIXME: This is not complete. We basically treat @throw as
// an abort.
@@ -431,19 +431,19 @@
MakeNode(Dst, S, Pred, GetState(Pred));
break;
}
-
+
case Stmt::ParenExprClass:
Visit(cast<ParenExpr>(S)->getSubExpr()->IgnoreParens(), Pred, Dst);
break;
-
+
case Stmt::ReturnStmtClass:
VisitReturnStmt(cast<ReturnStmt>(S), Pred, Dst);
break;
-
+
case Stmt::SizeOfAlignOfExprClass:
VisitSizeOfAlignOfExpr(cast<SizeOfAlignOfExpr>(S), Pred, Dst);
break;
-
+
case Stmt::StmtExprClass: {
StmtExpr* SE = cast<StmtExpr>(S);
@@ -454,21 +454,21 @@
Dst.Add(Pred);
break;
}
-
+
if (Expr* LastExpr = dyn_cast<Expr>(*SE->getSubStmt()->body_rbegin())) {
const GRState* state = GetState(Pred);
MakeNode(Dst, SE, Pred, state->BindExpr(SE, state->getSVal(LastExpr)));
}
else
Dst.Add(Pred);
-
+
break;
}
case Stmt::StringLiteralClass:
VisitLValue(cast<StringLiteral>(S), Pred, Dst);
break;
-
+
case Stmt::UnaryOperatorClass: {
UnaryOperator *U = cast<UnaryOperator>(S);
if (AMgr.shouldEagerlyAssume() && (U->getOpcode() == UnaryOperator::LNot)) {
@@ -483,43 +483,43 @@
}
}
-void GRExprEngine::VisitLValue(Expr* Ex, ExplodedNode* Pred,
+void GRExprEngine::VisitLValue(Expr* Ex, ExplodedNode* Pred,
ExplodedNodeSet& Dst) {
-
+
Ex = Ex->IgnoreParens();
-
+
if (Ex != CurrentStmt && Pred->getLocationContext()->getCFG()->isBlkExpr(Ex)) {
Dst.Add(Pred);
return;
}
-
+
switch (Ex->getStmtClass()) {
-
+
case Stmt::ArraySubscriptExprClass:
VisitArraySubscriptExpr(cast<ArraySubscriptExpr>(Ex), Pred, Dst, true);
return;
-
+
case Stmt::DeclRefExprClass:
case Stmt::QualifiedDeclRefExprClass:
VisitDeclRefExpr(cast<DeclRefExpr>(Ex), Pred, Dst, true);
return;
-
+
case Stmt::ObjCIvarRefExprClass:
VisitObjCIvarRefExpr(cast<ObjCIvarRefExpr>(Ex), Pred, Dst, true);
return;
-
+
case Stmt::UnaryOperatorClass:
VisitUnaryOperator(cast<UnaryOperator>(Ex), Pred, Dst, true);
return;
-
+
case Stmt::MemberExprClass:
VisitMemberExpr(cast<MemberExpr>(Ex), Pred, Dst, true);
return;
-
+
case Stmt::CompoundLiteralExprClass:
VisitCompoundLiteralExpr(cast<CompoundLiteralExpr>(Ex), Pred, Dst, true);
return;
-
+
case Stmt::ObjCPropertyRefExprClass:
case Stmt::ObjCImplicitSetterGetterRefExprClass:
// FIXME: Property assignments are lvalues, but not really "locations".
@@ -542,7 +542,7 @@
MakeNode(Dst, Ex, Pred, state->BindExpr(Ex, V));
return;
}
-
+
default:
// Arbitrary subexpressions can return aggregate temporaries that
// can be used in an lvalue context. We need to enhance our support
@@ -551,7 +551,7 @@
assert ((Ex->getType()->isAggregateType()) &&
"Other kinds of expressions with non-aggregate/union types do"
" not have lvalues.");
-
+
Visit(Ex, Pred, Dst);
}
}
@@ -562,7 +562,7 @@
bool GRExprEngine::ProcessBlockEntrance(CFGBlock* B, const GRState*,
GRBlockCounter BC) {
-
+
return BC.getNumVisited(B->getBlockID()) < 3;
}
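
ProcessBlockEntrance above is the loop bound of the whole analysis: a per-block visit counter travels with each worklist unit, and a block is re-entered only while its count is below 3, so loops are unrolled a fixed number of times per path. Below is a sketch of that cutoff using a plain map in place of the functional GRBlockCounter; the helper names are invented for the example.

#include <cstdio>
#include <map>

class BlockCounter {
  std::map<unsigned, unsigned> Counts;
public:
  unsigned getNumVisited(unsigned BlockID) const {
    std::map<unsigned, unsigned>::const_iterator It = Counts.find(BlockID);
    return It == Counts.end() ? 0 : It->second;
  }
  void increment(unsigned BlockID) { ++Counts[BlockID]; }
};

static bool ProcessBlockEntrance(unsigned BlockID, const BlockCounter& BC,
                                 unsigned MaxVisits = 3) {
  return BC.getNumVisited(BlockID) < MaxVisits;
}

int main() {
  BlockCounter BC;
  const unsigned LoopHeader = 42;
  // Simulate repeatedly reaching the same loop header on one path.
  for (int Trip = 0; Trip < 5; ++Trip) {
    if (!ProcessBlockEntrance(LoopHeader, BC)) {
      std::printf("trip %d: cut off, loop unrolled 3 times\n", Trip);
      break;
    }
    BC.increment(LoopHeader);
    std::printf("trip %d: entering block %u\n", Trip, LoopHeader);
  }
  return 0;
}
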
@@ -586,53 +586,53 @@
const GRState* GRExprEngine::MarkBranch(const GRState* state,
Stmt* Terminator,
bool branchTaken) {
-
+
switch (Terminator->getStmtClass()) {
default:
return state;
-
+
case Stmt::BinaryOperatorClass: { // '&&' and '||'
-
+
BinaryOperator* B = cast<BinaryOperator>(Terminator);
BinaryOperator::Opcode Op = B->getOpcode();
-
+
assert (Op == BinaryOperator::LAnd || Op == BinaryOperator::LOr);
-
+
// For &&, if we take the true branch, then the value of the whole
// expression is that of the RHS expression.
//
// For ||, if we take the false branch, then the value of the whole
// expression is that of the RHS expression.
-
+
Expr* Ex = (Op == BinaryOperator::LAnd && branchTaken) ||
- (Op == BinaryOperator::LOr && !branchTaken)
+ (Op == BinaryOperator::LOr && !branchTaken)
? B->getRHS() : B->getLHS();
-
+
return state->BindExpr(B, UndefinedVal(Ex));
}
-
+
case Stmt::ConditionalOperatorClass: { // ?:
-
+
ConditionalOperator* C = cast<ConditionalOperator>(Terminator);
-
+
// For ?, if branchTaken == true then the value is either the LHS or
// the condition itself. (GNU extension).
-
- Expr* Ex;
-
+
+ Expr* Ex;
+
if (branchTaken)
- Ex = C->getLHS() ? C->getLHS() : C->getCond();
+ Ex = C->getLHS() ? C->getLHS() : C->getCond();
else
Ex = C->getRHS();
-
+
return state->BindExpr(C, UndefinedVal(Ex));
}
-
+
case Stmt::ChooseExprClass: { // ?:
-
+
ChooseExpr* C = cast<ChooseExpr>(Terminator);
-
- Expr* Ex = branchTaken ? C->getLHS() : C->getRHS();
+
+ Expr* Ex = branchTaken ? C->getLHS() : C->getRHS();
return state->BindExpr(C, UndefinedVal(Ex));
}
}
@@ -652,19 +652,19 @@
uint64_t bits = 0;
bool bitsInit = false;
-
+
while (CastExpr *CE = dyn_cast<CastExpr>(Ex)) {
QualType T = CE->getType();
if (!T->isIntegerType())
return UnknownVal();
-
+
uint64_t newBits = Ctx.getTypeSize(T);
if (!bitsInit || newBits < bits) {
bitsInit = true;
bits = newBits;
}
-
+
Ex = CE->getSubExpr();
}
@@ -673,26 +673,26 @@
if (!bitsInit || !T->isIntegerType() || Ctx.getTypeSize(T) > bits)
return UnknownVal();
-
+
return state->getSVal(Ex);
}
void GRExprEngine::ProcessBranch(Stmt* Condition, Stmt* Term,
GRBranchNodeBuilder& builder) {
-
+
// Check for NULL conditions; e.g. "for(;;)"
- if (!Condition) {
+ if (!Condition) {
builder.markInfeasible(false);
return;
}
-
+
PrettyStackTraceLoc CrashInfo(getContext().getSourceManager(),
Condition->getLocStart(),
"Error evaluating branch");
- const GRState* PrevState = builder.getState();
+ const GRState* PrevState = builder.getState();
SVal V = PrevState->getSVal(Condition);
-
+
switch (V.getBaseKind()) {
default:
break;
@@ -707,32 +707,32 @@
SVal recovered = RecoverCastedSymbol(getStateManager(),
builder.getState(), Condition,
getContext());
-
+
if (!recovered.isUnknown()) {
V = recovered;
break;
}
}
}
-
+
builder.generateNode(MarkBranch(PrevState, Term, true), true);
builder.generateNode(MarkBranch(PrevState, Term, false), false);
return;
}
-
- case SVal::UndefinedKind: {
+
+ case SVal::UndefinedKind: {
ExplodedNode* N = builder.generateNode(PrevState, true);
if (N) {
N->markAsSink();
UndefBranches.insert(N);
}
-
+
builder.markInfeasible(false);
return;
- }
+ }
}
-
+
// Process the true branch.
if (builder.isFeasible(true)) {
if (const GRState *state = PrevState->assume(V, true))
@@ -740,8 +740,8 @@
else
builder.markInfeasible(true);
}
-
- // Process the false branch.
+
+ // Process the false branch.
if (builder.isFeasible(false)) {
if (const GRState *state = PrevState->assume(V, false))
builder.generateNode(MarkBranch(state, Term, false), false);
@@ -754,28 +754,28 @@
/// nodes by processing the 'effects' of a computed goto jump.
void GRExprEngine::ProcessIndirectGoto(GRIndirectGotoNodeBuilder& builder) {
- const GRState *state = builder.getState();
+ const GRState *state = builder.getState();
SVal V = state->getSVal(builder.getTarget());
-
+
// Three possibilities:
//
// (1) We know the computed label.
// (2) The label is NULL (or some other constant), or Undefined.
// (3) We have no clue about the label. Dispatch to all targets.
//
-
+
typedef GRIndirectGotoNodeBuilder::iterator iterator;
if (isa<loc::GotoLabel>(V)) {
LabelStmt* L = cast<loc::GotoLabel>(V).getLabel();
-
+
for (iterator I=builder.begin(), E=builder.end(); I != E; ++I) {
if (I.getLabel() == L) {
builder.generateNode(I, state);
return;
}
}
-
+
assert (false && "No block with label.");
return;
}
@@ -786,10 +786,10 @@
UndefBranches.insert(N);
return;
}
-
+
// This is really a catch-all. We don't support symbolics yet.
// FIXME: Implement dispatch for symbolic pointers.
-
+
for (iterator I=builder.begin(), E=builder.end(); I != E; ++I)
builder.generateNode(I, state);
}
@@ -797,27 +797,27 @@
void GRExprEngine::VisitGuardedExpr(Expr* Ex, Expr* L, Expr* R,
ExplodedNode* Pred, ExplodedNodeSet& Dst) {
-
+
assert (Ex == CurrentStmt && Pred->getLocationContext()->getCFG()->isBlkExpr(Ex));
-
+
const GRState* state = GetState(Pred);
SVal X = state->getSVal(Ex);
-
+
assert (X.isUndef());
-
+
Expr *SE = (Expr*) cast<UndefinedVal>(X).getData();
- assert(SE);
+ assert(SE);
X = state->getSVal(SE);
-
+
// Make sure that we invalidate the previous binding.
MakeNode(Dst, Ex, Pred, state->BindExpr(Ex, X, true));
}
/// ProcessSwitch - Called by GRCoreEngine. Used to generate successor
/// nodes by processing the 'effects' of a switch statement.
-void GRExprEngine::ProcessSwitch(GRSwitchNodeBuilder& builder) {
- typedef GRSwitchNodeBuilder::iterator iterator;
- const GRState* state = builder.getState();
+void GRExprEngine::ProcessSwitch(GRSwitchNodeBuilder& builder) {
+ typedef GRSwitchNodeBuilder::iterator iterator;
+ const GRState* state = builder.getState();
Expr* CondE = builder.getCondition();
SVal CondV = state->getSVal(CondE);
@@ -827,55 +827,55 @@
return;
}
- const GRState* DefaultSt = state;
+ const GRState* DefaultSt = state;
bool defaultIsFeasible = false;
-
+
for (iterator I = builder.begin(), EI = builder.end(); I != EI; ++I) {
CaseStmt* Case = cast<CaseStmt>(I.getCase());
// Evaluate the LHS of the case value.
Expr::EvalResult V1;
- bool b = Case->getLHS()->Evaluate(V1, getContext());
-
+ bool b = Case->getLHS()->Evaluate(V1, getContext());
+
// Sanity checks. These go away in Release builds.
- assert(b && V1.Val.isInt() && !V1.HasSideEffects
+ assert(b && V1.Val.isInt() && !V1.HasSideEffects
&& "Case condition must evaluate to an integer constant.");
- b = b; // silence unused variable warning
- assert(V1.Val.getInt().getBitWidth() ==
+ b = b; // silence unused variable warning
+ assert(V1.Val.getInt().getBitWidth() ==
getContext().getTypeSize(CondE->getType()));
-
+
// Get the RHS of the case, if it exists.
Expr::EvalResult V2;
-
+
if (Expr* E = Case->getRHS()) {
b = E->Evaluate(V2, getContext());
- assert(b && V2.Val.isInt() && !V2.HasSideEffects
+ assert(b && V2.Val.isInt() && !V2.HasSideEffects
&& "Case condition must evaluate to an integer constant.");
b = b; // silence unused variable warning
}
else
V2 = V1;
-
+
// FIXME: Eventually we should replace the logic below with a range
// comparison, rather than concretize the values within the range.
// This should be easy once we have "ranges" for NonLVals.
-
+
do {
- nonloc::ConcreteInt CaseVal(getBasicVals().getValue(V1.Val.getInt()));
+ nonloc::ConcreteInt CaseVal(getBasicVals().getValue(V1.Val.getInt()));
SVal Res = EvalBinOp(DefaultSt, BinaryOperator::EQ, CondV, CaseVal,
getContext().IntTy);
-
- // Now "assume" that the case matches.
+
+ // Now "assume" that the case matches.
if (const GRState* stateNew = state->assume(Res, true)) {
builder.generateCaseStmtNode(I, stateNew);
-
+
// If CondV evaluates to a constant, then we know that this
// is the *only* case that we can take, so stop evaluating the
// others.
if (isa<nonloc::ConcreteInt>(CondV))
return;
}
-
+
// Now "assume" that the case doesn't match. Add this state
// to the default state (if it is feasible).
if (const GRState *stateNew = DefaultSt->assume(Res, false)) {
@@ -886,15 +886,15 @@
// Concretize the next value in the range.
if (V1.Val.getInt() == V2.Val.getInt())
break;
-
+
++V1.Val.getInt();
assert (V1.Val.getInt() <= V2.Val.getInt());
-
+
} while (true);
}
-
+
// If we reach here, then we know that the default branch is
- // possible.
+ // possible.
if (defaultIsFeasible) builder.generateDefaultCaseNode(DefaultSt);
}
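
ProcessSwitch above concretizes each case label (including GNU case ranges) into individual integers, forks a state where the condition equals each value, and folds the negations into a running default state that is emitted only if it stays feasible. Below is a sketch with a finite set of candidate values standing in for the constraint manager; everything in it is simplified for illustration.

#include <cstdio>
#include <set>
#include <utility>
#include <vector>

typedef std::set<int> State;  // possible values of the switch condition

// assume(cond == V): .first says whether the branch is feasible,
// .second is the narrowed state on that branch.
static std::pair<bool, State> assumeEq(const State& S, int V, bool Truth) {
  State R;
  if (Truth) {
    if (S.count(V))
      R.insert(V);
  } else {
    R = S;
    R.erase(V);
  }
  return std::make_pair(!R.empty(), R);
}

int main() {
  State Cond;                                   // condition is one of 1..5
  for (int V = 1; V <= 5; ++V) Cond.insert(V);

  // case 1 ... 2:  and  case 4:
  std::vector<std::pair<int, int> > Cases;
  Cases.push_back(std::make_pair(1, 2));
  Cases.push_back(std::make_pair(4, 4));

  State DefaultSt = Cond;
  bool DefaultFeasible = false;

  for (size_t i = 0; i < Cases.size(); ++i)
    for (int V = Cases[i].first; V <= Cases[i].second; ++V) {
      // Fork a state where the condition matches this concrete value.
      if (assumeEq(Cond, V, true).first)
        std::printf("generate node for case %d\n", V);
      // Fold the negation into the running default state.
      std::pair<bool, State> Def = assumeEq(DefaultSt, V, false);
      if (Def.first) {
        DefaultFeasible = true;
        DefaultSt = Def.second;
      }
    }

  if (DefaultFeasible)                          // 3 and 5 are still possible
    std::printf("generate default node (%u values left)\n",
                (unsigned)DefaultSt.size());
  return 0;
}
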
@@ -904,62 +904,62 @@
void GRExprEngine::VisitLogicalExpr(BinaryOperator* B, ExplodedNode* Pred,
ExplodedNodeSet& Dst) {
-
+
assert(B->getOpcode() == BinaryOperator::LAnd ||
B->getOpcode() == BinaryOperator::LOr);
-
+
assert(B == CurrentStmt && Pred->getLocationContext()->getCFG()->isBlkExpr(B));
-
+
const GRState* state = GetState(Pred);
SVal X = state->getSVal(B);
assert(X.isUndef());
-
+
Expr* Ex = (Expr*) cast<UndefinedVal>(X).getData();
-
+
assert(Ex);
-
+
if (Ex == B->getRHS()) {
-
+
X = state->getSVal(Ex);
-
+
// Handle undefined values.
-
+
if (X.isUndef()) {
MakeNode(Dst, B, Pred, state->BindExpr(B, X));
return;
}
-
+
// We took the RHS. Because the value of the '&&' or '||' expression must
// evaluate to 0 or 1, we must assume the value of the RHS evaluates to 0
// or 1. Alternatively, we could take a lazy approach, and calculate this
// value later when necessary. We don't have the machinery in place for
// this right now, and since most logical expressions are used for branches,
- // the payoff is not likely to be large. Instead, we do eager evaluation.
+ // the payoff is not likely to be large. Instead, we do eager evaluation.
if (const GRState *newState = state->assume(X, true))
- MakeNode(Dst, B, Pred,
+ MakeNode(Dst, B, Pred,
newState->BindExpr(B, ValMgr.makeIntVal(1U, B->getType())));
-
+
if (const GRState *newState = state->assume(X, false))
- MakeNode(Dst, B, Pred,
+ MakeNode(Dst, B, Pred,
newState->BindExpr(B, ValMgr.makeIntVal(0U, B->getType())));
}
else {
// We took the LHS expression. Depending on whether we are '&&' or
// '||' we know what the value of the expression is via properties of
// the short-circuiting.
- X = ValMgr.makeIntVal(B->getOpcode() == BinaryOperator::LAnd ? 0U : 1U,
+ X = ValMgr.makeIntVal(B->getOpcode() == BinaryOperator::LAnd ? 0U : 1U,
B->getType());
MakeNode(Dst, B, Pred, state->BindExpr(B, X));
}
}
-
+
//===----------------------------------------------------------------------===//
// Transfer functions: Loads and stores.
//===----------------------------------------------------------------------===//
-void GRExprEngine::VisitDeclRefExpr(DeclRefExpr *Ex, ExplodedNode *Pred,
+void GRExprEngine::VisitDeclRefExpr(DeclRefExpr *Ex, ExplodedNode *Pred,
ExplodedNodeSet &Dst, bool asLValue) {
-
+
const GRState* state = GetState(Pred);
const NamedDecl* D = Ex->getDecl();
@@ -989,20 +989,20 @@
ProgramPoint::PostLValueKind);
return;
}
-
+
assert (false &&
"ValueDecl support for this ValueDecl not implemented.");
}
/// VisitArraySubscriptExpr - Transfer function for array accesses
-void GRExprEngine::VisitArraySubscriptExpr(ArraySubscriptExpr* A,
+void GRExprEngine::VisitArraySubscriptExpr(ArraySubscriptExpr* A,
ExplodedNode* Pred,
ExplodedNodeSet& Dst, bool asLValue){
-
+
Expr* Base = A->getBase()->IgnoreParens();
Expr* Idx = A->getIdx()->IgnoreParens();
ExplodedNodeSet Tmp;
-
+
if (Base->getType()->isVectorType()) {
// For vector types get its lvalue.
// FIXME: This may not be correct. Is the rvalue of a vector its location?
@@ -1010,13 +1010,13 @@
// semantics.
VisitLValue(Base, Pred, Tmp);
}
- else
+ else
Visit(Base, Pred, Tmp); // Get Base's rvalue, which should be a LocVal.
-
+
for (ExplodedNodeSet::iterator I1=Tmp.begin(), E1=Tmp.end(); I1!=E1; ++I1) {
ExplodedNodeSet Tmp2;
Visit(Idx, *I1, Tmp2); // Evaluate the index.
-
+
for (ExplodedNodeSet::iterator I2=Tmp2.begin(),E2=Tmp2.end();I2!=E2; ++I2) {
const GRState* state = GetState(*I2);
SVal V = state->getLValue(A->getType(), state->getSVal(Base),
@@ -1034,15 +1034,15 @@
/// VisitMemberExpr - Transfer function for member expressions.
void GRExprEngine::VisitMemberExpr(MemberExpr* M, ExplodedNode* Pred,
ExplodedNodeSet& Dst, bool asLValue) {
-
+
Expr* Base = M->getBase()->IgnoreParens();
ExplodedNodeSet Tmp;
-
- if (M->isArrow())
+
+ if (M->isArrow())
Visit(Base, Pred, Tmp); // p->f = ... or ... = p->f
else
VisitLValue(Base, Pred, Tmp); // x.f = ... or ... = x.f
-
+
FieldDecl *Field = dyn_cast<FieldDecl>(M->getMemberDecl());
if (!Field) // FIXME: skipping member expressions for non-fields
return;
@@ -1068,7 +1068,7 @@
const GRState* state, SVal location, SVal Val) {
const GRState* newState = 0;
-
+
if (location.isUnknown()) {
// We know that the new state will be the same as the old state since
// the location of the binding is "unknown". Consequently, there
@@ -1086,7 +1086,7 @@
// doesn't do anything, just auto-propagate the current state.
GRStmtNodeBuilderRef BuilderRef(Dst, *Builder, *this, Pred, newState, Ex,
newState != state);
-
+
getTF().EvalBind(BuilderRef, location, Val);
}
@@ -1099,19 +1099,19 @@
void GRExprEngine::EvalStore(ExplodedNodeSet& Dst, Expr* Ex, ExplodedNode* Pred,
const GRState* state, SVal location, SVal Val,
const void *tag) {
-
+
assert (Builder && "GRStmtNodeBuilder must be defined.");
-
+
// Evaluate the location (checks for bad dereferences).
Pred = EvalLocation(Ex, Pred, state, location, tag);
-
+
if (!Pred)
return;
assert (!location.isUndef());
state = GetState(Pred);
- // Proceed with the store.
+ // Proceed with the store.
SaveAndRestore<ProgramPoint::Kind> OldSPointKind(Builder->PointKind);
SaveAndRestore<const void*> OldTag(Builder->Tag);
Builder->PointKind = ProgramPoint::PostStoreKind;
@@ -1123,14 +1123,14 @@
const GRState* state, SVal location,
const void *tag) {
- // Evaluate the location (checks for bad dereferences).
+ // Evaluate the location (checks for bad dereferences).
Pred = EvalLocation(Ex, Pred, state, location, tag);
-
+
if (!Pred)
return;
-
+
state = GetState(Pred);
-
+
// Proceed with the load.
ProgramPoint::Kind K = ProgramPoint::PostLoadKind;
@@ -1144,7 +1144,7 @@
}
else {
SVal V = state->getSVal(cast<Loc>(location), Ex->getType());
-
+
// Casts can create weird scenarios where a location must be implicitly
// converted to something else. For example:
//
@@ -1152,19 +1152,19 @@
// int *y = (int*) &x; // void** -> int* cast.
// invalidate(y); // 'x' now binds to a symbolic region
// int z = *y;
- //
+ //
//if (isa<Loc>(V) && !Loc::IsLocType(Ex->getType())) {
// V = EvalCast(V, Ex->getType());
//}
-
+
MakeNode(Dst, Ex, Pred, state->BindExpr(Ex, V), K, tag);
}
}
-void GRExprEngine::EvalStore(ExplodedNodeSet& Dst, Expr* Ex, Expr* StoreE,
- ExplodedNode* Pred, const GRState* state,
+void GRExprEngine::EvalStore(ExplodedNodeSet& Dst, Expr* Ex, Expr* StoreE,
+ ExplodedNode* Pred, const GRState* state,
SVal location, SVal Val, const void *tag) {
-
+
ExplodedNodeSet TmpDst;
EvalStore(TmpDst, StoreE, Pred, state, location, Val, tag);
@@ -1175,60 +1175,60 @@
ExplodedNode* GRExprEngine::EvalLocation(Stmt* Ex, ExplodedNode* Pred,
const GRState* state, SVal location,
const void *tag) {
-
+
SaveAndRestore<const void*> OldTag(Builder->Tag);
Builder->Tag = tag;
-
- // Check for loads/stores from/to undefined values.
+
+ // Check for loads/stores from/to undefined values.
if (location.isUndef()) {
ExplodedNode* N =
Builder->generateNode(Ex, state, Pred,
ProgramPoint::PostUndefLocationCheckFailedKind);
-
+
if (N) {
N->markAsSink();
UndefDeref.insert(N);
}
-
+
return 0;
}
-
+
// Check for loads/stores from/to unknown locations. Treat as No-Ops.
if (location.isUnknown())
return Pred;
-
+
// During a load, one of two possible situations arises:
// (1) A crash, because the location (pointer) was NULL.
// (2) The location (pointer) is not NULL, and the dereference works.
- //
+ //
// We add these assumptions.
-
- Loc LV = cast<Loc>(location);
-
+
+ Loc LV = cast<Loc>(location);
+
// "Assume" that the pointer is not NULL.
const GRState *StNotNull = state->assume(LV, true);
-
+
// "Assume" that the pointer is NULL.
const GRState *StNull = state->assume(LV, false);
- if (StNull) {
+ if (StNull) {
// Use the Generic Data Map to mark in the state what lval was null.
const SVal* PersistentLV = getBasicVals().getPersistentSVal(LV);
StNull = StNull->set<GRState::NullDerefTag>(PersistentLV);
-
+
// We don't use "MakeNode" here because the node will be a sink
// and we have no intention of processing it later.
ExplodedNode* NullNode =
- Builder->generateNode(Ex, StNull, Pred,
+ Builder->generateNode(Ex, StNull, Pred,
ProgramPoint::PostNullCheckFailedKind);
- if (NullNode) {
- NullNode->markAsSink();
+ if (NullNode) {
+ NullNode->markAsSink();
if (StNotNull) ImplicitNullDeref.insert(NullNode);
else ExplicitNullDeref.insert(NullNode);
}
}
-
+
if (!StNotNull)
return NULL;
@@ -1245,9 +1245,9 @@
SVal NumElements = getStoreManager().getSizeInElements(StNotNull,
ER->getSuperRegion());
- const GRState * StInBound = StNotNull->assumeInBound(Idx, NumElements,
+ const GRState * StInBound = StNotNull->assumeInBound(Idx, NumElements,
true);
- const GRState* StOutBound = StNotNull->assumeInBound(Idx, NumElements,
+ const GRState* StOutBound = StNotNull->assumeInBound(Idx, NumElements,
false);
if (StOutBound) {
@@ -1273,7 +1273,7 @@
}
}
#endif
-
+
// Generate a new node indicating the checks succeed.
return Builder->generateNode(Ex, StNotNull, Pred,
ProgramPoint::PostLocationChecksSucceedKind);
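
EvalLocation above bifurcates the state at every dereference: the non-null assumption continues the path, while a feasible null assumption becomes a sink that is reported as an explicit or implicit null dereference, depending on whether the non-null case was also feasible. Below is a sketch over a three-valued abstraction of what is known about the pointer; the enum and helper are invented for the example.

#include <cstdio>

enum PtrFact { MustBeNull, MustBeNonNull, MayBeNull };

struct Report { bool NullDeref; bool Explicit; };

// Returns true if the path may continue past the dereference.
static bool evalLocation(PtrFact P, Report& R) {
  bool NotNullFeasible = (P != MustBeNull);
  bool NullFeasible    = (P != MustBeNonNull);

  if (NullFeasible) {
    R.NullDeref = true;
    // If only the null case is feasible the bug is unconditional
    // ("explicit"); otherwise it only happens on some paths ("implicit").
    R.Explicit = !NotNullFeasible;
  }
  return NotNullFeasible;  // continue with "pointer != NULL" assumed
}

int main() {
  const char* Names[] = {"must-be-null", "must-be-non-null", "may-be-null"};
  for (int F = MustBeNull; F <= MayBeNull; ++F) {
    Report R = {false, false};
    bool Continues = evalLocation(static_cast<PtrFact>(F), R);
    std::printf("%-16s -> continues=%d nullderef=%d explicit=%d\n",
                Names[F], Continues, R.NullDeref, R.Explicit);
  }
  return 0;
}
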
@@ -1292,45 +1292,45 @@
static bool EvalOSAtomicCompareAndSwap(ExplodedNodeSet& Dst,
GRExprEngine& Engine,
GRStmtNodeBuilder& Builder,
- CallExpr* CE, SVal L,
+ CallExpr* CE, SVal L,
ExplodedNode* Pred) {
// Not enough arguments to match OSAtomicCompareAndSwap?
if (CE->getNumArgs() != 3)
return false;
-
+
ASTContext &C = Engine.getContext();
Expr *oldValueExpr = CE->getArg(0);
QualType oldValueType = C.getCanonicalType(oldValueExpr->getType());
Expr *newValueExpr = CE->getArg(1);
QualType newValueType = C.getCanonicalType(newValueExpr->getType());
-
+
// Do the types of 'oldValue' and 'newValue' match?
if (oldValueType != newValueType)
return false;
-
+
Expr *theValueExpr = CE->getArg(2);
const PointerType *theValueType =
theValueExpr->getType()->getAs<PointerType>();
-
+
// theValueType not a pointer?
if (!theValueType)
return false;
-
+
QualType theValueTypePointee =
C.getCanonicalType(theValueType->getPointeeType()).getUnqualifiedType();
-
+
// The pointee must match newValueType and oldValueType.
if (theValueTypePointee != newValueType)
return false;
-
+
static unsigned magic_load = 0;
static unsigned magic_store = 0;
const void *OSAtomicLoadTag = &magic_load;
const void *OSAtomicStoreTag = &magic_store;
-
+
// Load 'theValue'.
const GRState *state = Pred->getState();
ExplodedNodeSet Tmp;
@@ -1339,41 +1339,41 @@
for (ExplodedNodeSet::iterator I = Tmp.begin(), E = Tmp.end();
I != E; ++I) {
-
+
ExplodedNode *N = *I;
const GRState *stateLoad = N->getState();
SVal theValueVal = stateLoad->getSVal(theValueExpr);
SVal oldValueVal = stateLoad->getSVal(oldValueExpr);
-
+
// FIXME: Issue an error.
if (theValueVal.isUndef() || oldValueVal.isUndef()) {
- return false;
+ return false;
}
-
+
SValuator &SVator = Engine.getSValuator();
-
+
// Perform the comparison.
SVal Cmp = SVator.EvalBinOp(stateLoad, BinaryOperator::EQ, theValueVal,
oldValueVal, Engine.getContext().IntTy);
const GRState *stateEqual = stateLoad->assume(Cmp, true);
-
+
// Were they equal?
if (stateEqual) {
// Perform the store.
ExplodedNodeSet TmpStore;
SVal val = stateEqual->getSVal(newValueExpr);
-
+
// Handle implicit value casts.
if (const TypedRegion *R =
dyn_cast_or_null<TypedRegion>(location.getAsRegion())) {
llvm::tie(state, val) = SVator.EvalCast(val, state, R->getValueType(C),
newValueExpr->getType());
- }
-
- Engine.EvalStore(TmpStore, theValueExpr, N, stateEqual, location,
+ }
+
+ Engine.EvalStore(TmpStore, theValueExpr, N, stateEqual, location,
val, OSAtomicStoreTag);
-
+
// Now bind the result of the comparison.
for (ExplodedNodeSet::iterator I2 = TmpStore.begin(),
E2 = TmpStore.end(); I2 != E2; ++I2) {
@@ -1383,14 +1383,14 @@
Engine.MakeNode(Dst, CE, predNew, stateNew->BindExpr(CE, Res));
}
}
-
+
// Were they not equal?
if (const GRState *stateNotEqual = stateLoad->assume(Cmp, false)) {
SVal Res = Engine.getValueManager().makeTruthVal(false, CE->getType());
Engine.MakeNode(Dst, CE, N, stateNotEqual->BindExpr(CE, Res));
}
}
-
+
return true;
}
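
EvalOSAtomicCompareAndSwap above simulates the compare-and-swap primitive inside the analyzer: load the current value, fork on current == oldValue, perform the store and bind true on the equal branch, and bind false on the other. Below is a sketch over a toy state that tracks the set of values the memory cell may hold; the state representation and function names are invented for illustration.

#include <cstdio>
#include <set>
#include <vector>

struct State {
  std::set<int> Cell;   // possible values stored in *theValue
  bool CASResult;       // value bound to the call expression on this path
};

// Returns every successor state of CompareAndSwap(old, new, &cell).
static std::vector<State> evalCAS(const State& In, int OldV, int NewV) {
  std::vector<State> Out;
  if (In.Cell.count(OldV)) {            // equal branch is feasible
    State Eq = In;
    Eq.Cell.clear();
    Eq.Cell.insert(NewV);               // the store happened
    Eq.CASResult = true;
    Out.push_back(Eq);
  }
  State NotEq = In;
  NotEq.Cell.erase(OldV);               // assume the comparison failed
  if (!NotEq.Cell.empty()) {            // not-equal branch is feasible
    NotEq.CASResult = false;
    Out.push_back(NotEq);
  }
  return Out;
}

int main() {
  State In;
  In.Cell.insert(0);                    // cell is either 0 or 1
  In.Cell.insert(1);
  In.CASResult = false;
  std::vector<State> Succs = evalCAS(In, /*OldV=*/0, /*NewV=*/5);
  for (size_t i = 0; i < Succs.size(); ++i)
    std::printf("successor: result=%d, cell may be %u value(s)\n",
                Succs[i].CASResult, (unsigned)Succs[i].Cell.size());
  return 0;
}
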
@@ -1404,7 +1404,7 @@
return false;
const char *FName = FD->getNameAsCString();
-
+
// Check for compare and swap.
if (strncmp(FName, "OSAtomicCompareAndSwap", 22) == 0 ||
strncmp(FName, "objc_atomicCompareAndSwap", 25) == 0)
@@ -1418,12 +1418,12 @@
// Transfer function: Function calls.
//===----------------------------------------------------------------------===//
static void MarkNoReturnFunction(const FunctionDecl *FD, CallExpr *CE,
- const GRState *state,
+ const GRState *state,
GRStmtNodeBuilder *Builder) {
if (!FD)
return;
- if (FD->getAttr<NoReturnAttr>() ||
+ if (FD->getAttr<NoReturnAttr>() ||
FD->getAttr<AnalyzerNoReturnAttr>())
Builder->BuildSinks = true;
else {
@@ -1432,11 +1432,11 @@
// potentially cache these results.
const char* s = FD->getIdentifier()->getName();
unsigned n = strlen(s);
-
+
switch (n) {
default:
break;
-
+
case 4:
if (!memcmp(s, "exit", 4)) Builder->BuildSinks = true;
break;
@@ -1460,37 +1460,37 @@
Builder->BuildSinks = true;
break;
}
-
+
// FIXME: This is just a wrapper around throwing an exception.
// Eventually inter-procedural analysis should handle this easily.
if (!memcmp(s, "ziperr", 6)) Builder->BuildSinks = true;
break;
-
+
case 7:
if (!memcmp(s, "assfail", 7)) Builder->BuildSinks = true;
break;
-
+
case 8:
- if (!memcmp(s ,"db_error", 8) ||
+ if (!memcmp(s ,"db_error", 8) ||
!memcmp(s, "__assert", 8))
Builder->BuildSinks = true;
break;
-
+
case 12:
if (!memcmp(s, "__assert_rtn", 12)) Builder->BuildSinks = true;
break;
-
+
case 13:
if (!memcmp(s, "__assert_fail", 13)) Builder->BuildSinks = true;
break;
-
+
case 14:
if (!memcmp(s, "dtrace_assfail", 14) ||
!memcmp(s, "yy_fatal_error", 14))
Builder->BuildSinks = true;
break;
-
+
case 26:
if (!memcmp(s, "_XCAssertionFailureHandler", 26) ||
!memcmp(s, "_DTAssertionFailureHandler", 26) ||
@@ -1499,7 +1499,7 @@
break;
}
-
+
}
}
@@ -1508,7 +1508,7 @@
ExplodedNodeSet &Dst) {
if (!FD)
return false;
-
+
unsigned id = FD->getBuiltinID(getContext());
if (!id)
return false;
@@ -1518,18 +1518,18 @@
switch (id) {
case Builtin::BI__builtin_expect: {
// For __builtin_expect, just return the value of the subexpression.
- assert (CE->arg_begin() != CE->arg_end());
+ assert (CE->arg_begin() != CE->arg_end());
SVal X = state->getSVal(*(CE->arg_begin()));
MakeNode(Dst, CE, Pred, state->BindExpr(CE, X));
return true;
}
-
+
case Builtin::BI__builtin_alloca: {
// FIXME: Refactor into StoreManager itself?
MemRegionManager& RM = getStateManager().getRegionManager();
const MemRegion* R =
RM.getAllocaRegion(CE, Builder->getCurrentBlockCount());
-
+
// Set the extent of the region in bytes. This enables us to use the
// SVal of the argument directly. If we save the extent in bits, we
// cannot represent values like symbol*8.
@@ -1543,22 +1543,21 @@
return false;
}
-void GRExprEngine::EvalCall(ExplodedNodeSet& Dst, CallExpr* CE, SVal L,
+void GRExprEngine::EvalCall(ExplodedNodeSet& Dst, CallExpr* CE, SVal L,
ExplodedNode* Pred) {
assert (Builder && "GRStmtNodeBuilder must be defined.");
-
+
// FIXME: Allow us to chain together transfer functions.
if (EvalOSAtomic(Dst, *this, *Builder, CE, L, Pred))
return;
-
+
getTF().EvalCall(Dst, *this, *Builder, CE, L, Pred);
}
void GRExprEngine::VisitCall(CallExpr* CE, ExplodedNode* Pred,
CallExpr::arg_iterator AI,
CallExpr::arg_iterator AE,
- ExplodedNodeSet& Dst)
-{
+ ExplodedNodeSet& Dst) {
// Determine the type of function we're calling (if available).
const FunctionProtoType *Proto = NULL;
QualType FnType = CE->getCallee()->IgnoreParens()->getType();
@@ -1571,10 +1570,10 @@
void GRExprEngine::VisitCallRec(CallExpr* CE, ExplodedNode* Pred,
CallExpr::arg_iterator AI,
CallExpr::arg_iterator AE,
- ExplodedNodeSet& Dst,
- const FunctionProtoType *Proto,
+ ExplodedNodeSet& Dst,
+ const FunctionProtoType *Proto,
unsigned ParamIdx) {
-
+
// Process the arguments.
if (AI != AE) {
// If the call argument is being bound to a reference parameter,
@@ -1583,17 +1582,17 @@
if (Proto && ParamIdx < Proto->getNumArgs())
VisitAsLvalue = Proto->getArgType(ParamIdx)->isReferenceType();
- ExplodedNodeSet DstTmp;
+ ExplodedNodeSet DstTmp;
if (VisitAsLvalue)
- VisitLValue(*AI, Pred, DstTmp);
+ VisitLValue(*AI, Pred, DstTmp);
else
- Visit(*AI, Pred, DstTmp);
+ Visit(*AI, Pred, DstTmp);
++AI;
-
+
for (ExplodedNodeSet::iterator DI=DstTmp.begin(), DE=DstTmp.end(); DI != DE;
++DI)
VisitCallRec(CE, *DI, AI, AE, Dst, Proto, ParamIdx + 1);
-
+
return;
}
@@ -1601,17 +1600,17 @@
// the callee expression.
ExplodedNodeSet DstTmp;
Expr* Callee = CE->getCallee()->IgnoreParens();
-
+
{ // Enter new scope to make the lifetime of 'DstTmp2' bounded.
ExplodedNodeSet DstTmp2;
Visit(Callee, Pred, DstTmp2);
-
+
// Perform the previsit of the CallExpr, storing the results in DstTmp.
CheckerVisit(CE, DstTmp, DstTmp2, true);
}
-
+
// Finally, evaluate the function call.
- for (ExplodedNodeSet::iterator DI = DstTmp.begin(), DE = DstTmp.end();
+ for (ExplodedNodeSet::iterator DI = DstTmp.begin(), DE = DstTmp.end();
DI != DE; ++DI) {
const GRState* state = GetState(*DI);
@@ -1621,25 +1620,25 @@
// function pointer values that are symbolic).
// Check for the "noreturn" attribute.
-
+
SaveAndRestore<bool> OldSink(Builder->BuildSinks);
const FunctionDecl* FD = L.getAsFunctionDecl();
MarkNoReturnFunction(FD, CE, state, Builder);
-
+
// Evaluate the call.
if (EvalBuiltinFunction(FD, CE, *DI, Dst))
continue;
- // Dispatch to the plug-in transfer function.
-
+ // Dispatch to the plug-in transfer function.
+
unsigned size = Dst.size();
SaveOr OldHasGen(Builder->HasGeneratedNode);
EvalCall(Dst, CE, L, *DI);
-
+
// Handle the case where no nodes were generated. Auto-generate a node that
// contains the updated state if we aren't generating sinks.
-
+
if (!Builder->BuildSinks && Dst.size() == size &&
!Builder->HasGeneratedNode)
MakeNode(Dst, CE, *DI, state);
@@ -1656,31 +1655,31 @@
void GRExprEngine::EvalEagerlyAssume(ExplodedNodeSet &Dst, ExplodedNodeSet &Src, Expr *Ex) {
for (ExplodedNodeSet::iterator I=Src.begin(), E=Src.end(); I!=E; ++I) {
ExplodedNode *Pred = *I;
-
+
// Test if the previous node was at the same expression. This can happen
// when the expression fails to evaluate to anything meaningful and
// (as an optimization) we don't generate a node.
- ProgramPoint P = Pred->getLocation();
+ ProgramPoint P = Pred->getLocation();
if (!isa<PostStmt>(P) || cast<PostStmt>(P).getStmt() != Ex) {
- Dst.Add(Pred);
+ Dst.Add(Pred);
continue;
- }
+ }
- const GRState* state = Pred->getState();
- SVal V = state->getSVal(Ex);
+ const GRState* state = Pred->getState();
+ SVal V = state->getSVal(Ex);
if (isa<nonloc::SymExprVal>(V)) {
// First assume that the condition is true.
if (const GRState *stateTrue = state->assume(V, true)) {
- stateTrue = stateTrue->BindExpr(Ex,
+ stateTrue = stateTrue->BindExpr(Ex,
ValMgr.makeIntVal(1U, Ex->getType()));
- Dst.Add(Builder->generateNode(PostStmtCustom(Ex,
+ Dst.Add(Builder->generateNode(PostStmtCustom(Ex,
&EagerlyAssumeTag, Pred->getLocationContext()),
stateTrue, Pred));
}
-
+
// Next, assume that the condition is false.
if (const GRState *stateFalse = state->assume(V, false)) {
- stateFalse = stateFalse->BindExpr(Ex,
+ stateFalse = stateFalse->BindExpr(Ex,
ValMgr.makeIntVal(0U, Ex->getType()));
Dst.Add(Builder->generateNode(PostStmtCustom(Ex, &EagerlyAssumeTag,
Pred->getLocationContext()),
@@ -1699,16 +1698,16 @@
void GRExprEngine::VisitObjCIvarRefExpr(ObjCIvarRefExpr* Ex,
ExplodedNode* Pred, ExplodedNodeSet& Dst,
bool asLValue) {
-
+
Expr* Base = cast<Expr>(Ex->getBase());
ExplodedNodeSet Tmp;
Visit(Base, Pred, Tmp);
-
+
for (ExplodedNodeSet::iterator I=Tmp.begin(), E=Tmp.end(); I!=E; ++I) {
const GRState* state = GetState(*I);
SVal BaseVal = state->getSVal(Base);
SVal location = state->getLValue(Ex->getDecl(), BaseVal);
-
+
if (asLValue)
MakeNode(Dst, Ex, *I, state->BindExpr(Ex, location));
else
@@ -1722,7 +1721,7 @@
void GRExprEngine::VisitObjCForCollectionStmt(ObjCForCollectionStmt* S,
ExplodedNode* Pred, ExplodedNodeSet& Dst) {
-
+
// ObjCForCollectionStmts are processed in two places. This method
// handles the case where an ObjCForCollectionStmt* occurs as one of the
// statements within a basic block. This transfer function does two things:
@@ -1734,7 +1733,7 @@
// whether or not the container has any more elements. This value
// will be tested in ProcessBranch. We need to explicitly bind
// this value because a container can contain nil elements.
- //
+ //
// FIXME: Eventually this logic should actually do dispatches to
// 'countByEnumeratingWithState:objects:count:' (NSFastEnumeration).
// This will require simulating a temporary NSFastEnumerationState, either
@@ -1747,10 +1746,10 @@
// For now: simulate (1) by assigning either a symbol or nil if the
// container is empty. Thus this transfer function will by default
// result in state splitting.
-
+
Stmt* elem = S->getElement();
SVal ElementV;
-
+
if (DeclStmt* DS = dyn_cast<DeclStmt>(elem)) {
VarDecl* ElemD = cast<VarDecl>(DS->getSingleDecl());
assert (ElemD->getInit() == 0);
@@ -1761,7 +1760,7 @@
ExplodedNodeSet Tmp;
VisitLValue(cast<Expr>(elem), Pred, Tmp);
-
+
for (ExplodedNodeSet::iterator I = Tmp.begin(), E = Tmp.end(); I!=E; ++I) {
const GRState* state = GetState(*I);
VisitObjCForCollectionStmtAux(S, *I, Dst, state->getSVal(elem));
@@ -1771,27 +1770,27 @@
void GRExprEngine::VisitObjCForCollectionStmtAux(ObjCForCollectionStmt* S,
ExplodedNode* Pred, ExplodedNodeSet& Dst,
SVal ElementV) {
-
-
+
+
// Get the current state. Use 'EvalLocation' to determine if it is a null
// pointer, etc.
Stmt* elem = S->getElement();
-
+
Pred = EvalLocation(elem, Pred, GetState(Pred), ElementV);
if (!Pred)
return;
-
+
const GRState *state = GetState(Pred);
// Handle the case where the container still has elements.
SVal TrueV = ValMgr.makeTruthVal(1);
const GRState *hasElems = state->BindExpr(S, TrueV);
-
+
// Handle the case where the container has no elements.
SVal FalseV = ValMgr.makeTruthVal(0);
const GRState *noElems = state->BindExpr(S, FalseV);
-
+
if (loc::MemRegionVal* MV = dyn_cast<loc::MemRegionVal>(&ElementV))
if (const TypedRegion* R = dyn_cast<TypedRegion>(MV->getRegion())) {
// FIXME: The proper thing to do is to really iterate over the
@@ -1805,10 +1804,10 @@
hasElems = hasElems->bindLoc(ElementV, V);
// Bind the location to 'nil' on the false branch.
- SVal nilV = ValMgr.makeIntVal(0, T);
- noElems = noElems->bindLoc(ElementV, nilV);
+ SVal nilV = ValMgr.makeIntVal(0, T);
+ noElems = noElems->bindLoc(ElementV, nilV);
}
-
+
// Create the new nodes.
MakeNode(Dst, S, Pred, hasElems);
MakeNode(Dst, S, Pred, noElems);
@@ -1820,38 +1819,38 @@
void GRExprEngine::VisitObjCMessageExpr(ObjCMessageExpr* ME, ExplodedNode* Pred,
ExplodedNodeSet& Dst){
-
+
VisitObjCMessageExprArgHelper(ME, ME->arg_begin(), ME->arg_end(),
Pred, Dst);
-}
+}
void GRExprEngine::VisitObjCMessageExprArgHelper(ObjCMessageExpr* ME,
ObjCMessageExpr::arg_iterator AI,
ObjCMessageExpr::arg_iterator AE,
ExplodedNode* Pred, ExplodedNodeSet& Dst) {
if (AI == AE) {
-
+
// Process the receiver.
-
+
if (Expr* Receiver = ME->getReceiver()) {
ExplodedNodeSet Tmp;
Visit(Receiver, Pred, Tmp);
-
+
for (ExplodedNodeSet::iterator NI = Tmp.begin(), NE = Tmp.end(); NI != NE; ++NI)
VisitObjCMessageExprDispatchHelper(ME, *NI, Dst);
-
+
return;
}
-
+
VisitObjCMessageExprDispatchHelper(ME, Pred, Dst);
return;
}
-
+
ExplodedNodeSet Tmp;
Visit(*AI, Pred, Tmp);
-
+
++AI;
-
+
for (ExplodedNodeSet::iterator NI = Tmp.begin(), NE = Tmp.end(); NI != NE; ++NI)
VisitObjCMessageExprArgHelper(ME, AI, AE, *NI, Dst);
}
@@ -1859,53 +1858,53 @@
void GRExprEngine::VisitObjCMessageExprDispatchHelper(ObjCMessageExpr* ME,
ExplodedNode* Pred,
ExplodedNodeSet& Dst) {
-
- // FIXME: More logic for the processing the method call.
-
+
+  // FIXME: More logic for processing the method call.
+
const GRState* state = GetState(Pred);
bool RaisesException = false;
-
-
+
+
if (Expr* Receiver = ME->getReceiver()) {
-
+
SVal L = state->getSVal(Receiver);
-
- // Check for undefined control-flow.
+
+ // Check for undefined control-flow.
if (L.isUndef()) {
ExplodedNode* N = Builder->generateNode(ME, state, Pred);
-
+
if (N) {
N->markAsSink();
UndefReceivers.insert(N);
}
-
+
return;
}
-
- // "Assume" that the receiver is not NULL.
+
+ // "Assume" that the receiver is not NULL.
const GRState *StNotNull = state->assume(L, true);
-
- // "Assume" that the receiver is NULL.
+
+ // "Assume" that the receiver is NULL.
const GRState *StNull = state->assume(L, false);
-
+
if (StNull) {
QualType RetTy = ME->getType();
-
+
// Check if the receiver was nil and the return value a struct.
- if(RetTy->isRecordType()) {
+ if (RetTy->isRecordType()) {
if (BR.getParentMap().isConsumedExpr(ME)) {
// The [0 ...] expressions will return garbage. Flag either an
// explicit or implicit error. Because of the structure of this
           // function we currently do not bifurcate the state graph at
// this point.
// FIXME: We should bifurcate and fill the returned struct with
- // garbage.
+ // garbage.
if (ExplodedNode* N = Builder->generateNode(ME, StNull, Pred)) {
N->markAsSink();
if (StNotNull)
NilReceiverStructRetImplicit.insert(N);
else
- NilReceiverStructRetExplicit.insert(N);
+ NilReceiverStructRetExplicit.insert(N);
}
}
}
@@ -1918,13 +1917,13 @@
// sizeof(return type)
const uint64_t returnTypeSize = Ctx.getTypeSize(ME->getType());
- if(voidPtrSize < returnTypeSize) {
+ if (voidPtrSize < returnTypeSize) {
if (ExplodedNode* N = Builder->generateNode(ME, StNull, Pred)) {
N->markAsSink();
- if(StNotNull)
+ if (StNotNull)
NilReceiverLargerThanVoidPtrRetImplicit.insert(N);
else
- NilReceiverLargerThanVoidPtrRetExplicit.insert(N);
+ NilReceiverLargerThanVoidPtrRetExplicit.insert(N);
}
}
else if (!StNotNull) {
@@ -1952,99 +1951,99 @@
// of this method should assume that the receiver is not nil.
if (!StNotNull)
return;
-
+
state = StNotNull;
}
-
+
// Check if the "raise" message was sent.
if (ME->getSelector() == RaiseSel)
RaisesException = true;
}
else {
-
+
IdentifierInfo* ClsName = ME->getClassName();
Selector S = ME->getSelector();
-
+
// Check for special instance methods.
-
- if (!NSExceptionII) {
+
+ if (!NSExceptionII) {
ASTContext& Ctx = getContext();
-
+
NSExceptionII = &Ctx.Idents.get("NSException");
}
-
+
if (ClsName == NSExceptionII) {
-
+
enum { NUM_RAISE_SELECTORS = 2 };
-
+
// Lazily create a cache of the selectors.
if (!NSExceptionInstanceRaiseSelectors) {
-
+
ASTContext& Ctx = getContext();
-
+
NSExceptionInstanceRaiseSelectors = new Selector[NUM_RAISE_SELECTORS];
-
+
llvm::SmallVector<IdentifierInfo*, NUM_RAISE_SELECTORS> II;
unsigned idx = 0;
-
- // raise:format:
+
+ // raise:format:
II.push_back(&Ctx.Idents.get("raise"));
- II.push_back(&Ctx.Idents.get("format"));
+ II.push_back(&Ctx.Idents.get("format"));
NSExceptionInstanceRaiseSelectors[idx++] =
- Ctx.Selectors.getSelector(II.size(), &II[0]);
-
- // raise:format::arguments:
+ Ctx.Selectors.getSelector(II.size(), &II[0]);
+
+        // raise:format:arguments:
II.push_back(&Ctx.Idents.get("arguments"));
NSExceptionInstanceRaiseSelectors[idx++] =
Ctx.Selectors.getSelector(II.size(), &II[0]);
}
-
+
for (unsigned i = 0; i < NUM_RAISE_SELECTORS; ++i)
if (S == NSExceptionInstanceRaiseSelectors[i]) {
RaisesException = true; break;
}
}
}
-
+
// Check for any arguments that are uninitialized/undefined.
-
+
for (ObjCMessageExpr::arg_iterator I = ME->arg_begin(), E = ME->arg_end();
I != E; ++I) {
-
+
if (state->getSVal(*I).isUndef()) {
-
+
// Generate an error node for passing an uninitialized/undefined value
// as an argument to a message expression. This node is a sink.
ExplodedNode* N = Builder->generateNode(ME, state, Pred);
-
+
if (N) {
N->markAsSink();
MsgExprUndefArgs[N] = *I;
}
-
+
return;
- }
+ }
}
-
+
// Check if we raise an exception. For now treat these as sinks. Eventually
// we will want to handle exceptions properly.
-
+
SaveAndRestore<bool> OldSink(Builder->BuildSinks);
if (RaisesException)
Builder->BuildSinks = true;
-
+
// Dispatch to plug-in transfer function.
-
+
unsigned size = Dst.size();
SaveOr OldHasGen(Builder->HasGeneratedNode);
-
+
EvalObjCMessageExpr(Dst, ME, Pred);
-
+
   // Handle the case where no nodes were generated.  Auto-generate a node
   // that contains the updated state if we aren't generating sinks.
-
+
if (!Builder->BuildSinks && Dst.size() == size && !Builder->HasGeneratedNode)
MakeNode(Dst, ME, Pred, state);
}
@@ -2065,9 +2064,9 @@
VisitLValue(Ex, Pred, S1);
else
Visit(Ex, Pred, S1);
-
+
// Check for casting to "void".
- if (T->isVoidType()) {
+ if (T->isVoidType()) {
for (ExplodedNodeSet::iterator I1 = S1.begin(), E1 = S1.end(); I1 != E1; ++I1)
Dst.Add(*I1);
@@ -2085,13 +2084,13 @@
}
void GRExprEngine::VisitCompoundLiteralExpr(CompoundLiteralExpr* CL,
- ExplodedNode* Pred,
- ExplodedNodeSet& Dst,
+ ExplodedNode* Pred,
+ ExplodedNodeSet& Dst,
bool asLValue) {
InitListExpr* ILE = cast<InitListExpr>(CL->getInitializer()->IgnoreParens());
ExplodedNodeSet Tmp;
Visit(ILE, Pred, Tmp);
-
+
for (ExplodedNodeSet::iterator I = Tmp.begin(), EI = Tmp.end(); I!=EI; ++I) {
const GRState* state = GetState(*I);
SVal ILV = state->getSVal(ILE);
@@ -2105,15 +2104,15 @@
}
void GRExprEngine::VisitDeclStmt(DeclStmt *DS, ExplodedNode *Pred,
- ExplodedNodeSet& Dst) {
+ ExplodedNodeSet& Dst) {
- // The CFG has one DeclStmt per Decl.
+ // The CFG has one DeclStmt per Decl.
Decl* D = *DS->decl_begin();
-
+
if (!D || !isa<VarDecl>(D))
return;
-
- const VarDecl* VD = dyn_cast<VarDecl>(D);
+
+ const VarDecl* VD = dyn_cast<VarDecl>(D);
Expr* InitEx = const_cast<Expr*>(VD->getInit());
// FIXME: static variables may have an initializer, but the second
@@ -2124,7 +2123,7 @@
Visit(InitEx, Pred, Tmp);
else
Tmp.Add(Pred);
-
+
for (ExplodedNodeSet::iterator I=Tmp.begin(), E=Tmp.end(); I!=E; ++I) {
const GRState* state = GetState(*I);
unsigned Count = Builder->getCurrentBlockCount();
@@ -2133,58 +2132,58 @@
QualType T = getContext().getCanonicalType(VD->getType());
if (VariableArrayType* VLA = dyn_cast<VariableArrayType>(T)) {
// FIXME: Handle multi-dimensional VLAs.
-
+
Expr* SE = VLA->getSizeExpr();
SVal Size = state->getSVal(SE);
-
+
if (Size.isUndef()) {
if (ExplodedNode* N = Builder->generateNode(DS, state, Pred)) {
- N->markAsSink();
+ N->markAsSink();
ExplicitBadSizedVLA.insert(N);
}
continue;
}
-
- const GRState* zeroState = state->assume(Size, false);
+
+ const GRState* zeroState = state->assume(Size, false);
state = state->assume(Size, true);
-
+
if (zeroState) {
if (ExplodedNode* N = Builder->generateNode(DS, zeroState, Pred)) {
- N->markAsSink();
+ N->markAsSink();
if (state)
ImplicitBadSizedVLA.insert(N);
else
ExplicitBadSizedVLA.insert(N);
}
}
-
+
if (!state)
- continue;
+ continue;
}
-
+
// Decls without InitExpr are not initialized explicitly.
const LocationContext *LC = (*I)->getLocationContext();
if (InitEx) {
SVal InitVal = state->getSVal(InitEx);
QualType T = VD->getType();
-
+
// Recover some path-sensitivity if a scalar value evaluated to
// UnknownVal.
- if (InitVal.isUnknown() ||
+ if (InitVal.isUnknown() ||
!getConstraintManager().canReasonAbout(InitVal)) {
InitVal = ValMgr.getConjuredSymbolVal(InitEx, Count);
- }
-
+ }
+
state = state->bindDecl(VD, LC, InitVal);
-
+
// The next thing to do is check if the GRTransferFuncs object wants to
// update the state based on the new binding. If the GRTransferFunc
// object doesn't do anything, just auto-propagate the current state.
GRStmtNodeBuilderRef BuilderRef(Dst, *Builder, *this, *I, state, DS,true);
getTF().EvalBind(BuilderRef, loc::MemRegionVal(state->getRegion(VD, LC)),
- InitVal);
- }
+ InitVal);
+ }
else {
state = state->bindDeclWithNoInit(VD, LC);
MakeNode(Dst, DS, *I, state);
@@ -2200,7 +2199,7 @@
llvm::ImmutableList<SVal> Vals;
ExplodedNode* N;
InitListExpr::reverse_iterator Itr;
-
+
InitListWLItem(ExplodedNode* n, llvm::ImmutableList<SVal> vals,
InitListExpr::reverse_iterator itr)
: Vals(vals), N(n), Itr(itr) {}
@@ -2208,52 +2207,52 @@
}
-void GRExprEngine::VisitInitListExpr(InitListExpr* E, ExplodedNode* Pred,
+void GRExprEngine::VisitInitListExpr(InitListExpr* E, ExplodedNode* Pred,
ExplodedNodeSet& Dst) {
const GRState* state = GetState(Pred);
QualType T = getContext().getCanonicalType(E->getType());
- unsigned NumInitElements = E->getNumInits();
+ unsigned NumInitElements = E->getNumInits();
if (T->isArrayType() || T->isStructureType() ||
T->isUnionType() || T->isVectorType()) {
llvm::ImmutableList<SVal> StartVals = getBasicVals().getEmptySValList();
-
+
// Handle base case where the initializer has no elements.
// e.g: static int* myArray[] = {};
if (NumInitElements == 0) {
SVal V = ValMgr.makeCompoundVal(T, StartVals);
MakeNode(Dst, E, Pred, state->BindExpr(E, V));
return;
- }
-
+ }
+
// Create a worklist to process the initializers.
llvm::SmallVector<InitListWLItem, 10> WorkList;
- WorkList.reserve(NumInitElements);
- WorkList.push_back(InitListWLItem(Pred, StartVals, E->rbegin()));
+ WorkList.reserve(NumInitElements);
+ WorkList.push_back(InitListWLItem(Pred, StartVals, E->rbegin()));
InitListExpr::reverse_iterator ItrEnd = E->rend();
-
+
// Process the worklist until it is empty.
while (!WorkList.empty()) {
InitListWLItem X = WorkList.back();
WorkList.pop_back();
-
+
ExplodedNodeSet Tmp;
Visit(*X.Itr, X.N, Tmp);
-
+
InitListExpr::reverse_iterator NewItr = X.Itr + 1;
for (ExplodedNodeSet::iterator NI=Tmp.begin(), NE=Tmp.end(); NI!=NE; ++NI) {
// Get the last initializer value.
state = GetState(*NI);
SVal InitV = state->getSVal(cast<Expr>(*X.Itr));
-
+
// Construct the new list of values by prepending the new value to
// the already constructed list.
llvm::ImmutableList<SVal> NewVals =
getBasicVals().consVals(InitV, X.Vals);
-
+
if (NewItr == ItrEnd) {
// Now we have a list holding all init values. Make CompoundValData.
SVal V = ValMgr.makeCompoundVal(T, NewVals);
@@ -2267,7 +2266,7 @@
}
}
}
-
+
return;
}
@@ -2293,10 +2292,10 @@
ExplodedNode* Pred,
ExplodedNodeSet& Dst) {
QualType T = Ex->getTypeOfArgument();
- uint64_t amt;
-
+ uint64_t amt;
+
if (Ex->isSizeOf()) {
- if (T == getContext().VoidTy) {
+ if (T == getContext().VoidTy) {
// sizeof(void) == 1 byte.
amt = 1;
}
@@ -2307,17 +2306,17 @@
else if (T->isObjCInterfaceType()) {
       // Some code tries to take the sizeof an ObjCInterfaceType, assuming that
// the compiler has laid out its representation. Just report Unknown
- // for these.
+ // for these.
return;
}
else {
// All other cases.
amt = getContext().getTypeSize(T) / 8;
- }
+ }
}
else // Get alignment of the type.
amt = getContext().getTypeAlign(T) / 8;
-
+
MakeNode(Dst, Ex, Pred,
GetState(Pred)->BindExpr(Ex, ValMgr.makeIntVal(amt, Ex->getType())));
}
@@ -2327,61 +2326,61 @@
ExplodedNodeSet& Dst, bool asLValue) {
switch (U->getOpcode()) {
-
+
default:
break;
-
+
case UnaryOperator::Deref: {
-
+
Expr* Ex = U->getSubExpr()->IgnoreParens();
ExplodedNodeSet Tmp;
Visit(Ex, Pred, Tmp);
-
+
for (ExplodedNodeSet::iterator I=Tmp.begin(), E=Tmp.end(); I!=E; ++I) {
-
+
const GRState* state = GetState(*I);
SVal location = state->getSVal(Ex);
-
+
if (asLValue)
MakeNode(Dst, U, *I, state->BindExpr(U, location),
ProgramPoint::PostLValueKind);
else
EvalLoad(Dst, U, *I, state, location);
- }
+ }
return;
}
-
+
case UnaryOperator::Real: {
-
+
Expr* Ex = U->getSubExpr()->IgnoreParens();
ExplodedNodeSet Tmp;
Visit(Ex, Pred, Tmp);
-
+
for (ExplodedNodeSet::iterator I=Tmp.begin(), E=Tmp.end(); I!=E; ++I) {
-
+
// FIXME: We don't have complex SValues yet.
if (Ex->getType()->isAnyComplexType()) {
// Just report "Unknown."
Dst.Add(*I);
continue;
}
-
+
// For all other types, UnaryOperator::Real is an identity operation.
assert (U->getType() == Ex->getType());
const GRState* state = GetState(*I);
MakeNode(Dst, U, *I, state->BindExpr(U, state->getSVal(Ex)));
- }
-
+ }
+
return;
}
-
+
case UnaryOperator::Imag: {
-
+
Expr* Ex = U->getSubExpr()->IgnoreParens();
ExplodedNodeSet Tmp;
Visit(Ex, Pred, Tmp);
-
+
for (ExplodedNodeSet::iterator I=Tmp.begin(), E=Tmp.end(); I!=E; ++I) {
// FIXME: We don't have complex SValues yet.
if (Ex->getType()->isAnyComplexType()) {
@@ -2389,25 +2388,25 @@
Dst.Add(*I);
continue;
}
-
+
         // For all other types, UnaryOperator::Imag returns 0.
assert (Ex->getType()->isIntegerType());
const GRState* state = GetState(*I);
SVal X = ValMgr.makeZeroVal(Ex->getType());
MakeNode(Dst, U, *I, state->BindExpr(U, X));
}
-
+
return;
}
-
- // FIXME: Just report "Unknown" for OffsetOf.
+
+ // FIXME: Just report "Unknown" for OffsetOf.
case UnaryOperator::OffsetOf:
Dst.Add(Pred);
return;
-
+
case UnaryOperator::Plus: assert (!asLValue); // FALL-THROUGH.
case UnaryOperator::Extension: {
-
+
// Unary "+" is a no-op, similar to a parentheses. We still have places
// where it may be a block-level expression, so we need to
// generate an extra node that just propagates the value of the
@@ -2416,44 +2415,44 @@
Expr* Ex = U->getSubExpr()->IgnoreParens();
ExplodedNodeSet Tmp;
Visit(Ex, Pred, Tmp);
-
- for (ExplodedNodeSet::iterator I=Tmp.begin(), E=Tmp.end(); I!=E; ++I) {
+
+ for (ExplodedNodeSet::iterator I=Tmp.begin(), E=Tmp.end(); I!=E; ++I) {
const GRState* state = GetState(*I);
MakeNode(Dst, U, *I, state->BindExpr(U, state->getSVal(Ex)));
}
-
+
return;
}
-
+
case UnaryOperator::AddrOf: {
-
+
assert(!asLValue);
Expr* Ex = U->getSubExpr()->IgnoreParens();
ExplodedNodeSet Tmp;
VisitLValue(Ex, Pred, Tmp);
-
- for (ExplodedNodeSet::iterator I=Tmp.begin(), E=Tmp.end(); I!=E; ++I) {
+
+ for (ExplodedNodeSet::iterator I=Tmp.begin(), E=Tmp.end(); I!=E; ++I) {
const GRState* state = GetState(*I);
SVal V = state->getSVal(Ex);
state = state->BindExpr(U, V);
MakeNode(Dst, U, *I, state);
}
- return;
+ return;
}
-
+
case UnaryOperator::LNot:
case UnaryOperator::Minus:
case UnaryOperator::Not: {
-
+
assert (!asLValue);
Expr* Ex = U->getSubExpr()->IgnoreParens();
ExplodedNodeSet Tmp;
Visit(Ex, Pred, Tmp);
-
- for (ExplodedNodeSet::iterator I=Tmp.begin(), E=Tmp.end(); I!=E; ++I) {
+
+ for (ExplodedNodeSet::iterator I=Tmp.begin(), E=Tmp.end(); I!=E; ++I) {
const GRState* state = GetState(*I);
-
+
// Get the value of the subexpression.
SVal V = state->getSVal(Ex);
@@ -2461,41 +2460,41 @@
MakeNode(Dst, U, *I, state->BindExpr(U, V));
continue;
}
-
+
// QualType DstT = getContext().getCanonicalType(U->getType());
// QualType SrcT = getContext().getCanonicalType(Ex->getType());
-//
+//
// if (DstT != SrcT) // Perform promotions.
-// V = EvalCast(V, DstT);
-//
+// V = EvalCast(V, DstT);
+//
// if (V.isUnknownOrUndef()) {
// MakeNode(Dst, U, *I, BindExpr(St, U, V));
// continue;
// }
-
+
switch (U->getOpcode()) {
default:
assert(false && "Invalid Opcode.");
break;
-
+
case UnaryOperator::Not:
// FIXME: Do we need to handle promotions?
state = state->BindExpr(U, EvalComplement(cast<NonLoc>(V)));
- break;
-
+ break;
+
case UnaryOperator::Minus:
// FIXME: Do we need to handle promotions?
state = state->BindExpr(U, EvalMinus(cast<NonLoc>(V)));
- break;
-
- case UnaryOperator::LNot:
-
+ break;
+
+ case UnaryOperator::LNot:
+
// C99 6.5.3.3: "The expression !E is equivalent to (0==E)."
//
// Note: technically we do "E == 0", but this is the same in the
// transfer functions as "0 == E".
SVal Result;
-
+
if (isa<Loc>(V)) {
Loc X = ValMgr.makeNull();
Result = EvalBinOp(state, BinaryOperator::EQ, cast<Loc>(V), X,
@@ -2506,15 +2505,15 @@
Result = EvalBinOp(BinaryOperator::EQ, cast<NonLoc>(V), X,
U->getType());
}
-
+
state = state->BindExpr(U, Result);
-
+
break;
}
-
+
MakeNode(Dst, U, *I, state);
}
-
+
return;
}
}
@@ -2525,28 +2524,28 @@
ExplodedNodeSet Tmp;
Expr* Ex = U->getSubExpr()->IgnoreParens();
VisitLValue(Ex, Pred, Tmp);
-
+
for (ExplodedNodeSet::iterator I = Tmp.begin(), E = Tmp.end(); I!=E; ++I) {
-
+
const GRState* state = GetState(*I);
SVal V1 = state->getSVal(Ex);
-
- // Perform a load.
+
+ // Perform a load.
ExplodedNodeSet Tmp2;
EvalLoad(Tmp2, Ex, *I, state, V1);
for (ExplodedNodeSet::iterator I2 = Tmp2.begin(), E2 = Tmp2.end(); I2!=E2; ++I2) {
-
+
state = GetState(*I2);
SVal V2 = state->getSVal(Ex);
-
- // Propagate unknown and undefined values.
+
+ // Propagate unknown and undefined values.
if (V2.isUnknownOrUndef()) {
MakeNode(Dst, U, *I2, state->BindExpr(U, V2));
continue;
}
-
- // Handle all other values.
+
+ // Handle all other values.
BinaryOperator::Opcode Op = U->isIncrementOp() ? BinaryOperator::Add
: BinaryOperator::Sub;
@@ -2560,37 +2559,37 @@
else
RHS = ValMgr.makeIntVal(1, U->getType());
- SVal Result = EvalBinOp(state, Op, V2, RHS, U->getType());
-
+ SVal Result = EvalBinOp(state, Op, V2, RHS, U->getType());
+
// Conjure a new symbol if necessary to recover precision.
if (Result.isUnknown() || !getConstraintManager().canReasonAbout(Result)){
Result = ValMgr.getConjuredSymbolVal(Ex,
Builder->getCurrentBlockCount());
-
+
// If the value is a location, ++/-- should always preserve
// non-nullness. Check if the original value was non-null, and if so
- // propagate that constraint.
+ // propagate that constraint.
if (Loc::IsLocType(U->getType())) {
SVal Constraint = EvalBinOp(state, BinaryOperator::EQ, V2,
ValMgr.makeZeroVal(U->getType()),
- getContext().IntTy);
-
+ getContext().IntTy);
+
if (!state->assume(Constraint, true)) {
// It isn't feasible for the original value to be null.
// Propagate this constraint.
Constraint = EvalBinOp(state, BinaryOperator::EQ, Result,
ValMgr.makeZeroVal(U->getType()),
getContext().IntTy);
-
+
state = state->assume(Constraint, false);
assert(state);
- }
- }
+ }
+ }
}
-
+
state = state->BindExpr(U, U->isPostfix() ? V2 : Result);
- // Perform the store.
+ // Perform the store.
EvalStore(Dst, U, *I2, state, V1, Result);
}
}
@@ -2598,7 +2597,7 @@
void GRExprEngine::VisitAsmStmt(AsmStmt* A, ExplodedNode* Pred, ExplodedNodeSet& Dst) {
VisitAsmStmtHelperOutputs(A, A->begin_outputs(), A->end_outputs(), Pred, Dst);
-}
+}
void GRExprEngine::VisitAsmStmtHelperOutputs(AsmStmt* A,
AsmStmt::outputs_iterator I,
@@ -2608,12 +2607,12 @@
VisitAsmStmtHelperInputs(A, A->begin_inputs(), A->end_inputs(), Pred, Dst);
return;
}
-
+
ExplodedNodeSet Tmp;
VisitLValue(*I, Pred, Tmp);
-
+
++I;
-
+
for (ExplodedNodeSet::iterator NI = Tmp.begin(), NE = Tmp.end(); NI != NE; ++NI)
VisitAsmStmtHelperOutputs(A, I, E, *NI, Dst);
}
@@ -2623,35 +2622,35 @@
AsmStmt::inputs_iterator E,
ExplodedNode* Pred, ExplodedNodeSet& Dst) {
if (I == E) {
-
+
// We have processed both the inputs and the outputs. All of the outputs
// should evaluate to Locs. Nuke all of their values.
-
+
// FIXME: Some day in the future it would be nice to allow a "plug-in"
// which interprets the inline asm and stores proper results in the
// outputs.
-
+
const GRState* state = GetState(Pred);
-
+
for (AsmStmt::outputs_iterator OI = A->begin_outputs(),
OE = A->end_outputs(); OI != OE; ++OI) {
-
- SVal X = state->getSVal(*OI);
+
+ SVal X = state->getSVal(*OI);
assert (!isa<NonLoc>(X)); // Should be an Lval, or unknown, undef.
-
+
if (isa<Loc>(X))
state = state->bindLoc(cast<Loc>(X), UnknownVal());
}
-
+
MakeNode(Dst, A, Pred, state);
return;
}
-
+
ExplodedNodeSet Tmp;
Visit(*I, Pred, Tmp);
-
+
++I;
-
+
for (ExplodedNodeSet::iterator NI = Tmp.begin(), NE = Tmp.end(); NI!=NE; ++NI)
VisitAsmStmtHelperInputs(A, I, E, *NI, Dst);
}
@@ -2659,16 +2658,16 @@
void GRExprEngine::EvalReturn(ExplodedNodeSet& Dst, ReturnStmt* S,
ExplodedNode* Pred) {
assert (Builder && "GRStmtNodeBuilder must be defined.");
-
- unsigned size = Dst.size();
+
+ unsigned size = Dst.size();
SaveAndRestore<bool> OldSink(Builder->BuildSinks);
SaveOr OldHasGen(Builder->HasGeneratedNode);
getTF().EvalReturn(Dst, *this, *Builder, S, Pred);
-
+
   // Handle the case where no nodes were generated.
-
+
if (!Builder->BuildSinks && Dst.size() == size && !Builder->HasGeneratedNode)
MakeNode(Dst, S, Pred, GetState(Pred));
}
@@ -2677,7 +2676,7 @@
ExplodedNodeSet& Dst) {
Expr* R = S->getRetValue();
-
+
if (!R) {
EvalReturn(Dst, S, Pred);
return;
@@ -2688,12 +2687,12 @@
for (ExplodedNodeSet::iterator I = Tmp.begin(), E = Tmp.end(); I != E; ++I) {
SVal X = (*I)->getState()->getSVal(R);
-
+
// Check if we return the address of a stack variable.
if (isa<loc::MemRegionVal>(X)) {
// Determine if the value is on the stack.
const MemRegion* R = cast<loc::MemRegionVal>(&X)->getRegion();
-
+
if (R && R->hasStackStorage()) {
// Create a special node representing the error.
if (ExplodedNode* N = Builder->generateNode(S, GetState(*I), *I)) {
@@ -2711,7 +2710,7 @@
}
continue;
}
-
+
EvalReturn(Dst, S, *I);
}
}
@@ -2727,13 +2726,13 @@
ExplodedNodeSet Tmp1;
Expr* LHS = B->getLHS()->IgnoreParens();
Expr* RHS = B->getRHS()->IgnoreParens();
-
+
// FIXME: Add proper support for ObjCImplicitSetterGetterRefExpr.
if (isa<ObjCImplicitSetterGetterRefExpr>(LHS)) {
- Visit(RHS, Pred, Dst);
+ Visit(RHS, Pred, Dst);
return;
}
-
+
if (B->isAssignmentOp())
VisitLValue(LHS, Pred, Tmp1);
else
@@ -2742,18 +2741,18 @@
for (ExplodedNodeSet::iterator I1=Tmp1.begin(), E1=Tmp1.end(); I1!=E1; ++I1) {
SVal LeftV = (*I1)->getState()->getSVal(LHS);
-
+
// Process the RHS.
-
+
ExplodedNodeSet Tmp2;
Visit(RHS, *I1, Tmp2);
ExplodedNodeSet CheckedSet;
CheckerVisit(B, CheckedSet, Tmp2, true);
-
+
// With both the LHS and RHS evaluated, process the operation itself.
-
- for (ExplodedNodeSet::iterator I2=CheckedSet.begin(), E2=CheckedSet.end();
+
+ for (ExplodedNodeSet::iterator I2=CheckedSet.begin(), E2=CheckedSet.end();
I2 != E2; ++I2) {
const GRState* state = GetState(*I2);
@@ -2761,41 +2760,41 @@
SVal RightV = state->getSVal(RHS);
BinaryOperator::Opcode Op = B->getOpcode();
-
+
switch (Op) {
-
+
case BinaryOperator::Assign: {
-
+
// EXPERIMENTAL: "Conjured" symbols.
// FIXME: Handle structs.
QualType T = RHS->getType();
-
- if ((RightV.isUnknown() ||
- !getConstraintManager().canReasonAbout(RightV))
- && (Loc::IsLocType(T) ||
+
+ if ((RightV.isUnknown() ||
+ !getConstraintManager().canReasonAbout(RightV))
+ && (Loc::IsLocType(T) ||
(T->isScalarType() && T->isIntegerType()))) {
- unsigned Count = Builder->getCurrentBlockCount();
+ unsigned Count = Builder->getCurrentBlockCount();
RightV = ValMgr.getConjuredSymbolVal(B->getRHS(), Count);
}
-
+
// Simulate the effects of a "store": bind the value of the RHS
- // to the L-Value represented by the LHS.
- EvalStore(Dst, B, LHS, *I2, state->BindExpr(B, RightV),
+ // to the L-Value represented by the LHS.
+ EvalStore(Dst, B, LHS, *I2, state->BindExpr(B, RightV),
LeftV, RightV);
continue;
}
-
+
// FALL-THROUGH.
default: {
-
+
if (B->isAssignmentOp())
break;
-
+
// Process non-assignments except commas or short-circuited
- // logical expressions (LAnd and LOr).
+ // logical expressions (LAnd and LOr).
SVal Result = EvalBinOp(state, Op, LeftV, RightV, B->getType());
-
+
if (Result.isUnknown()) {
if (OldSt != state) {
// Generate a new node if we have already created a new state.
@@ -2803,30 +2802,30 @@
}
else
Dst.Add(*I2);
-
+
continue;
}
-
+
if (Result.isUndef() && !LeftV.isUndef() && !RightV.isUndef()) {
-
+
// The operands were *not* undefined, but the result is undefined.
// This is a special node that should be flagged as an error.
-
+
if (ExplodedNode* UndefNode = Builder->generateNode(B, state, *I2)){
- UndefNode->markAsSink();
+ UndefNode->markAsSink();
UndefResults.insert(UndefNode);
}
-
+
continue;
}
-
+
// Otherwise, create a new node.
-
+
MakeNode(Dst, B, *I2, state->BindExpr(B, Result));
continue;
}
}
-
+
assert (B->isCompoundAssignmentOp());
switch (Op) {
@@ -2843,26 +2842,26 @@
case BinaryOperator::XorAssign: Op = BinaryOperator::Xor; break;
case BinaryOperator::OrAssign: Op = BinaryOperator::Or; break;
}
-
+
// Perform a load (the LHS). This performs the checks for
// null dereferences, and so on.
ExplodedNodeSet Tmp3;
SVal location = state->getSVal(LHS);
EvalLoad(Tmp3, LHS, *I2, state, location);
-
- for (ExplodedNodeSet::iterator I3=Tmp3.begin(), E3=Tmp3.end(); I3!=E3;
+
+ for (ExplodedNodeSet::iterator I3=Tmp3.begin(), E3=Tmp3.end(); I3!=E3;
++I3) {
-
+
state = GetState(*I3);
SVal V = state->getSVal(LHS);
- // Propagate undefined values (left-side).
+ // Propagate undefined values (left-side).
if (V.isUndef()) {
- EvalStore(Dst, B, LHS, *I3, state->BindExpr(B, V),
+ EvalStore(Dst, B, LHS, *I3, state->BindExpr(B, V),
location, V);
continue;
}
-
+
// Propagate unknown values (left and right-side).
if (RightV.isUnknown() || V.isUnknown()) {
EvalStore(Dst, B, LHS, *I3, state->BindExpr(B, UnknownVal()),
@@ -2874,7 +2873,7 @@
//
// The LHS is not Undef/Unknown.
// The RHS is not Unknown.
-
+
// Get the computation type.
QualType CTy =
cast<CompoundAssignOperator>(B)->getComputationResultType();
@@ -2890,24 +2889,24 @@
// Promote LHS.
llvm::tie(state, V) = SVator.EvalCast(V, state, CLHSTy, LTy);
- // Evaluate operands and promote to result type.
- if (RightV.isUndef()) {
- // Propagate undefined values (right-side).
+ // Evaluate operands and promote to result type.
+ if (RightV.isUndef()) {
+ // Propagate undefined values (right-side).
EvalStore(Dst, B, LHS, *I3, state->BindExpr(B, RightV), location,
RightV);
continue;
}
-
- // Compute the result of the operation.
+
+ // Compute the result of the operation.
SVal Result;
llvm::tie(state, Result) = SVator.EvalCast(EvalBinOp(state, Op, V,
RightV, CTy),
state, B->getType(), CTy);
-
+
if (Result.isUndef()) {
// The operands were not undefined, but the result is undefined.
if (ExplodedNode* UndefNode = Builder->generateNode(B, state, *I3)) {
- UndefNode->markAsSink();
+ UndefNode->markAsSink();
UndefResults.insert(UndefNode);
}
continue;
@@ -2915,21 +2914,21 @@
// EXPERIMENTAL: "Conjured" symbols.
// FIXME: Handle structs.
-
+
SVal LHSVal;
-
- if ((Result.isUnknown() ||
+
+ if ((Result.isUnknown() ||
!getConstraintManager().canReasonAbout(Result))
- && (Loc::IsLocType(CTy)
+ && (Loc::IsLocType(CTy)
|| (CTy->isScalarType() && CTy->isIntegerType()))) {
-
+
unsigned Count = Builder->getCurrentBlockCount();
-
+
// The symbolic value is actually for the type of the left-hand side
// expression, not the computation type, as this is the value the
// LValue on the LHS will bind to.
LHSVal = ValMgr.getConjuredSymbolVal(B->getRHS(), LTy, Count);
-
+
// However, we need to convert the symbol to the computation type.
llvm::tie(state, Result) = SVator.EvalCast(LHSVal, state, CTy, LTy);
}
@@ -2938,8 +2937,8 @@
// computation type.
llvm::tie(state, LHSVal) = SVator.EvalCast(Result, state, LTy, CTy);
}
-
- EvalStore(Dst, B, LHS, *I3, state->BindExpr(B, Result),
+
+ EvalStore(Dst, B, LHS, *I3, state->BindExpr(B, Result),
location, LHSVal);
}
}
@@ -2958,9 +2957,9 @@
template<>
struct VISIBILITY_HIDDEN DOTGraphTraits<ExplodedNode*> :
public DefaultDOTGraphTraits {
-
+
static std::string getNodeAttributes(const ExplodedNode* N, void*) {
-
+
if (GraphPrintCheckerState->isImplicitNullDeref(N) ||
GraphPrintCheckerState->isExplicitNullDeref(N) ||
GraphPrintCheckerState->isUndefDeref(N) ||
@@ -2972,50 +2971,50 @@
GraphPrintCheckerState->isBadCall(N) ||
GraphPrintCheckerState->isUndefArg(N))
return "color=\"red\",style=\"filled\"";
-
+
if (GraphPrintCheckerState->isNoReturnCall(N))
return "color=\"blue\",style=\"filled\"";
-
+
return "";
}
-
+
static std::string getNodeLabel(const ExplodedNode* N, void*,bool ShortNames){
-
+
std::string sbuf;
llvm::raw_string_ostream Out(sbuf);
// Program Location.
ProgramPoint Loc = N->getLocation();
-
+
switch (Loc.getKind()) {
case ProgramPoint::BlockEntranceKind:
- Out << "Block Entrance: B"
+ Out << "Block Entrance: B"
<< cast<BlockEntrance>(Loc).getBlock()->getBlockID();
break;
-
+
case ProgramPoint::BlockExitKind:
assert (false);
break;
-
+
default: {
if (StmtPoint *L = dyn_cast<StmtPoint>(&Loc)) {
const Stmt* S = L->getStmt();
SourceLocation SLoc = S->getLocStart();
- Out << S->getStmtClassName() << ' ' << (void*) S << ' ';
+ Out << S->getStmtClassName() << ' ' << (void*) S << ' ';
LangOptions LO; // FIXME.
S->printPretty(Out, 0, PrintingPolicy(LO));
-
- if (SLoc.isFileID()) {
+
+ if (SLoc.isFileID()) {
Out << "\\lline="
<< GraphPrintSourceManager->getInstantiationLineNumber(SLoc)
<< " col="
<< GraphPrintSourceManager->getInstantiationColumnNumber(SLoc)
<< "\\l";
}
-
+
if (isa<PreStmt>(Loc))
- Out << "\\lPreStmt\\l;";
+ Out << "\\lPreStmt\\l;";
else if (isa<PostLoad>(Loc))
Out << "\\lPostLoad\\l;";
else if (isa<PostStore>(Loc))
@@ -3026,7 +3025,7 @@
Out << "\\lPostLocationChecksSucceed\\l";
else if (isa<PostNullCheckFailed>(Loc))
Out << "\\lPostNullCheckFailed\\l";
-
+
if (GraphPrintCheckerState->isImplicitNullDeref(N))
Out << "\\|Implicit-Null Dereference.\\l";
else if (GraphPrintCheckerState->isExplicitNullDeref(N))
@@ -3047,43 +3046,43 @@
Out << "\\|Call to NULL/Undefined.";
else if (GraphPrintCheckerState->isUndefArg(N))
Out << "\\|Argument in call is undefined";
-
+
break;
}
const BlockEdge& E = cast<BlockEdge>(Loc);
Out << "Edge: (B" << E.getSrc()->getBlockID() << ", B"
<< E.getDst()->getBlockID() << ')';
-
+
if (Stmt* T = E.getSrc()->getTerminator()) {
-
+
SourceLocation SLoc = T->getLocStart();
-
+
Out << "\\|Terminator: ";
LangOptions LO; // FIXME.
E.getSrc()->printTerminator(Out, LO);
-
+
if (SLoc.isFileID()) {
Out << "\\lline="
<< GraphPrintSourceManager->getInstantiationLineNumber(SLoc)
<< " col="
<< GraphPrintSourceManager->getInstantiationColumnNumber(SLoc);
}
-
+
if (isa<SwitchStmt>(T)) {
Stmt* Label = E.getDst()->getLabel();
-
- if (Label) {
+
+ if (Label) {
if (CaseStmt* C = dyn_cast<CaseStmt>(Label)) {
Out << "\\lcase ";
LangOptions LO; // FIXME.
C->getLHS()->printPretty(Out, 0, PrintingPolicy(LO));
-
+
if (Stmt* RHS = C->getRHS()) {
Out << " .. ";
RHS->printPretty(Out, 0, PrintingPolicy(LO));
}
-
+
Out << ":";
}
else {
@@ -3091,7 +3090,7 @@
Out << "\\ldefault:";
}
}
- else
+ else
Out << "\\l(implicit) default:";
}
else if (isa<IndirectGotoStmt>(T)) {
@@ -3102,28 +3101,28 @@
if (*E.getSrc()->succ_begin() == E.getDst())
Out << "true";
else
- Out << "false";
+ Out << "false";
}
-
+
Out << "\\l";
}
-
+
if (GraphPrintCheckerState->isUndefControlFlow(N)) {
Out << "\\|Control-flow based on\\lUndefined value.\\l";
}
}
}
-
+
Out << "\\|StateID: " << (void*) N->getState() << "\\|";
const GRState *state = N->getState();
state->printDOT(Out);
-
+
Out << "\\l";
return Out.str();
}
};
-} // end llvm namespace
+} // end llvm namespace
#endif
#ifndef NDEBUG
@@ -3138,7 +3137,7 @@
#endif
void GRExprEngine::ViewGraph(bool trim) {
-#ifndef NDEBUG
+#ifndef NDEBUG
if (trim) {
std::vector<ExplodedNode*> Src;
@@ -3150,14 +3149,14 @@
// Iterate through the reports and get their nodes.
for (BugReporter::iterator I=BR.begin(), E=BR.end(); I!=E; ++I) {
for (BugType::const_iterator I2=(*I)->begin(), E2=(*I)->end();
- I2!=E2; ++I2) {
+ I2!=E2; ++I2) {
const BugReportEquivClass& EQ = *I2;
const BugReport &R = **EQ.begin();
ExplodedNode *N = const_cast<ExplodedNode*>(R.getEndNode());
if (N) Src.push_back(N);
}
}
-
+
ViewGraph(&Src[0], &Src[0]+Src.size());
}
else {
@@ -3165,7 +3164,7 @@
GraphPrintSourceManager = &getContext().getSourceManager();
llvm::ViewGraph(*G.roots_begin(), "GRExprEngine");
-
+
GraphPrintCheckerState = NULL;
GraphPrintSourceManager = NULL;
}
@@ -3176,14 +3175,14 @@
#ifndef NDEBUG
GraphPrintCheckerState = this;
GraphPrintSourceManager = &getContext().getSourceManager();
-
+
std::auto_ptr<ExplodedGraph> TrimmedG(G.Trim(Beg, End).first);
if (!TrimmedG.get())
llvm::errs() << "warning: Trimmed ExplodedGraph is empty.\n";
else
- llvm::ViewGraph(*TrimmedG->roots_begin(), "TrimmedGRExprEngine");
-
+ llvm::ViewGraph(*TrimmedG->roots_begin(), "TrimmedGRExprEngine");
+
GraphPrintCheckerState = NULL;
GraphPrintSourceManager = NULL;
#endif
diff --git a/lib/Analysis/GRExprEngineInternalChecks.cpp b/lib/Analysis/GRExprEngineInternalChecks.cpp
index ab19a6a..ab6874a 100644
--- a/lib/Analysis/GRExprEngineInternalChecks.cpp
+++ b/lib/Analysis/GRExprEngineInternalChecks.cpp
@@ -48,15 +48,15 @@
BuiltinBugReport(BugType& bt, const char* desc,
ExplodedNode *n)
: RangedBugReport(bt, desc, n) {}
-
+
BuiltinBugReport(BugType& bt, const char *shortDesc, const char *desc,
ExplodedNode *n)
- : RangedBugReport(bt, shortDesc, desc, n) {}
-
+ : RangedBugReport(bt, shortDesc, desc, n) {}
+
void registerInitialVisitors(BugReporterContext& BRC,
const ExplodedNode* N);
-};
-
+};
+
class VISIBILITY_HIDDEN BuiltinBug : public BugType {
GRExprEngine &Eng;
protected:
@@ -69,30 +69,30 @@
: BugType(n, "Logic errors"), Eng(*eng), desc(n) {}
const std::string &getDescription() const { return desc; }
-
+
virtual void FlushReportsImpl(BugReporter& BR, GRExprEngine& Eng) {}
void FlushReports(BugReporter& BR) { FlushReportsImpl(BR, Eng); }
-
+
virtual void registerInitialVisitors(BugReporterContext& BRC,
const ExplodedNode* N,
BuiltinBugReport *R) {}
-
+
template <typename ITER> void Emit(BugReporter& BR, ITER I, ITER E);
};
-
-
+
+
template <typename ITER>
void BuiltinBug::Emit(BugReporter& BR, ITER I, ITER E) {
for (; I != E; ++I) BR.EmitReport(new BuiltinBugReport(*this, desc.c_str(),
GetNode(I)));
-}
+}
void BuiltinBugReport::registerInitialVisitors(BugReporterContext& BRC,
const ExplodedNode* N) {
static_cast<BuiltinBug&>(getBugType()).registerInitialVisitors(BRC, N, this);
-}
-
+}
+
class VISIBILITY_HIDDEN NullDeref : public BuiltinBug {
public:
NullDeref(GRExprEngine* eng)
@@ -101,14 +101,14 @@
void FlushReportsImpl(BugReporter& BR, GRExprEngine& Eng) {
Emit(BR, Eng.null_derefs_begin(), Eng.null_derefs_end());
}
-
+
void registerInitialVisitors(BugReporterContext& BRC,
const ExplodedNode* N,
BuiltinBugReport *R) {
registerTrackNullOrUndefValue(BRC, GetDerefExpr(N), N);
}
};
-
+
class VISIBILITY_HIDDEN NilReceiverStructRet : public BuiltinBug {
public:
NilReceiverStructRet(GRExprEngine* eng) :
@@ -133,7 +133,7 @@
BR.EmitReport(R);
}
}
-
+
void registerInitialVisitors(BugReporterContext& BRC,
const ExplodedNode* N,
BuiltinBugReport *R) {
@@ -146,12 +146,12 @@
NilReceiverLargerThanVoidPtrRet(GRExprEngine* eng) :
BuiltinBug(eng,
"'nil' receiver with return type larger than sizeof(void *)") {}
-
+
void FlushReportsImpl(BugReporter& BR, GRExprEngine& Eng) {
for (GRExprEngine::nil_receiver_larger_than_voidptr_ret_iterator
I=Eng.nil_receiver_larger_than_voidptr_ret_begin(),
E=Eng.nil_receiver_larger_than_voidptr_ret_end(); I!=E; ++I) {
-
+
std::string sbuf;
llvm::raw_string_ostream os(sbuf);
PostStmt P = cast<PostStmt>((*I)->getLocation());
@@ -162,28 +162,28 @@
<< "' and of size "
<< Eng.getContext().getTypeSize(ME->getType()) / 8
<< " bytes) to be garbage or otherwise undefined.";
-
+
BuiltinBugReport *R = new BuiltinBugReport(*this, os.str().c_str(), *I);
R->addRange(ME->getReceiver()->getSourceRange());
BR.EmitReport(R);
}
- }
+ }
void registerInitialVisitors(BugReporterContext& BRC,
const ExplodedNode* N,
BuiltinBugReport *R) {
registerTrackNullOrUndefValue(BRC, GetReceiverExpr(N), N);
}
};
-
+
class VISIBILITY_HIDDEN UndefinedDeref : public BuiltinBug {
public:
UndefinedDeref(GRExprEngine* eng)
: BuiltinBug(eng,"Dereference of undefined pointer value") {}
-
+
void FlushReportsImpl(BugReporter& BR, GRExprEngine& Eng) {
Emit(BR, Eng.undef_derefs_begin(), Eng.undef_derefs_end());
}
-
+
void registerInitialVisitors(BugReporterContext& BRC,
const ExplodedNode* N,
BuiltinBugReport *R) {
@@ -196,30 +196,30 @@
DivZero(GRExprEngine* eng = 0)
: BuiltinBug(eng,"Division-by-zero",
"Division by zero or undefined value.") {}
-
+
void registerInitialVisitors(BugReporterContext& BRC,
const ExplodedNode* N,
BuiltinBugReport *R) {
registerTrackNullOrUndefValue(BRC, GetDenomExpr(N), N);
}
};
-
+
class VISIBILITY_HIDDEN UndefResult : public BuiltinBug {
public:
UndefResult(GRExprEngine* eng) : BuiltinBug(eng,"Undefined result",
"Result of operation is undefined.") {}
-
+
void FlushReportsImpl(BugReporter& BR, GRExprEngine& Eng) {
Emit(BR, Eng.undef_results_begin(), Eng.undef_results_end());
}
};
-
+
class VISIBILITY_HIDDEN BadCall : public BuiltinBug {
public:
BadCall(GRExprEngine *eng = 0)
: BuiltinBug(eng, "Invalid function call",
"Called function pointer is a null or undefined pointer value") {}
-
+
void registerInitialVisitors(BugReporterContext& BRC,
const ExplodedNode* N,
BuiltinBugReport *R) {
@@ -234,57 +234,57 @@
ArgReport(BugType& bt, const char* desc, ExplodedNode *n,
const Stmt *arg)
: BuiltinBugReport(bt, desc, n), Arg(arg) {}
-
+
ArgReport(BugType& bt, const char *shortDesc, const char *desc,
ExplodedNode *n, const Stmt *arg)
- : BuiltinBugReport(bt, shortDesc, desc, n), Arg(arg) {}
-
- const Stmt *getArg() const { return Arg; }
+ : BuiltinBugReport(bt, shortDesc, desc, n), Arg(arg) {}
+
+ const Stmt *getArg() const { return Arg; }
};
class VISIBILITY_HIDDEN BadArg : public BuiltinBug {
-public:
- BadArg(GRExprEngine* eng=0) : BuiltinBug(eng,"Uninitialized argument",
+public:
+ BadArg(GRExprEngine* eng=0) : BuiltinBug(eng,"Uninitialized argument",
"Pass-by-value argument in function call is undefined.") {}
BadArg(GRExprEngine* eng, const char* d)
: BuiltinBug(eng,"Uninitialized argument", d) {}
-
+
void registerInitialVisitors(BugReporterContext& BRC,
const ExplodedNode* N,
BuiltinBugReport *R) {
registerTrackNullOrUndefValue(BRC, static_cast<ArgReport*>(R)->getArg(),
N);
- }
+ }
};
-
+
class VISIBILITY_HIDDEN BadMsgExprArg : public BadArg {
public:
- BadMsgExprArg(GRExprEngine* eng)
+ BadMsgExprArg(GRExprEngine* eng)
: BadArg(eng,"Pass-by-value argument in message expression is undefined"){}
-
+
void FlushReportsImpl(BugReporter& BR, GRExprEngine& Eng) {
for (GRExprEngine::UndefArgsTy::iterator I=Eng.msg_expr_undef_arg_begin(),
- E = Eng.msg_expr_undef_arg_end(); I!=E; ++I) {
+ E = Eng.msg_expr_undef_arg_end(); I!=E; ++I) {
// Generate a report for this bug.
ArgReport *report = new ArgReport(*this, desc.c_str(), I->first,
I->second);
report->addRange(I->second->getSourceRange());
BR.EmitReport(report);
- }
- }
+ }
+ }
};
-
+
class VISIBILITY_HIDDEN BadReceiver : public BuiltinBug {
-public:
+public:
BadReceiver(GRExprEngine* eng)
: BuiltinBug(eng,"Uninitialized receiver",
"Receiver in message expression is an uninitialized value") {}
-
+
void FlushReportsImpl(BugReporter& BR, GRExprEngine& Eng) {
for (GRExprEngine::ErrorNodes::iterator I=Eng.undef_receivers_begin(),
End = Eng.undef_receivers_end(); I!=End; ++I) {
-
+
// Generate a report for this bug.
BuiltinBugReport *report = new BuiltinBugReport(*this, desc.c_str(), *I);
ExplodedNode* N = *I;
@@ -300,14 +300,14 @@
const ExplodedNode* N,
BuiltinBugReport *R) {
registerTrackNullOrUndefValue(BRC, GetReceiverExpr(N), N);
- }
+ }
};
class VISIBILITY_HIDDEN RetStack : public BuiltinBug {
public:
RetStack(GRExprEngine* eng)
: BuiltinBug(eng, "Return of address to stack-allocated memory") {}
-
+
void FlushReportsImpl(BugReporter& BR, GRExprEngine& Eng) {
for (GRExprEngine::ret_stackaddr_iterator I=Eng.ret_stackaddr_begin(),
End = Eng.ret_stackaddr_end(); I!=End; ++I) {
@@ -316,42 +316,42 @@
const Stmt *S = cast<PostStmt>(N->getLocation()).getStmt();
const Expr* E = cast<ReturnStmt>(S)->getRetValue();
assert(E && "Return expression cannot be NULL");
-
+
// Get the value associated with E.
loc::MemRegionVal V = cast<loc::MemRegionVal>(N->getState()->getSVal(E));
-
+
// Generate a report for this bug.
std::string buf;
llvm::raw_string_ostream os(buf);
SourceRange R;
-
+
// Check if the region is a compound literal.
- if (const CompoundLiteralRegion* CR =
+ if (const CompoundLiteralRegion* CR =
dyn_cast<CompoundLiteralRegion>(V.getRegion())) {
-
+
const CompoundLiteralExpr* CL = CR->getLiteralExpr();
os << "Address of stack memory associated with a compound literal "
"declared on line "
<< BR.getSourceManager()
.getInstantiationLineNumber(CL->getLocStart())
<< " returned.";
-
+
R = CL->getSourceRange();
}
else if (const AllocaRegion* AR = dyn_cast<AllocaRegion>(V.getRegion())) {
const Expr* ARE = AR->getExpr();
SourceLocation L = ARE->getLocStart();
R = ARE->getSourceRange();
-
+
os << "Address of stack memory allocated by call to alloca() on line "
<< BR.getSourceManager().getInstantiationLineNumber(L)
<< " returned.";
- }
- else {
+ }
+ else {
os << "Address of stack memory associated with local variable '"
<< V.getRegion()->getString() << "' returned.";
}
-
+
RangedBugReport *report = new RangedBugReport(*this, os.str().c_str(), N);
report->addRange(E->getSourceRange());
if (R.isValid()) report->addRange(R);
@@ -359,51 +359,51 @@
}
}
};
-
+
class VISIBILITY_HIDDEN RetUndef : public BuiltinBug {
public:
RetUndef(GRExprEngine* eng) : BuiltinBug(eng, "Uninitialized return value",
"Uninitialized or undefined value returned to caller.") {}
-
+
void FlushReportsImpl(BugReporter& BR, GRExprEngine& Eng) {
Emit(BR, Eng.ret_undef_begin(), Eng.ret_undef_end());
}
-
+
void registerInitialVisitors(BugReporterContext& BRC,
const ExplodedNode* N,
BuiltinBugReport *R) {
registerTrackNullOrUndefValue(BRC, GetRetValExpr(N), N);
- }
+ }
};
class VISIBILITY_HIDDEN UndefBranch : public BuiltinBug {
struct VISIBILITY_HIDDEN FindUndefExpr {
GRStateManager& VM;
const GRState* St;
-
+
FindUndefExpr(GRStateManager& V, const GRState* S) : VM(V), St(S) {}
-
- Expr* FindExpr(Expr* Ex) {
+
+ Expr* FindExpr(Expr* Ex) {
if (!MatchesCriteria(Ex))
return 0;
-
+
for (Stmt::child_iterator I=Ex->child_begin(), E=Ex->child_end();I!=E;++I)
if (Expr* ExI = dyn_cast_or_null<Expr>(*I)) {
Expr* E2 = FindExpr(ExI);
if (E2) return E2;
}
-
+
return Ex;
}
-
+
bool MatchesCriteria(Expr* Ex) { return St->getSVal(Ex).isUndef(); }
};
-
+
public:
UndefBranch(GRExprEngine *eng)
: BuiltinBug(eng,"Use of uninitialized value",
"Branch condition evaluates to an uninitialized value.") {}
-
+
void FlushReportsImpl(BugReporter& BR, GRExprEngine& Eng) {
for (GRExprEngine::undef_branch_iterator I=Eng.undef_branches_begin(),
E=Eng.undef_branches_end(); I!=E; ++I) {
@@ -442,7 +442,7 @@
BR.EmitReport(R);
}
}
-
+
void registerInitialVisitors(BugReporterContext& BRC,
const ExplodedNode* N,
BuiltinBugReport *R) {
@@ -461,12 +461,12 @@
Emit(BR, Eng.explicit_oob_memacc_begin(), Eng.explicit_oob_memacc_end());
}
};
-
+
class VISIBILITY_HIDDEN BadSizeVLA : public BuiltinBug {
public:
BadSizeVLA(GRExprEngine* eng) :
BuiltinBug(eng, "Bad variable-length array (VLA) size") {}
-
+
void FlushReportsImpl(BugReporter& BR, GRExprEngine& Eng) {
for (GRExprEngine::ErrorNodes::iterator
I = Eng.ExplicitBadSizedVLA.begin(),
@@ -475,26 +475,26 @@
// Determine whether this was a 'zero-sized' VLA or a VLA with an
// undefined size.
ExplodedNode* N = *I;
- PostStmt PS = cast<PostStmt>(N->getLocation());
+ PostStmt PS = cast<PostStmt>(N->getLocation());
const DeclStmt *DS = cast<DeclStmt>(PS.getStmt());
VarDecl* VD = cast<VarDecl>(*DS->decl_begin());
QualType T = Eng.getContext().getCanonicalType(VD->getType());
VariableArrayType* VT = cast<VariableArrayType>(T);
Expr* SizeExpr = VT->getSizeExpr();
-
+
std::string buf;
llvm::raw_string_ostream os(buf);
os << "The expression used to specify the number of elements in the "
"variable-length array (VLA) '"
<< VD->getNameAsString() << "' evaluates to ";
-
+
bool isUndefined = N->getState()->getSVal(SizeExpr).isUndef();
-
+
if (isUndefined)
os << "an undefined or garbage value.";
else
os << "0. VLAs with no elements have undefined behavior.";
-
+
std::string shortBuf;
llvm::raw_string_ostream os_short(shortBuf);
os_short << "Variable-length array '" << VD->getNameAsString() << "' "
@@ -508,7 +508,7 @@
BR.EmitReport(report);
}
}
-
+
void registerInitialVisitors(BugReporterContext& BRC,
const ExplodedNode* N,
BuiltinBugReport *R) {
@@ -524,7 +524,7 @@
public CheckerVisitor<CheckAttrNonNull> {
BugType *BT;
-
+
public:
CheckAttrNonNull() : BT(0) {}
~CheckAttrNonNull() {}
@@ -537,73 +537,73 @@
void PreVisitCallExpr(CheckerContext &C, const CallExpr *CE) {
const GRState *state = C.getState();
const GRState *originalState = state;
-
+
// Check if the callee has a 'nonnull' attribute.
SVal X = state->getSVal(CE->getCallee());
-
+
const FunctionDecl* FD = X.getAsFunctionDecl();
if (!FD)
return;
- const NonNullAttr* Att = FD->getAttr<NonNullAttr>();
+ const NonNullAttr* Att = FD->getAttr<NonNullAttr>();
if (!Att)
return;
-
+
// Iterate through the arguments of CE and check them for null.
unsigned idx = 0;
-
+
for (CallExpr::const_arg_iterator I=CE->arg_begin(), E=CE->arg_end(); I!=E;
++I, ++idx) {
-
+
if (!Att->isNonNull(idx))
continue;
-
+
const SVal &V = state->getSVal(*I);
const DefinedSVal *DV = dyn_cast<DefinedSVal>(&V);
-
+
if (!DV)
continue;
-
+
ConstraintManager &CM = C.getConstraintManager();
const GRState *stateNotNull, *stateNull;
llvm::tie(stateNotNull, stateNull) = CM.AssumeDual(state, *DV);
-
+
if (stateNull && !stateNotNull) {
// Generate an error node. Check for a null node in case
// we cache out.
if (ExplodedNode *errorNode = C.GenerateNode(CE, stateNull, true)) {
-
+
// Lazily allocate the BugType object if it hasn't already been
// created. Ownership is transferred to the BugReporter object once
// the BugReport is passed to 'EmitWarning'.
if (!BT)
BT = new BugType("Argument with 'nonnull' attribute passed null",
"API");
-
+
EnhancedBugReport *R =
new EnhancedBugReport(*BT,
"Null pointer passed as an argument to a "
"'nonnull' parameter", errorNode);
-
+
// Highlight the range of the argument that was null.
const Expr *arg = *I;
R->addRange(arg->getSourceRange());
R->addVisitorCreator(registerTrackNullOrUndefValue, arg);
-
+
// Emit the bug report.
C.EmitReport(R);
}
-
+
// Always return. Either we cached out or we just emitted an error.
return;
}
-
+
// If a pointer value passed the check we should assume that it is
// indeed not null from this point forward.
assert(stateNotNull);
state = stateNotNull;
}
-
+
// If we reach here all of the arguments passed the nonnull check.
     // If 'state' has been updated, generate a new node.
if (state != originalState)
@@ -614,7 +614,7 @@
// Undefined arguments checking.
namespace {
-class VISIBILITY_HIDDEN CheckUndefinedArg
+class VISIBILITY_HIDDEN CheckUndefinedArg
: public CheckerVisitor<CheckUndefinedArg> {
BadArg *BT;
@@ -690,7 +690,7 @@
void PreVisitBinaryOperator(CheckerContext &C, const BinaryOperator *B);
};
-void CheckBadDiv::PreVisitBinaryOperator(CheckerContext &C,
+void CheckBadDiv::PreVisitBinaryOperator(CheckerContext &C,
const BinaryOperator *B) {
BinaryOperator::Opcode Op = B->getOpcode();
if (Op != BinaryOperator::Div &&
@@ -719,19 +719,19 @@
// Handle the case where 'Denom' is UnknownVal.
const DefinedSVal *DV = dyn_cast<DefinedSVal>(&Denom);
- if (!DV)
+ if (!DV)
return;
// Check for divide by zero.
ConstraintManager &CM = C.getConstraintManager();
const GRState *stateNotZero, *stateZero;
llvm::tie(stateNotZero, stateZero) = CM.AssumeDual(C.getState(), *DV);
-
+
if (stateZero && !stateNotZero) {
if (ExplodedNode *N = C.GenerateNode(B, stateZero, true)) {
if (!BT)
BT = new DivZero();
-
+
C.EmitReport(new BuiltinBugReport(*BT, BT->getDescription().c_str(), N));
}
return;
@@ -764,7 +764,7 @@
BR.Register(new BadSizeVLA(this));
BR.Register(new NilReceiverStructRet(this));
BR.Register(new NilReceiverLargerThanVoidPtrRet(this));
-
+
// The following checks do not need to have their associated BugTypes
// explicitly registered with the BugReporter. If they issue any BugReports,
// their associated BugType will get registered with the BugReporter
diff --git a/lib/Analysis/GRState.cpp b/lib/Analysis/GRState.cpp
index 74b493d..f269824 100644
--- a/lib/Analysis/GRState.cpp
+++ b/lib/Analysis/GRState.cpp
@@ -27,7 +27,7 @@
for (std::vector<GRState::Printer*>::iterator I=Printers.begin(),
E=Printers.end(); I!=E; ++I)
delete *I;
-
+
for (GDMContextsTy::iterator I=GDMContexts.begin(), E=GDMContexts.end();
I!=E; ++I)
I->second.second(I->second.first);
@@ -59,13 +59,13 @@
const GRState *GRState::unbindLoc(Loc LV) const {
Store OldStore = getStore();
Store NewStore = getStateManager().StoreMgr->Remove(OldStore, LV);
-
+
if (NewStore == OldStore)
return this;
-
+
GRState NewSt = *this;
NewSt.St = NewStore;
- return getStateManager().getPersistentState(NewSt);
+ return getStateManager().getPersistentState(NewSt);
}
SVal GRState::getSValAsScalarOrLoc(const MemRegion *R) const {
@@ -87,7 +87,7 @@
const GRState *GRState::BindExpr(const Stmt* Ex, SVal V, bool Invalidate) const{
Environment NewEnv = getStateManager().EnvMgr.BindExpr(Env, Ex, V,
- Invalidate);
+ Invalidate);
if (NewEnv == Env)
return this;
@@ -98,7 +98,7 @@
const GRState* GRStateManager::getInitialState(const LocationContext *InitLoc) {
GRState State(this,
- EnvMgr.getInitialEnvironment(InitLoc->getAnalysisContext()),
+ EnvMgr.getInitialEnvironment(InitLoc->getAnalysisContext()),
StoreMgr->getInitialStore(InitLoc),
GDMFactory.GetEmptyMap());
@@ -106,16 +106,16 @@
}
const GRState* GRStateManager::getPersistentState(GRState& State) {
-
+
llvm::FoldingSetNodeID ID;
- State.Profile(ID);
+ State.Profile(ID);
void* InsertPos;
-
+
if (GRState* I = StateSet.FindNodeOrInsertPos(ID, InsertPos))
return I;
-
+
GRState* I = (GRState*) Alloc.Allocate<GRState>();
- new (I) GRState(State);
+ new (I) GRState(State);
StateSet.InsertNode(I, InsertPos);
return I;
}
@@ -131,32 +131,32 @@
//===----------------------------------------------------------------------===//
void GRState::print(llvm::raw_ostream& Out, const char* nl,
- const char* sep) const {
+ const char* sep) const {
// Print the store.
GRStateManager &Mgr = getStateManager();
Mgr.getStoreManager().print(getStore(), Out, nl, sep);
-
+
CFG &C = *getAnalysisContext().getCFG();
-
+
// Print Subexpression bindings.
bool isFirst = true;
-
- for (Environment::iterator I = Env.begin(), E = Env.end(); I != E; ++I) {
+
+ for (Environment::iterator I = Env.begin(), E = Env.end(); I != E; ++I) {
if (C.isBlkExpr(I.getKey()))
continue;
-
+
if (isFirst) {
Out << nl << nl << "Sub-Expressions:" << nl;
isFirst = false;
}
else { Out << nl; }
-
+
Out << " (" << (void*) I.getKey() << ") ";
LangOptions LO; // FIXME.
I.getKey()->printPretty(Out, 0, PrintingPolicy(LO));
Out << " : " << I.getData();
}
-
+
// Print block-expression bindings.
isFirst = true;
@@ -169,15 +169,15 @@
isFirst = false;
}
else { Out << nl; }
-
+
Out << " (" << (void*) I.getKey() << ") ";
LangOptions LO; // FIXME.
I.getKey()->printPretty(Out, 0, PrintingPolicy(LO));
Out << " : " << I.getData();
}
-
+
Mgr.getConstraintManager().print(this, Out, nl, sep);
-
+
// Print checker-specific data.
for (std::vector<Printer*>::iterator I = Mgr.Printers.begin(),
E = Mgr.Printers.end(); I != E; ++I) {
@@ -205,23 +205,23 @@
GRStateManager::FindGDMContext(void* K,
void* (*CreateContext)(llvm::BumpPtrAllocator&),
void (*DeleteContext)(void*)) {
-
+
std::pair<void*, void (*)(void*)>& p = GDMContexts[K];
if (!p.first) {
p.first = CreateContext(Alloc);
p.second = DeleteContext;
}
-
+
return p.first;
}
const GRState* GRStateManager::addGDM(const GRState* St, void* Key, void* Data){
GRState::GenericDataMap M1 = St->getGDM();
GRState::GenericDataMap M2 = GDMFactory.Add(M1, Key, Data);
-
+
if (M1 == M2)
return St;
-
+
GRState NewSt = *St;
NewSt.GDM = M2;
return getPersistentState(NewSt);
@@ -240,14 +240,14 @@
SymbolVisitor &visitor;
llvm::OwningPtr<SubRegionMap> SRM;
public:
-
+
ScanReachableSymbols(const GRState *st, SymbolVisitor& v)
: state(st), visitor(v) {}
-
+
bool scan(nonloc::CompoundVal val);
bool scan(SVal val);
bool scan(const MemRegion *R);
-
+
// From SubRegionMap::Visitor.
bool Visit(const MemRegion* Parent, const MemRegion* SubRegion) {
return scan(SubRegion);
@@ -262,44 +262,44 @@
return true;
}
-
+
bool ScanReachableSymbols::scan(SVal val) {
if (loc::MemRegionVal *X = dyn_cast<loc::MemRegionVal>(&val))
return scan(X->getRegion());
if (SymbolRef Sym = val.getAsSymbol())
return visitor.VisitSymbol(Sym);
-
+
if (nonloc::CompoundVal *X = dyn_cast<nonloc::CompoundVal>(&val))
return scan(*X);
-
+
return true;
}
-
+
bool ScanReachableSymbols::scan(const MemRegion *R) {
if (isa<MemSpaceRegion>(R) || visited.count(R))
return true;
-
+
visited.insert(R);
// If this is a symbolic region, visit the symbol for the region.
if (const SymbolicRegion *SR = dyn_cast<SymbolicRegion>(R))
if (!visitor.VisitSymbol(SR->getSymbol()))
return false;
-
+
// If this is a subregion, also visit the parent regions.
if (const SubRegion *SR = dyn_cast<SubRegion>(R))
if (!scan(SR->getSuperRegion()))
return false;
-
+
// Now look at the binding to this region (if any).
if (!scan(state->getSValAsScalarOrLoc(R)))
return false;
-
+
// Now look at the subregions.
if (!SRM.get())
SRM.reset(state->getStateManager().getStoreManager().getSubRegionMap(state));
-
+
return SRM->iterSubRegions(R, *this);
}
@@ -314,21 +314,21 @@
bool GRStateManager::isEqual(const GRState* state, const Expr* Ex,
const llvm::APSInt& Y) {
-
+
SVal V = state->getSVal(Ex);
-
+
if (loc::ConcreteInt* X = dyn_cast<loc::ConcreteInt>(&V))
return X->getValue() == Y;
if (nonloc::ConcreteInt* X = dyn_cast<nonloc::ConcreteInt>(&V))
return X->getValue() == Y;
-
+
if (SymbolRef Sym = V.getAsSymbol())
return ConstraintMgr->isEqual(state, Sym, Y);
return false;
}
-
+
bool GRStateManager::isEqual(const GRState* state, const Expr* Ex, uint64_t x) {
return isEqual(state, Ex, getBasicVals().getValue(x, Ex->getType()));
}
diff --git a/lib/Analysis/LiveVariables.cpp b/lib/Analysis/LiveVariables.cpp
index c9828ce..4d96c8f 100644
--- a/lib/Analysis/LiveVariables.cpp
+++ b/lib/Analysis/LiveVariables.cpp
@@ -29,35 +29,35 @@
//===----------------------------------------------------------------------===//
// Useful constants.
-//===----------------------------------------------------------------------===//
+//===----------------------------------------------------------------------===//
static const bool Alive = true;
-static const bool Dead = false;
+static const bool Dead = false;
//===----------------------------------------------------------------------===//
// Dataflow initialization logic.
-//===----------------------------------------------------------------------===//
+//===----------------------------------------------------------------------===//
namespace {
-class VISIBILITY_HIDDEN RegisterDecls
+class VISIBILITY_HIDDEN RegisterDecls
: public CFGRecStmtDeclVisitor<RegisterDecls> {
-
+
LiveVariables::AnalysisDataTy& AD;
-
+
typedef llvm::SmallVector<VarDecl*, 20> AlwaysLiveTy;
AlwaysLiveTy AlwaysLive;
-
+
public:
RegisterDecls(LiveVariables::AnalysisDataTy& ad) : AD(ad) {}
~RegisterDecls() {
AD.AlwaysLive.resetValues(AD);
-
+
for (AlwaysLiveTy::iterator I = AlwaysLive.begin(), E = AlwaysLive.end();
- I != E; ++ I)
- AD.AlwaysLive(*I, AD) = Alive;
+ I != E; ++ I)
+ AD.AlwaysLive(*I, AD) = Alive;
}
void VisitImplicitParamDecl(ImplicitParamDecl* IPD) {
@@ -68,12 +68,12 @@
void VisitVarDecl(VarDecl* VD) {
// Register the VarDecl for tracking.
AD.Register(VD);
-
+
// Does the variable have global storage? If so, it is always live.
if (VD->hasGlobalStorage())
- AlwaysLive.push_back(VD);
+ AlwaysLive.push_back(VD);
}
-
+
CFG& getCFG() { return AD.getCFG(); }
};
} // end anonymous namespace
@@ -82,14 +82,14 @@
// Register all referenced VarDecls.
getAnalysisData().setCFG(cfg);
getAnalysisData().setContext(Ctx);
-
+
RegisterDecls R(getAnalysisData());
cfg.VisitBlockStmts(R);
}
//===----------------------------------------------------------------------===//
// Transfer functions.
-//===----------------------------------------------------------------------===//
+//===----------------------------------------------------------------------===//
namespace {
@@ -101,85 +101,85 @@
LiveVariables::ValTy& getVal() { return LiveState; }
CFG& getCFG() { return AD.getCFG(); }
-
+
void VisitDeclRefExpr(DeclRefExpr* DR);
void VisitBinaryOperator(BinaryOperator* B);
void VisitAssign(BinaryOperator* B);
void VisitDeclStmt(DeclStmt* DS);
void BlockStmt_VisitObjCForCollectionStmt(ObjCForCollectionStmt* S);
void VisitUnaryOperator(UnaryOperator* U);
- void Visit(Stmt *S);
- void VisitTerminator(CFGBlock* B);
-
+ void Visit(Stmt *S);
+ void VisitTerminator(CFGBlock* B);
+
void SetTopValue(LiveVariables::ValTy& V) {
V = AD.AlwaysLive;
}
-
+
};
-
+
void TransferFuncs::Visit(Stmt *S) {
-
+
if (S == getCurrentBlkStmt()) {
-
+
if (AD.Observer)
AD.Observer->ObserveStmt(S,AD,LiveState);
-
+
if (getCFG().isBlkExpr(S)) LiveState(S,AD) = Dead;
StmtVisitor<TransferFuncs,void>::Visit(S);
}
else if (!getCFG().isBlkExpr(S)) {
-
+
if (AD.Observer)
AD.Observer->ObserveStmt(S,AD,LiveState);
-
+
StmtVisitor<TransferFuncs,void>::Visit(S);
-
+
}
else {
// For block-level expressions, mark that they are live.
LiveState(S,AD) = Alive;
}
}
-
+
void TransferFuncs::VisitTerminator(CFGBlock* B) {
-
+
const Stmt* E = B->getTerminatorCondition();
if (!E)
return;
-
+
assert (getCFG().isBlkExpr(E));
LiveState(E, AD) = Alive;
}
void TransferFuncs::VisitDeclRefExpr(DeclRefExpr* DR) {
- if (VarDecl* V = dyn_cast<VarDecl>(DR->getDecl()))
+ if (VarDecl* V = dyn_cast<VarDecl>(DR->getDecl()))
LiveState(V,AD) = Alive;
}
-
-void TransferFuncs::VisitBinaryOperator(BinaryOperator* B) {
+
+void TransferFuncs::VisitBinaryOperator(BinaryOperator* B) {
if (B->isAssignmentOp()) VisitAssign(B);
else VisitStmt(B);
}
void
TransferFuncs::BlockStmt_VisitObjCForCollectionStmt(ObjCForCollectionStmt* S) {
-
+
// This is a block-level expression. Its value is 'dead' before this point.
LiveState(S, AD) = Dead;
// This represents a 'use' of the collection.
Visit(S->getCollection());
-
+
// This represents a 'kill' for the variable.
Stmt* Element = S->getElement();
DeclRefExpr* DR = 0;
VarDecl* VD = 0;
-
+
if (DeclStmt* DS = dyn_cast<DeclStmt>(Element))
VD = cast<VarDecl>(DS->getSingleDecl());
else {
- Expr* ElemExpr = cast<Expr>(Element)->IgnoreParens();
+ Expr* ElemExpr = cast<Expr>(Element)->IgnoreParens();
if ((DR = dyn_cast<DeclRefExpr>(ElemExpr)))
VD = cast<VarDecl>(DR->getDecl());
else {
@@ -194,10 +194,10 @@
}
}
-
+
void TransferFuncs::VisitUnaryOperator(UnaryOperator* U) {
Expr *E = U->getSubExpr();
-
+
switch (U->getOpcode()) {
case UnaryOperator::PostInc:
case UnaryOperator::PostDec:
@@ -206,7 +206,7 @@
// Walk through the subexpressions, blasting through ParenExprs
// until we either find a DeclRefExpr or some non-DeclRefExpr
// expression.
- if (DeclRefExpr* DR = dyn_cast<DeclRefExpr>(E->IgnoreParens()))
+ if (DeclRefExpr* DR = dyn_cast<DeclRefExpr>(E->IgnoreParens()))
if (VarDecl* VD = dyn_cast<VarDecl>(DR->getDecl())) {
// Treat the --/++ operator as a kill.
if (AD.Observer) { AD.Observer->ObserverKill(DR); }
@@ -215,24 +215,24 @@
}
// Fall-through.
-
+
default:
return Visit(E);
}
}
-
-void TransferFuncs::VisitAssign(BinaryOperator* B) {
+
+void TransferFuncs::VisitAssign(BinaryOperator* B) {
Expr* LHS = B->getLHS();
// Assigning to a variable?
if (DeclRefExpr* DR = dyn_cast<DeclRefExpr>(LHS->IgnoreParens())) {
-
+
    // Update liveness information.
unsigned bit = AD.getIdx(DR->getDecl());
LiveState.getDeclBit(bit) = Dead | AD.AlwaysLive.getDeclBit(bit);
-
+
if (AD.Observer) { AD.Observer->ObserverKill(DR); }
-
+
// Handle things like +=, etc., which also generate "uses"
// of a variable. Do this just by visiting the subexpression.
if (B->getOpcode() != BinaryOperator::Assign)
@@ -240,7 +240,7 @@
}
else // Not assigning to a variable. Process LHS as usual.
Visit(LHS);
-
+
Visit(B->getRHS());
}
@@ -255,44 +255,44 @@
// transfer function for this expression first.
if (Expr* Init = VD->getInit())
Visit(Init);
-
+
if (const VariableArrayType* VT =
AD.getContext().getAsVariableArrayType(VD->getType())) {
StmtIterator I(const_cast<VariableArrayType*>(VT));
- StmtIterator E;
+ StmtIterator E;
for (; I != E; ++I) Visit(*I);
}
-
+
// Update liveness information by killing the VarDecl.
unsigned bit = AD.getIdx(VD);
LiveState.getDeclBit(bit) = Dead | AD.AlwaysLive.getDeclBit(bit);
}
}
-
+
} // end anonymous namespace
//===----------------------------------------------------------------------===//
// Merge operator: if something is live on any successor block, it is live
// in the current block (a set union).
-//===----------------------------------------------------------------------===//
+//===----------------------------------------------------------------------===//
namespace {
struct Merge {
- typedef StmtDeclBitVector_Types::ValTy ValTy;
-
+ typedef StmtDeclBitVector_Types::ValTy ValTy;
+
void operator()(ValTy& Dst, const ValTy& Src) {
Dst.OrDeclBits(Src);
Dst.OrBlkExprBits(Src);
}
};
-
+
typedef DataflowSolver<LiveVariables, TransferFuncs, Merge> Solver;
} // end anonymous namespace
//===----------------------------------------------------------------------===//
// External interface to run Liveness analysis.
-//===----------------------------------------------------------------------===//
+//===----------------------------------------------------------------------===//
void LiveVariables::runOnCFG(CFG& cfg) {
Solver S(*this);
@@ -337,22 +337,22 @@
void LiveVariables::dumpLiveness(const ValTy& V, SourceManager& SM) const {
const AnalysisDataTy& AD = getAnalysisData();
-
+
for (AnalysisDataTy::decl_iterator I = AD.begin_decl(),
E = AD.end_decl(); I!=E; ++I)
- if (V.getDeclBit(I->second)) {
+ if (V.getDeclBit(I->second)) {
fprintf(stderr, " %s <", I->first->getIdentifier()->getName());
I->first->getLocation().dump(SM);
fprintf(stderr, ">\n");
}
-}
+}
void LiveVariables::dumpBlockLiveness(SourceManager& M) const {
for (BlockDataMapTy::iterator I = getBlockDataMap().begin(),
E = getBlockDataMap().end(); I!=E; ++I) {
fprintf(stderr, "\n[ B%d (live variables at block exit) ]\n",
I->first->getBlockID());
-
+
dumpLiveness(I->second,M);
}
diff --git a/lib/Analysis/MemRegion.cpp b/lib/Analysis/MemRegion.cpp
index bc51f50..353e632 100644
--- a/lib/Analysis/MemRegion.cpp
+++ b/lib/Analysis/MemRegion.cpp
@@ -55,8 +55,8 @@
ID.AddInteger((unsigned)getKind());
}
-void StringRegion::ProfileRegion(llvm::FoldingSetNodeID& ID,
- const StringLiteral* Str,
+void StringRegion::ProfileRegion(llvm::FoldingSetNodeID& ID,
+ const StringLiteral* Str,
const MemRegion* superRegion) {
ID.AddInteger((unsigned) StringRegionKind);
ID.AddPointer(Str);
@@ -114,7 +114,7 @@
}
void ElementRegion::ProfileRegion(llvm::FoldingSetNodeID& ID,
- QualType ElementType, SVal Idx,
+ QualType ElementType, SVal Idx,
const MemRegion* superRegion) {
ID.AddInteger(MemRegion::ElementRegionKind);
ID.Add(ElementType);
@@ -182,7 +182,7 @@
os << "ivar{" << superRegion << ',' << getDecl()->getNameAsString() << '}';
}
-void StringRegion::dumpToStream(llvm::raw_ostream& os) const {
+void StringRegion::dumpToStream(llvm::raw_ostream& os) const {
Str->printPretty(os, 0, PrintingPolicy(getContext().getLangOptions()));
}
@@ -206,8 +206,8 @@
// MemRegionManager methods.
//===----------------------------------------------------------------------===//
-MemSpaceRegion* MemRegionManager::LazyAllocate(MemSpaceRegion*& region) {
- if (!region) {
+MemSpaceRegion* MemRegionManager::LazyAllocate(MemSpaceRegion*& region) {
+ if (!region) {
region = (MemSpaceRegion*) A.Allocate<MemSpaceRegion>();
new (region) MemSpaceRegion(this);
}
@@ -249,13 +249,13 @@
VarRegion* MemRegionManager::getVarRegion(const VarDecl *D,
const LocationContext *LC) {
-
+
// FIXME: Once we implement scope handling, we will need to properly lookup
// 'D' to the proper LocationContext. For now, just strip down to the
// StackFrame.
while (!isa<StackFrameContext>(LC))
LC = LC->getParent();
-
+
return getRegion<VarRegion>(D, LC);
}
@@ -320,12 +320,12 @@
const MemSpaceRegion *MemRegion::getMemorySpace() const {
const MemRegion *R = this;
const SubRegion* SR = dyn_cast<SubRegion>(this);
-
+
while (SR) {
R = SR->getSuperRegion();
SR = dyn_cast<SubRegion>(R);
}
-
+
return dyn_cast<MemSpaceRegion>(R);
}
@@ -365,7 +365,7 @@
bool MemRegion::hasParametersStorage() const {
if (const MemSpaceRegion *MS = getMemorySpace())
return MS == getMemRegionManager()->getStackArgumentsRegion();
-
+
return false;
}
@@ -385,7 +385,7 @@
const MemRegion *MemRegion::getBaseRegion() const {
const MemRegion *R = this;
while (true) {
- if (const ElementRegion *ER = dyn_cast<ElementRegion>(R)) {
+ if (const ElementRegion *ER = dyn_cast<ElementRegion>(R)) {
// FIXME: generalize. Essentially we want to strip away ElementRegions
// that were layered on a symbolic region because of casts. We only
// want to strip away ElementRegions, however, where the index is 0.
@@ -418,27 +418,27 @@
const ElementRegion *ER = this;
const MemRegion *superR = NULL;
ASTContext &C = getContext();
-
+
// FIXME: Handle multi-dimensional arrays.
while (ER) {
superR = ER->getSuperRegion();
-
+
// FIXME: generalize to symbolic offsets.
SVal index = ER->getIndex();
if (nonloc::ConcreteInt *CI = dyn_cast<nonloc::ConcreteInt>(&index)) {
// Update the offset.
int64_t i = CI->getValue().getSExtValue();
-
+
if (i != 0) {
QualType elemType = ER->getElementType();
-
+
// If we are pointing to an incomplete type, go no further.
if (!IsCompleteType(C, elemType)) {
superR = ER;
break;
}
-
+
int64_t size = (int64_t) (C.getTypeSize(elemType) / 8);
offset += (i * size);
}
@@ -447,10 +447,10 @@
ER = dyn_cast<ElementRegion>(superR);
continue;
}
-
+
return NULL;
}
-
+
assert(superR && "super region cannot be NULL");
return RegionRawOffset(superR, offset);
}
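
The offset walk above (ending in RegionRawOffset(superR, offset)) folds each concrete element index into a byte offset as index times the element size, moving out through super-regions, and gives up on symbolic indices or incomplete element types. The arithmetic itself on plain values, with a made-up two-level element layout (nothing below is the analyzer's API):

    #include <iostream>

    // Fold a chain of concrete array indices into a byte offset, innermost level
    // first; each level contributes index * elementSizeInBytes.
    static long long accumulateOffset(const long long *indices,
                                      const long long *elemSizes, unsigned depth) {
      long long offset = 0;
      for (unsigned i = 0; i < depth; ++i)
        offset += indices[i] * elemSizes[i];
      return offset;
    }

    int main() {
      // Hypothetical layout: a 1-byte element view layered on a 4-byte element view.
      long long indices[]   = { 2, 3 };
      long long elemSizes[] = { 1, 4 };  // char = 1 byte, int = 4 bytes (assumed)
      std::cout << accumulateOffset(indices, elemSizes, 2) << " bytes\n"; // 14 bytes
    }
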
diff --git a/lib/Analysis/PathDiagnostic.cpp b/lib/Analysis/PathDiagnostic.cpp
index a608ce0..1c2f6bf 100644
--- a/lib/Analysis/PathDiagnostic.cpp
+++ b/lib/Analysis/PathDiagnostic.cpp
@@ -27,7 +27,7 @@
for (const_iterator I = begin(), E = end(); I!=E; ++I) {
if (isa<PathDiagnosticEventPiece>(*I))
return true;
-
+
if (PathDiagnosticMacroPiece *MP = dyn_cast<PathDiagnosticMacroPiece>(*I))
if (MP->containsEvent())
return true;
@@ -38,14 +38,14 @@
static size_t GetNumCharsToLastNonPeriod(const char *s) {
const char *start = s;
- const char *lastNonPeriod = 0;
+ const char *lastNonPeriod = 0;
for ( ; *s != '\0' ; ++s)
if (*s != '.') lastNonPeriod = s;
-
+
if (!lastNonPeriod)
return 0;
-
+
return (lastNonPeriod - start) + 1;
}
@@ -84,7 +84,7 @@
if (deletePieces)
for (iterator I=begin(), E=end(); I!=E; ++I)
delete &*I;
-
+
path.clear();
}
@@ -97,7 +97,7 @@
Category(category, GetNumCharsToLastNonPeriod(category)) {}
PathDiagnostic::PathDiagnostic(const std::string& bugtype,
- const std::string& desc,
+ const std::string& desc,
const std::string& category)
: Size(0),
BugType(bugtype, 0, GetNumCharsToLastNonPeriod(bugtype)),
@@ -106,11 +106,11 @@
void PathDiagnosticClient::HandleDiagnostic(Diagnostic::Level DiagLevel,
const DiagnosticInfo &Info) {
-
+
// Create a PathDiagnostic with a single piece.
-
+
PathDiagnostic* D = new PathDiagnostic();
-
+
const char *LevelStr;
switch (DiagLevel) {
default:
@@ -124,18 +124,18 @@
llvm::SmallString<100> StrC;
StrC += LevelStr;
Info.FormatDiagnostic(StrC);
-
+
PathDiagnosticPiece *P =
new PathDiagnosticEventPiece(Info.getLocation(),
std::string(StrC.begin(), StrC.end()));
-
+
for (unsigned i = 0, e = Info.getNumRanges(); i != e; ++i)
P->addRange(Info.getRange(i));
for (unsigned i = 0, e = Info.getNumCodeModificationHints(); i != e; ++i)
P->addCodeModificationHint(Info.getCodeModificationHint(i));
D->push_front(P);
- HandlePathDiagnostic(D);
+ HandlePathDiagnostic(D);
}
//===----------------------------------------------------------------------===//
@@ -155,7 +155,7 @@
case DeclK:
return FullSourceLoc(D->getLocation(), const_cast<SourceManager&>(*SM));
}
-
+
return FullSourceLoc(R.getBegin(), const_cast<SourceManager&>(*SM));
}
@@ -178,7 +178,7 @@
if (DS->isSingleDecl()) {
// Should always be the case, but we'll be defensive.
return SourceRange(DS->getLocStart(),
- DS->getSingleDecl()->getLocation());
+ DS->getSingleDecl()->getLocation());
}
break;
}
@@ -197,7 +197,7 @@
return SourceRange(L, L);
}
}
-
+
return S->getSourceRange();
}
case DeclK:
@@ -219,7 +219,7 @@
return PathDiagnosticRange(SourceRange(L, L), true);
}
}
-
+
return R;
}
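
GetNumCharsToLastNonPeriod above is used when storing bug-type and category strings in a PathDiagnostic, bounding the string at its last character that is not a period so trailing '.' characters are dropped. A quick standalone check of that behavior (the test strings below are made up, not taken from the analyzer):

    #include <cassert>
    #include <cstddef>

    // Length of 's' up to and including the last non-'.' character; 0 if the
    // string is empty or consists only of periods.
    static size_t GetNumCharsToLastNonPeriod(const char *s) {
      const char *start = s;
      const char *lastNonPeriod = 0;
      for ( ; *s != '\0' ; ++s)
        if (*s != '.') lastNonPeriod = s;
      if (!lastNonPeriod)
        return 0;
      return (lastNonPeriod - start) + 1;
    }

    int main() {
      assert(GetNumCharsToLastNonPeriod("Dead store.") == 10); // trailing '.' dropped
      assert(GetNumCharsToLastNonPeriod("...") == 0);          // only periods
      assert(GetNumCharsToLastNonPeriod("Logic error") == 11); // unchanged length
    }
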
diff --git a/lib/Analysis/RangeConstraintManager.cpp b/lib/Analysis/RangeConstraintManager.cpp
index 079462e..73b445e 100644
--- a/lib/Analysis/RangeConstraintManager.cpp
+++ b/lib/Analysis/RangeConstraintManager.cpp
@@ -7,7 +7,7 @@
//
//===----------------------------------------------------------------------===//
//
-// This file defines RangeConstraintManager, a class that tracks simple
+// This file defines RangeConstraintManager, a class that tracks simple
// equality and inequality constraints on symbolic values of GRState.
//
//===----------------------------------------------------------------------===//
@@ -66,7 +66,7 @@
// consistent (instead of comparing by pointer values) and can potentially
// be used to speed up some of the operations in RangeSet.
static inline bool isLess(key_type_ref lhs, key_type_ref rhs) {
- return *lhs.first < *rhs.first || (!(*rhs.first < *lhs.first) &&
+ return *lhs.first < *rhs.first || (!(*rhs.first < *lhs.first) &&
*lhs.second < *rhs.second);
}
};
@@ -78,7 +78,7 @@
typedef llvm::ImmutableSet<Range, RangeTrait> PrimRangeSet;
PrimRangeSet ranges; // no need to make const, since it is an
// ImmutableSet - this allows default operator=
- // to work.
+ // to work.
public:
typedef PrimRangeSet::Factory Factory;
typedef PrimRangeSet::iterator iterator;
@@ -88,13 +88,13 @@
iterator begin() const { return ranges.begin(); }
iterator end() const { return ranges.end(); }
-
+
bool isEmpty() const { return ranges.isEmpty(); }
-
+
/// Construct a new RangeSet representing '{ [from, to] }'.
RangeSet(Factory &F, const llvm::APSInt &from, const llvm::APSInt &to)
: ranges(F.Add(F.GetEmptySet(), Range(from, to))) {}
-
+
/// Profile - Generates a hash profile of this RangeSet for use
/// by FoldingSet.
void Profile(llvm::FoldingSetNodeID &ID) const { ranges.Profile(ID); }
@@ -122,7 +122,7 @@
  /// value not be equal to V.
RangeSet AddNE(BasicValueFactory &BV, Factory &F, const llvm::APSInt &V) {
PrimRangeSet newRanges = ranges;
-
+
// FIXME: We can perhaps enhance ImmutableSet to do this search for us
// in log(N) time using the sorted property of the internal AVL tree.
for (iterator i = begin(), e = end(); i != e; ++i) {
@@ -134,11 +134,11 @@
newRanges = F.Add(newRanges, Range(i->From(), BV.Sub1(V)));
if (V != i->To())
newRanges = F.Add(newRanges, Range(BV.Add1(V), i->To()));
- // All of the ranges are non-overlapping, so we can stop.
+ // All of the ranges are non-overlapping, so we can stop.
break;
}
}
-
+
return newRanges;
}
@@ -153,7 +153,7 @@
else if (i->To() < V)
newRanges = F.Add(newRanges, *i);
}
-
+
return newRanges;
}
@@ -168,7 +168,7 @@
else if (i->To() <= V)
newRanges = F.Add(newRanges, *i);
}
-
+
return newRanges;
}
@@ -181,7 +181,7 @@
else if (i->From() > V)
newRanges = F.Add(newRanges, *i);
}
-
+
return newRanges;
}
@@ -208,13 +208,13 @@
isFirst = false;
else
os << ", ";
-
+
os << '[' << i->From().toString(10) << ", " << i->To().toString(10)
<< ']';
}
- os << " }";
+ os << " }";
}
-
+
bool operator==(const RangeSet &other) const {
return ranges == other.ranges;
}
@@ -227,13 +227,13 @@
template<>
struct GRStateTrait<ConstraintRange>
: public GRStatePartialTrait<ConstraintRangeTy> {
- static inline void* GDMIndex() { return &ConstraintRangeIndex; }
+ static inline void* GDMIndex() { return &ConstraintRangeIndex; }
};
-}
-
+}
+
namespace {
class VISIBILITY_HIDDEN RangeConstraintManager : public SimpleConstraintManager{
- RangeSet GetRange(const GRState *state, SymbolRef sym);
+ RangeSet GetRange(const GRState *state, SymbolRef sym);
public:
RangeConstraintManager() {}
@@ -256,7 +256,7 @@
const llvm::APSInt& V);
const llvm::APSInt* getSymVal(const GRState* St, SymbolRef sym) const;
-
+
// FIXME: Refactor into SimpleConstraintManager?
bool isEqual(const GRState* St, SymbolRef sym, const llvm::APSInt& V) const {
const llvm::APSInt *i = getSymVal(St, sym);
@@ -265,7 +265,7 @@
const GRState* RemoveDeadBindings(const GRState* St, SymbolReaper& SymReaper);
- void print(const GRState* St, llvm::raw_ostream& Out,
+ void print(const GRState* St, llvm::raw_ostream& Out,
const char* nl, const char *sep);
private:
@@ -294,11 +294,11 @@
ConstraintRangeTy::Factory& CRFactory = state->get_context<ConstraintRange>();
for (ConstraintRangeTy::iterator I = CR.begin(), E = CR.end(); I != E; ++I) {
- SymbolRef sym = I.getKey();
+ SymbolRef sym = I.getKey();
if (SymReaper.maybeDead(sym))
CR = CRFactory.Remove(CR, sym);
}
-
+
return state->set<ConstraintRange>(CR);
}
@@ -310,11 +310,11 @@
RangeConstraintManager::GetRange(const GRState *state, SymbolRef sym) {
if (ConstraintRangeTy::data_type* V = state->get<ConstraintRange>(sym))
return *V;
-
+
// Lazily generate a new RangeSet representing all possible values for the
// given symbol type.
QualType T = state->getSymbolManager().getType(sym);
- BasicValueFactory& BV = state->getBasicVals();
+ BasicValueFactory& BV = state->getBasicVals();
return RangeSet(F, BV.getMinValue(T), BV.getMaxValue(T));
}
@@ -341,16 +341,16 @@
// Pretty-printing.
//===------------------------------------------------------------------------===/
-void RangeConstraintManager::print(const GRState* St, llvm::raw_ostream& Out,
+void RangeConstraintManager::print(const GRState* St, llvm::raw_ostream& Out,
const char* nl, const char *sep) {
-
+
ConstraintRangeTy Ranges = St->get<ConstraintRange>();
-
+
if (Ranges.isEmpty())
return;
-
+
Out << nl << sep << "ranges of symbol values:";
-
+
for (ConstraintRangeTy::iterator I=Ranges.begin(), E=Ranges.end(); I!=E; ++I){
Out << nl << ' ' << I.getKey() << " : ";
I.getData().print(Out);
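
AddNE above removes a single point from a set of disjoint, inclusive ranges by splitting the range that contains it into an [From, V-1] piece and a [V+1, To] piece, stopping early because the ranges cannot overlap. The same splitting step over plain integer pairs, as a self-contained sketch (the types below are illustrative, not the analyzer's RangeSet):

    #include <iostream>
    #include <utility>
    #include <vector>

    typedef std::pair<long long, long long> Range;   // inclusive [From, To]

    // Remove the single value V from a list of disjoint inclusive ranges,
    // splitting the range that contains it into at most two pieces.
    static std::vector<Range> AddNE(const std::vector<Range> &Ranges, long long V) {
      std::vector<Range> Out;
      for (std::vector<Range>::const_iterator I = Ranges.begin(), E = Ranges.end();
           I != E; ++I) {
        if (V < I->first || V > I->second) {          // V not in this range: keep it.
          Out.push_back(*I);
          continue;
        }
        if (V != I->first)
          Out.push_back(std::make_pair(I->first, V - 1));   // piece below V
        if (V != I->second)
          Out.push_back(std::make_pair(V + 1, I->second));  // piece above V
        // The ranges are disjoint, so no other range can contain V.
      }
      return Out;
    }

    int main() {
      std::vector<Range> S(1, std::make_pair(0LL, 10LL));
      std::vector<Range> R = AddNE(S, 5);
      for (unsigned i = 0; i < R.size(); ++i)
        std::cout << '[' << R[i].first << ", " << R[i].second << "]\n"; // [0,4] [6,10]
    }
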
diff --git a/lib/Analysis/RegionStore.cpp b/lib/Analysis/RegionStore.cpp
index 5114035..4186690 100644
--- a/lib/Analysis/RegionStore.cpp
+++ b/lib/Analysis/RegionStore.cpp
@@ -41,21 +41,21 @@
namespace {
struct VISIBILITY_HIDDEN minimal_features_tag {};
-struct VISIBILITY_HIDDEN maximal_features_tag {};
-
+struct VISIBILITY_HIDDEN maximal_features_tag {};
+
class VISIBILITY_HIDDEN RegionStoreFeatures {
bool SupportsFields;
bool SupportsRemaining;
-
+
public:
RegionStoreFeatures(minimal_features_tag) :
SupportsFields(false), SupportsRemaining(false) {}
-
+
RegionStoreFeatures(maximal_features_tag) :
SupportsFields(true), SupportsRemaining(false) {}
-
+
void enableFields(bool t) { SupportsFields = t; }
-
+
bool supportsFields() const { return SupportsFields; }
bool supportsRemaining() const { return SupportsRemaining; }
};
@@ -107,7 +107,7 @@
static bool IsAnyPointerOrIntptr(QualType ty, ASTContext &Ctx) {
if (ty->isAnyPointerType())
return true;
-
+
return ty->isIntegerType() && ty->isScalarType() &&
Ctx.getTypeSize(ty) == Ctx.getTypeSize(Ctx.VoidPtrTy);
}
@@ -117,10 +117,10 @@
//===----------------------------------------------------------------------===//
namespace {
-
+
class VISIBILITY_HIDDEN RegionStoreSubRegionMap : public SubRegionMap {
typedef llvm::ImmutableSet<const MemRegion*> SetTy;
- typedef llvm::DenseMap<const MemRegion*, SetTy> Map;
+ typedef llvm::DenseMap<const MemRegion*, SetTy> Map;
SetTy::Factory F;
Map M;
public:
@@ -135,27 +135,27 @@
I->second = F.Add(I->second, SubRegion);
return false;
}
-
+
void process(llvm::SmallVectorImpl<const SubRegion*> &WL, const SubRegion *R);
-
+
~RegionStoreSubRegionMap() {}
-
+
bool iterSubRegions(const MemRegion* Parent, Visitor& V) const {
Map::iterator I = M.find(Parent);
if (I == M.end())
return true;
-
+
llvm::ImmutableSet<const MemRegion*> S = I->second;
for (llvm::ImmutableSet<const MemRegion*>::iterator SI=S.begin(),SE=S.end();
SI != SE; ++SI) {
if (!V.Visit(Parent, *SI))
return false;
}
-
+
return true;
}
-
+
typedef SetTy::iterator iterator;
std::pair<iterator, iterator> begin_end(const MemRegion *R) {
@@ -163,13 +163,13 @@
SetTy S = I == M.end() ? F.GetEmptySet() : I->second;
return std::make_pair(S.begin(), S.end());
}
-};
+};
class VISIBILITY_HIDDEN RegionStoreManager : public StoreManager {
const RegionStoreFeatures Features;
RegionBindings::Factory RBFactory;
public:
- RegionStoreManager(GRStateManager& mgr, const RegionStoreFeatures &f)
+ RegionStoreManager(GRStateManager& mgr, const RegionStoreFeatures &f)
: StoreManager(mgr),
Features(f),
RBFactory(mgr.getAllocator()) {}
@@ -177,14 +177,14 @@
virtual ~RegionStoreManager() {}
SubRegionMap *getSubRegionMap(const GRState *state);
-
+
RegionStoreSubRegionMap *getRegionStoreSubRegionMap(const GRState *state);
-
-
+
+
/// getDefaultBinding - Returns an SVal* representing an optional default
/// binding associated with a region and its subregions.
Optional<SVal> getDefaultBinding(const GRState *state, const MemRegion *R);
-
+
/// getLValueString - Returns an SVal representing the lvalue of a
/// StringLiteral. Within RegionStore a StringLiteral has an
/// associated StringRegion, and the lvalue of a StringLiteral is
@@ -202,11 +202,11 @@
/// VarRegion, and the lvalue of the variable is the lvalue of that region.
SVal getLValueVar(const GRState *ST, const VarDecl *VD,
const LocationContext *LC);
-
+
SVal getLValueIvar(const GRState *state, const ObjCIvarDecl* D, SVal Base);
SVal getLValueField(const GRState *state, SVal Base, const FieldDecl* D);
-
+
SVal getLValueFieldOrIvar(const GRState *state, SVal Base, const Decl* D);
SVal getLValueElement(const GRState *state, QualType elementType,
@@ -224,7 +224,7 @@
SVal EvalBinOp(const GRState *state, BinaryOperator::Opcode Op,Loc L,
NonLoc R, QualType resultTy);
- Store getInitialStore(const LocationContext *InitLoc) {
+ Store getInitialStore(const LocationContext *InitLoc) {
return RBFactory.GetEmptyMap().getRoot();
}
@@ -234,20 +234,20 @@
const GRState *InvalidateRegion(const GRState *state, const MemRegion *R,
const Expr *E, unsigned Count);
-
+
private:
void RemoveSubRegionBindings(RegionBindings &B,
RegionDefaultBindings &DVM,
RegionDefaultBindings::Factory &DVMFactory,
const MemRegion *R,
RegionStoreSubRegionMap &M);
-
-public:
+
+public:
const GRState *Bind(const GRState *state, Loc LV, SVal V);
const GRState *BindCompoundLiteral(const GRState *state,
const CompoundLiteralExpr* CL, SVal V);
-
+
const GRState *BindDecl(const GRState *ST, const VarDecl *VD,
const LocationContext *LC, SVal InitVal);
@@ -258,10 +258,10 @@
/// BindStruct - Bind a compound value to a structure.
const GRState *BindStruct(const GRState *, const TypedRegion* R, SVal V);
-
+
const GRState *BindArray(const GRState *state, const TypedRegion* R, SVal V);
-
- /// KillStruct - Set the entire struct to unknown.
+
+ /// KillStruct - Set the entire struct to unknown.
const GRState *KillStruct(const GRState *state, const TypedRegion* R);
const GRState *setDefaultValue(const GRState *state, const MemRegion* R, SVal V);
@@ -271,7 +271,7 @@
//===------------------------------------------------------------------===//
// Loading values from regions.
//===------------------------------------------------------------------===//
-
+
/// The high level logic for this method is this:
/// Retrieve (L)
/// if L has binding
@@ -289,28 +289,28 @@
SVal RetrieveElement(const GRState *state, const ElementRegion *R);
SVal RetrieveField(const GRState *state, const FieldRegion *R);
-
+
SVal RetrieveObjCIvar(const GRState *state, const ObjCIvarRegion *R);
-
+
SVal RetrieveVar(const GRState *state, const VarRegion *R);
-
+
SVal RetrieveLazySymbol(const GRState *state, const TypedRegion *R);
-
+
SVal RetrieveFieldOrElementCommon(const GRState *state, const TypedRegion *R,
QualType Ty, const MemRegion *superR);
-
+
/// Retrieve the values in a struct and return a CompoundVal, used when doing
- /// struct copy:
- /// struct s x, y;
+ /// struct copy:
+ /// struct s x, y;
/// x = y;
/// y's value is retrieved by this method.
SVal RetrieveStruct(const GRState *St, const TypedRegion* R);
-
+
SVal RetrieveArray(const GRState *St, const TypedRegion* R);
-
+
std::pair<const GRState*, const MemRegion*>
GetLazyBinding(RegionBindings B, const MemRegion *R);
-
+
const GRState* CopyLazyBindings(nonloc::LazyCompoundVal V,
const GRState *state,
const TypedRegion *R);
@@ -318,7 +318,7 @@
//===------------------------------------------------------------------===//
// State pruning.
//===------------------------------------------------------------------===//
-
+
/// RemoveDeadBindings - Scans the RegionStore of 'state' for dead values.
/// It returns a new Store with these values removed.
void RemoveDeadBindings(GRState &state, Stmt* Loc, SymbolReaper& SymReaper,
@@ -327,14 +327,14 @@
//===------------------------------------------------------------------===//
// Region "extents".
//===------------------------------------------------------------------===//
-
+
const GRState *setExtent(const GRState *state, const MemRegion* R, SVal Extent);
SVal getSizeInElements(const GRState *state, const MemRegion* R);
//===------------------------------------------------------------------===//
// Utility methods.
//===------------------------------------------------------------------===//
-
+
static inline RegionBindings GetRegionBindings(Store store) {
return RegionBindings(static_cast<const RegionBindings::TreeTy*>(store));
}
@@ -350,7 +350,7 @@
BasicValueFactory& getBasicVals() {
return StateMgr.getBasicVals();
}
-
+
// FIXME: Remove.
ASTContext& getContext() { return StateMgr.getContext(); }
};
@@ -374,31 +374,31 @@
void
RegionStoreSubRegionMap::process(llvm::SmallVectorImpl<const SubRegion*> &WL,
- const SubRegion *R) {
+ const SubRegion *R) {
const MemRegion *superR = R->getSuperRegion();
if (add(superR, R))
if (const SubRegion *sr = dyn_cast<SubRegion>(superR))
- WL.push_back(sr);
+ WL.push_back(sr);
}
RegionStoreSubRegionMap*
RegionStoreManager::getRegionStoreSubRegionMap(const GRState *state) {
RegionBindings B = GetRegionBindings(state->getStore());
RegionStoreSubRegionMap *M = new RegionStoreSubRegionMap();
-
+
llvm::SmallVector<const SubRegion*, 10> WL;
for (RegionBindings::iterator I=B.begin(), E=B.end(); I!=E; ++I)
if (const SubRegion *R = dyn_cast<SubRegion>(I.getKey()))
M->process(WL, R);
-
+
RegionDefaultBindings DVM = state->get<RegionDefaultValue>();
for (RegionDefaultBindings::iterator I = DVM.begin(), E = DVM.end();
- I != E; ++I)
+ I != E; ++I)
if (const SubRegion *R = dyn_cast<SubRegion>(I.getKey()))
M->process(WL, R);
- // We also need to record in the subregion map "intermediate" regions that
+ // We also need to record in the subregion map "intermediate" regions that
// don't have direct bindings but are super regions of those that do.
while (!WL.empty()) {
const SubRegion *R = WL.back();
@@ -423,12 +423,12 @@
RegionDefaultBindings::Factory &DVMFactory,
const MemRegion *R,
RegionStoreSubRegionMap &M) {
-
+
RegionStoreSubRegionMap::iterator I, E;
for (llvm::tie(I, E) = M.begin_end(R); I != E; ++I)
RemoveSubRegionBindings(B, DVM, DVMFactory, *I, M);
-
+
B = RBFactory.Remove(B, R);
DVM = DVMFactory.Remove(DVM, R);
}
@@ -439,48 +439,48 @@
const Expr *E,
unsigned Count) {
ASTContext& Ctx = StateMgr.getContext();
-
+
// Strip away casts.
R = R->getBaseRegion();
// Remove the bindings to subregions.
- {
+ {
// Get the mapping of regions -> subregions.
llvm::OwningPtr<RegionStoreSubRegionMap>
SubRegions(getRegionStoreSubRegionMap(state));
-
+
RegionBindings B = GetRegionBindings(state->getStore());
- RegionDefaultBindings DVM = state->get<RegionDefaultValue>();
+ RegionDefaultBindings DVM = state->get<RegionDefaultValue>();
RegionDefaultBindings::Factory &DVMFactory =
state->get_context<RegionDefaultValue>();
-
- RemoveSubRegionBindings(B, DVM, DVMFactory, R, *SubRegions.get());
+
+ RemoveSubRegionBindings(B, DVM, DVMFactory, R, *SubRegions.get());
state = state->makeWithStore(B.getRoot())->set<RegionDefaultValue>(DVM);
}
if (!R->isBoundable())
return state;
-
+
if (isa<AllocaRegion>(R) || isa<SymbolicRegion>(R) ||
isa<ObjCObjectRegion>(R)) {
- // Invalidate the region by setting its default value to
+ // Invalidate the region by setting its default value to
    // conjured symbol. The type of the symbol is irrelevant.
SVal V = ValMgr.getConjuredSymbolVal(E, Ctx.IntTy, Count);
return setDefaultValue(state, R, V);
}
-
+
const TypedRegion *TR = cast<TypedRegion>(R);
QualType T = TR->getValueType(Ctx);
-
+
if (const RecordType *RT = T->getAsStructureType()) {
// FIXME: handle structs with default region value.
const RecordDecl *RD = RT->getDecl()->getDefinition(Ctx);
-
+
// No record definition. There is nothing we can do.
if (!RD)
return state;
-
- // Invalidate the region by setting its default value to
+
+ // Invalidate the region by setting its default value to
    // conjured symbol. The type of the symbol is irrelevant.
SVal V = ValMgr.getConjuredSymbolVal(E, Ctx.IntTy, Count);
return setDefaultValue(state, R, V);
@@ -492,7 +492,7 @@
Count);
return setDefaultValue(state, TR, V);
}
-
+
SVal V = ValMgr.getConjuredSymbolVal(E, T, Count);
assert(SymbolManager::canSymbolicate(T) || V.isUnknown());
return Bind(state, ValMgr.makeLoc(TR), V);
@@ -506,7 +506,7 @@
/// StringLiteral. Within RegionStore a StringLiteral has an
/// associated StringRegion, and the lvalue of a StringLiteral is the
/// lvalue of that region.
-SVal RegionStoreManager::getLValueString(const GRState *St,
+SVal RegionStoreManager::getLValueString(const GRState *St,
const StringLiteral* S) {
return loc::MemRegionVal(MRMgr.getStringRegion(S));
}
@@ -525,7 +525,7 @@
/// is the lvalue of that region.
SVal
RegionStoreManager::getLValueCompoundLiteral(const GRState *St,
- const CompoundLiteralExpr* CL) {
+ const CompoundLiteralExpr* CL) {
return loc::MemRegionVal(MRMgr.getCompoundLiteralRegion(CL));
}
@@ -567,7 +567,7 @@
assert(0 && "Unhandled Base.");
return Base;
}
-
+
// NOTE: We must have this check first because ObjCIvarDecl is a subclass
// of FieldDecl.
if (const ObjCIvarDecl *ID = dyn_cast<ObjCIvarDecl>(D))
@@ -595,10 +595,10 @@
// Pointer of any type can be cast and used as array base.
const ElementRegion *ElemR = dyn_cast<ElementRegion>(BaseRegion);
-
+
// Convert the offset to the appropriate size and signedness.
Offset = ValMgr.convertToArrayIndex(Offset);
-
+
if (!ElemR) {
//
// If the base region is not an ElementRegion, create one.
@@ -612,23 +612,23 @@
return loc::MemRegionVal(MRMgr.getElementRegion(elementType, Offset,
BaseRegion, getContext()));
}
-
+
SVal BaseIdx = ElemR->getIndex();
-
+
if (!isa<nonloc::ConcreteInt>(BaseIdx))
return UnknownVal();
-
+
const llvm::APSInt& BaseIdxI = cast<nonloc::ConcreteInt>(BaseIdx).getValue();
const llvm::APSInt& OffI = cast<nonloc::ConcreteInt>(Offset).getValue();
assert(BaseIdxI.isSigned());
-
+
// Compute the new index.
SVal NewIdx = nonloc::ConcreteInt(getBasicVals().getValue(BaseIdxI + OffI));
-
+
// Construct the new ElementRegion.
const MemRegion *ArrayR = ElemR->getSuperRegion();
return loc::MemRegionVal(MRMgr.getElementRegion(elementType, NewIdx, ArrayR,
- getContext()));
+ getContext()));
}
//===----------------------------------------------------------------------===//
@@ -637,12 +637,12 @@
SVal RegionStoreManager::getSizeInElements(const GRState *state,
const MemRegion *R) {
-
+
switch (R->getKind()) {
case MemRegion::MemSpaceRegionKind:
assert(0 && "Cannot index into a MemSpace");
- return UnknownVal();
-
+ return UnknownVal();
+
case MemRegion::CodeTextRegionKind:
// Technically this can happen if people do funny things with casts.
return UnknownVal();
@@ -656,23 +656,23 @@
case MemRegion::ObjCObjectRegionKind:
case MemRegion::SymbolicRegionKind:
return UnknownVal();
-
+
case MemRegion::StringRegionKind: {
const StringLiteral* Str = cast<StringRegion>(R)->getStringLiteral();
- // We intentionally made the size value signed because it participates in
+ // We intentionally made the size value signed because it participates in
// operations with signed indices.
return ValMgr.makeIntVal(Str->getByteLength()+1, false);
}
-
+
case MemRegion::VarRegionKind: {
const VarRegion* VR = cast<VarRegion>(R);
// Get the type of the variable.
QualType T = VR->getDesugaredValueType(getContext());
-
+
// FIXME: Handle variable-length arrays.
if (isa<VariableArrayType>(T))
return UnknownVal();
-
+
if (const ConstantArrayType* CAT = dyn_cast<ConstantArrayType>(T)) {
// return the size as signed integer.
return ValMgr.makeIntVal(CAT->getSize(), false);
@@ -682,7 +682,7 @@
// essentially are arrays of size 1.
return ValMgr.makeIntVal(1, false);
}
-
+
case MemRegion::BEG_DECL_REGIONS:
case MemRegion::END_DECL_REGIONS:
case MemRegion::BEG_TYPED_REGIONS:
@@ -690,7 +690,7 @@
assert(0 && "Infeasible region");
return UnknownVal();
}
-
+
assert(0 && "Unreachable");
return UnknownVal();
}
@@ -714,29 +714,29 @@
SVal RegionStoreManager::ArrayToPointer(Loc Array) {
if (!isa<loc::MemRegionVal>(Array))
return UnknownVal();
-
+
const MemRegion* R = cast<loc::MemRegionVal>(&Array)->getRegion();
const TypedRegion* ArrayR = dyn_cast<TypedRegion>(R);
-
+
if (!ArrayR)
return UnknownVal();
-
+
// Strip off typedefs from the ArrayRegion's ValueType.
QualType T = ArrayR->getValueType(getContext())->getDesugaredType();
ArrayType *AT = cast<ArrayType>(T);
T = AT->getElementType();
-
+
SVal ZeroIdx = ValMgr.makeZeroArrayIndex();
ElementRegion* ER = MRMgr.getElementRegion(T, ZeroIdx, ArrayR, getContext());
-
- return loc::MemRegionVal(ER);
+
+ return loc::MemRegionVal(ER);
}
//===----------------------------------------------------------------------===//
// Pointer arithmetic.
//===----------------------------------------------------------------------===//
-SVal RegionStoreManager::EvalBinOp(const GRState *state,
+SVal RegionStoreManager::EvalBinOp(const GRState *state,
BinaryOperator::Opcode Op, Loc L, NonLoc R,
QualType resultTy) {
// Assume the base location is MemRegionVal.
@@ -752,15 +752,15 @@
SymbolRef Sym = SR->getSymbol();
QualType T = Sym->getType(getContext());
QualType EleTy;
-
+
if (const PointerType *PT = T->getAs<PointerType>())
EleTy = PT->getPointeeType();
else
EleTy = T->getAsObjCObjectPointerType()->getPointeeType();
-
+
SVal ZeroIdx = ValMgr.makeZeroArrayIndex();
ER = MRMgr.getElementRegion(EleTy, ZeroIdx, SR, getContext());
- break;
+ break;
}
case MemRegion::AllocaRegionKind: {
const AllocaRegion *AR = cast<AllocaRegion>(MR);
@@ -768,14 +768,14 @@
QualType EleTy = T->getAs<PointerType>()->getPointeeType();
SVal ZeroIdx = ValMgr.makeZeroArrayIndex();
ER = MRMgr.getElementRegion(EleTy, ZeroIdx, AR, getContext());
- break;
+ break;
}
case MemRegion::ElementRegionKind: {
ER = cast<ElementRegion>(MR);
break;
}
-
+
// Not yet handled.
case MemRegion::VarRegionKind:
case MemRegion::StringRegionKind:
@@ -784,15 +784,15 @@
case MemRegion::ObjCObjectRegionKind:
case MemRegion::ObjCIvarRegionKind:
return UnknownVal();
-
+
case MemRegion::CodeTextRegionKind:
// Technically this can happen if people do funny things with casts.
return UnknownVal();
-
+
case MemRegion::MemSpaceRegionKind:
assert(0 && "Cannot perform pointer arithmetic on a MemSpace");
return UnknownVal();
-
+
case MemRegion::BEG_DECL_REGIONS:
case MemRegion::END_DECL_REGIONS:
case MemRegion::BEG_TYPED_REGIONS:
@@ -815,7 +815,7 @@
getContext());
return ValMgr.makeLoc(NewER);
}
-
+
return UnknownVal();
}
@@ -825,7 +825,7 @@
Optional<SVal> RegionStoreManager::getDefaultBinding(const GRState *state,
const MemRegion *R) {
-
+
if (R->isBoundable())
if (const TypedRegion *TR = dyn_cast<TypedRegion>(R))
if (TR->getValueType(getContext())->isUnionType())
@@ -837,21 +837,21 @@
static bool IsReinterpreted(QualType RTy, QualType UsedTy, ASTContext &Ctx) {
RTy = Ctx.getCanonicalType(RTy);
UsedTy = Ctx.getCanonicalType(UsedTy);
-
+
if (RTy == UsedTy)
return false;
-
-
+
+
// Recursively check the types. We basically want to see if a pointer value
- // is ever reinterpreted as a non-pointer, e.g. void** and intptr_t*
+ // is ever reinterpreted as a non-pointer, e.g. void** and intptr_t*
// represents a reinterpretation.
if (Loc::IsLocType(RTy) && Loc::IsLocType(UsedTy)) {
- const PointerType *PRTy = RTy->getAs<PointerType>();
+ const PointerType *PRTy = RTy->getAs<PointerType>();
const PointerType *PUsedTy = UsedTy->getAs<PointerType>();
return PUsedTy && PRTy &&
IsReinterpreted(PRTy->getPointeeType(),
- PUsedTy->getPointeeType(), Ctx);
+ PUsedTy->getPointeeType(), Ctx);
}
return true;
@@ -878,17 +878,17 @@
// c = *p;
if (isa<AllocaRegion>(MR))
return SValuator::CastResult(state, UnknownVal());
-
+
if (isa<SymbolicRegion>(MR)) {
ASTContext &Ctx = getContext();
SVal idx = ValMgr.makeZeroArrayIndex();
assert(!T.isNull());
MR = MRMgr.getElementRegion(T, idx, MR, Ctx);
}
-
+
if (isa<CodeTextRegion>(MR))
return SValuator::CastResult(state, UnknownVal());
-
+
// FIXME: Perhaps this method should just take a 'const MemRegion*' argument
// instead of 'Loc', and have the other Loc cases handled at a higher level.
const TypedRegion *R = cast<TypedRegion>(MR);
@@ -911,12 +911,12 @@
RTy = T;
assert(Ctx.getCanonicalType(RTy) ==
Ctx.getCanonicalType(R->getValueType(Ctx)));
- }
+ }
#endif
if (RTy->isStructureType())
return SValuator::CastResult(state, RetrieveStruct(state, R));
-
+
// FIXME: Handle unions.
if (RTy->isUnionType())
return SValuator::CastResult(state, UnknownVal());
@@ -933,10 +933,10 @@
if (const ElementRegion* ER = dyn_cast<ElementRegion>(R))
return CastRetrievedVal(RetrieveElement(state, ER), state, ER, T);
-
+
if (const ObjCIvarRegion *IVR = dyn_cast<ObjCIvarRegion>(R))
return CastRetrievedVal(RetrieveObjCIvar(state, IVR), state, IVR, T);
-
+
if (const VarRegion *VR = dyn_cast<VarRegion>(R))
return CastRetrievedVal(RetrieveVar(state, VR), state, VR, T);
@@ -967,26 +967,26 @@
return SValuator::CastResult(state,
ValMgr.getRegionValueSymbolValOrUnknown(R, RTy));
}
-
+
std::pair<const GRState*, const MemRegion*>
RegionStoreManager::GetLazyBinding(RegionBindings B, const MemRegion *R) {
if (const nonloc::LazyCompoundVal *V =
dyn_cast_or_null<nonloc::LazyCompoundVal>(B.lookup(R)))
return std::make_pair(V->getState(), V->getRegion());
-
+
if (const ElementRegion *ER = dyn_cast<ElementRegion>(R)) {
const std::pair<const GRState *, const MemRegion *> &X =
GetLazyBinding(B, ER->getSuperRegion());
-
+
if (X.first)
return std::make_pair(X.first,
MRMgr.getElementRegionWithSuper(ER, X.second));
- }
+ }
else if (const FieldRegion *FR = dyn_cast<FieldRegion>(R)) {
const std::pair<const GRState *, const MemRegion *> &X =
GetLazyBinding(B, FR->getSuperRegion());
-
+
if (X.first)
return std::make_pair(X.first,
MRMgr.getFieldRegionWithSuper(FR, X.second));
@@ -1010,23 +1010,23 @@
SVal Idx = R->getIndex();
if (nonloc::ConcreteInt *CI = dyn_cast<nonloc::ConcreteInt>(&Idx)) {
int64_t i = CI->getValue().getSExtValue();
- int64_t byteLength = Str->getByteLength();
+ int64_t byteLength = Str->getByteLength();
if (i > byteLength) {
// Buffer overflow checking in GRExprEngine should handle this case,
// but we shouldn't rely on it to not overflow here if that checking
// is disabled.
return UnknownVal();
- }
+ }
char c = (i == byteLength) ? '\0' : Str->getStrData()[i];
return ValMgr.makeIntVal(c, getContext().CharTy);
}
}
-
+
// Special case: the current region represents a cast and it and the super
// region both have pointer types or intptr_t types. If so, perform the
// retrieve from the super region and appropriately "cast" the value.
// This is needed to support OSAtomicCompareAndSwap and friends or other
- // loads that treat integers as pointers and vis versa.
+  // loads that treat integers as pointers and vice versa.
if (R->getIndex().isZeroConstant()) {
if (const TypedRegion *superTR = dyn_cast<TypedRegion>(superR)) {
ASTContext &Ctx = getContext();
@@ -1054,21 +1054,21 @@
// Handle LazyCompoundVals for the immediate super region. Other cases
// are handled in 'RetrieveFieldOrElementCommon'.
- if (const nonloc::LazyCompoundVal *LCV =
+ if (const nonloc::LazyCompoundVal *LCV =
dyn_cast<nonloc::LazyCompoundVal>(V)) {
-
+
R = MRMgr.getElementRegionWithSuper(R, LCV->getRegion());
return RetrieveElement(LCV->getState(), R);
}
-
+
// Other cases: give up.
return UnknownVal();
}
-
+
return RetrieveFieldOrElementCommon(state, R, R->getElementType(), superR);
}
-SVal RegionStoreManager::RetrieveField(const GRState* state,
+SVal RegionStoreManager::RetrieveField(const GRState* state,
const FieldRegion* R) {
// Check if the region has a binding.
@@ -1079,76 +1079,76 @@
QualType Ty = R->getValueType(getContext());
return RetrieveFieldOrElementCommon(state, R, Ty, R->getSuperRegion());
}
-
+
SVal RegionStoreManager::RetrieveFieldOrElementCommon(const GRState *state,
const TypedRegion *R,
QualType Ty,
const MemRegion *superR) {
- // At this point we have already checked in either RetrieveElement or
+ // At this point we have already checked in either RetrieveElement or
// RetrieveField if 'R' has a direct binding.
-
+
RegionBindings B = GetRegionBindings(state->getStore());
-
+
while (superR) {
if (const Optional<SVal> &D = getDefaultBinding(state, superR)) {
if (SymbolRef parentSym = D->getAsSymbol())
return ValMgr.getDerivedRegionValueSymbolVal(parentSym, R);
-
+
if (D->isZeroConstant())
return ValMgr.makeZeroVal(Ty);
-
+
if (D->isUnknown())
return *D;
-
+
assert(0 && "Unknown default value");
}
-
+
// If our super region is a field or element itself, walk up the region
// hierarchy to see if there is a default value installed in an ancestor.
if (isa<FieldRegion>(superR) || isa<ElementRegion>(superR)) {
superR = cast<SubRegion>(superR)->getSuperRegion();
continue;
}
-
+
break;
}
-
+
// Lazy binding?
const GRState *lazyBindingState = NULL;
const MemRegion *lazyBindingRegion = NULL;
llvm::tie(lazyBindingState, lazyBindingRegion) = GetLazyBinding(B, R);
-
+
if (lazyBindingState) {
assert(lazyBindingRegion && "Lazy-binding region not set");
-
+
if (isa<ElementRegion>(R))
return RetrieveElement(lazyBindingState,
cast<ElementRegion>(lazyBindingRegion));
-
+
return RetrieveField(lazyBindingState,
cast<FieldRegion>(lazyBindingRegion));
- }
-
+ }
+
if (R->hasStackStorage() && !R->hasParametersStorage()) {
-
+
if (isa<ElementRegion>(R)) {
// Currently we don't reason specially about Clang-style vectors. Check
// if superR is a vector and if so return Unknown.
if (const TypedRegion *typedSuperR = dyn_cast<TypedRegion>(superR)) {
if (typedSuperR->getValueType(getContext())->isVectorType())
return UnknownVal();
- }
+ }
}
-
+
return UndefinedVal();
}
-
+
// All other values are symbolic.
return ValMgr.getRegionValueSymbolValOrUnknown(R, Ty);
}
-
-SVal RegionStoreManager::RetrieveObjCIvar(const GRState* state,
+
+SVal RegionStoreManager::RetrieveObjCIvar(const GRState* state,
const ObjCIvarRegion* R) {
// Check if the region has a binding.
@@ -1156,50 +1156,50 @@
if (const SVal* V = B.lookup(R))
return *V;
-
+
const MemRegion *superR = R->getSuperRegion();
// Check if the super region has a binding.
if (const SVal *V = B.lookup(superR)) {
if (SymbolRef parentSym = V->getAsSymbol())
return ValMgr.getDerivedRegionValueSymbolVal(parentSym, R);
-
+
// Other cases: give up.
return UnknownVal();
}
-
+
return RetrieveLazySymbol(state, R);
}
SVal RegionStoreManager::RetrieveVar(const GRState *state,
const VarRegion *R) {
-
+
// Check if the region has a binding.
RegionBindings B = GetRegionBindings(state->getStore());
-
+
if (const SVal* V = B.lookup(R))
return *V;
-
+
// Lazily derive a value for the VarRegion.
const VarDecl *VD = R->getDecl();
-
+
if (R->hasGlobalsOrParametersStorage())
return ValMgr.getRegionValueSymbolValOrUnknown(R, VD->getType());
-
+
return UndefinedVal();
}
-SVal RegionStoreManager::RetrieveLazySymbol(const GRState *state,
+SVal RegionStoreManager::RetrieveLazySymbol(const GRState *state,
const TypedRegion *R) {
-
+
QualType valTy = R->getValueType(getContext());
// All other values are symbolic.
return ValMgr.getRegionValueSymbolValOrUnknown(R, valTy);
}
-SVal RegionStoreManager::RetrieveStruct(const GRState *state,
- const TypedRegion* R){
+SVal RegionStoreManager::RetrieveStruct(const GRState *state,
+ const TypedRegion* R) {
QualType T = R->getValueType(getContext());
assert(T->isStructureType());
@@ -1240,7 +1240,7 @@
for (uint64_t i = 0; i < size; ++i) {
SVal Idx = ValMgr.makeArrayIndex(i);
ElementRegion* ER = MRMgr.getElementRegion(CAT->getElementType(), Idx, R,
- getContext());
+ getContext());
QualType ETy = ER->getElementType();
SVal ElementVal = Retrieve(state, loc::MemRegionVal(ER), ETy).getSVal();
ArrayVal = getBasicVals().consVals(ElementVal, ArrayVal);
@@ -1259,15 +1259,15 @@
Store RegionStoreManager::Remove(Store store, Loc L) {
const MemRegion* R = 0;
-
+
if (isa<loc::MemRegionVal>(L))
R = cast<loc::MemRegionVal>(L).getRegion();
-
+
if (R) {
- RegionBindings B = GetRegionBindings(store);
+ RegionBindings B = GetRegionBindings(store);
return RBFactory.Remove(B, R).getRoot();
}
-
+
return store;
}
@@ -1277,17 +1277,17 @@
// If we get here, the location should be a region.
const MemRegion *R = cast<loc::MemRegionVal>(L).getRegion();
-
+
// Check if the region is a struct region.
if (const TypedRegion* TR = dyn_cast<TypedRegion>(R))
if (TR->getValueType(getContext())->isStructureType())
return BindStruct(state, TR, V);
-
+
// Special case: the current region represents a cast and it and the super
// region both have pointer types or intptr_t types. If so, perform the
// bind to the super region.
// This is needed to support OSAtomicCompareAndSwap and friends or other
- // loads that treat integers as pointers and vis versa.
+  // loads that treat integers as pointers and vice versa.
if (const ElementRegion *ER = dyn_cast<ElementRegion>(R)) {
if (ER->getIndex().isZeroConstant()) {
if (const TypedRegion *superR =
@@ -1295,17 +1295,17 @@
ASTContext &Ctx = getContext();
QualType superTy = superR->getValueType(Ctx);
QualType erTy = ER->getValueType(Ctx);
-
- if (IsAnyPointerOrIntptr(superTy, Ctx) &&
+
+ if (IsAnyPointerOrIntptr(superTy, Ctx) &&
IsAnyPointerOrIntptr(erTy, Ctx)) {
- SValuator::CastResult cr =
- ValMgr.getSValuator().EvalCast(V, state, superTy, erTy);
+ SValuator::CastResult cr =
+ ValMgr.getSValuator().EvalCast(V, state, superTy, erTy);
return Bind(cr.getState(), loc::MemRegionVal(superR), cr.getSVal());
}
}
}
}
-
+
// Perform the binding.
RegionBindings B = GetRegionBindings(state->getStore());
return state->makeWithStore(RBFactory.Add(B, R, V).getRoot());
@@ -1332,7 +1332,7 @@
RegionStoreManager::BindCompoundLiteral(const GRState *state,
const CompoundLiteralExpr* CL,
SVal V) {
-
+
CompoundLiteralRegion* R = MRMgr.getCompoundLiteralRegion(CL);
return Bind(state, loc::MemRegionVal(R), V);
}
@@ -1376,12 +1376,12 @@
// Handle lazy compound values.
if (nonloc::LazyCompoundVal *LCV = dyn_cast<nonloc::LazyCompoundVal>(&Init))
return CopyLazyBindings(*LCV, state, R);
-
- // Remaining case: explicit compound values.
+
+ // Remaining case: explicit compound values.
nonloc::CompoundVal& CV = cast<nonloc::CompoundVal>(Init);
nonloc::CompoundVal::iterator VI = CV.begin(), VE = CV.end();
uint64_t i = 0;
-
+
for (; i < size; ++i, ++VI) {
// The init list might be shorter than the array length.
if (VI == VE)
@@ -1411,10 +1411,10 @@
const GRState *
RegionStoreManager::BindStruct(const GRState *state, const TypedRegion* R,
SVal V) {
-
+
if (!Features.supportsFields())
return state;
-
+
QualType T = R->getValueType(getContext());
assert(T->isStructureType());
@@ -1427,7 +1427,7 @@
// Handle lazy compound values.
if (const nonloc::LazyCompoundVal *LCV = dyn_cast<nonloc::LazyCompoundVal>(&V))
return CopyLazyBindings(*LCV, state, R);
-
+
// We may get non-CompoundVal accidentally due to imprecise cast logic.
// Ignore them and kill the field values.
if (V.isUnknown() || !isa<nonloc::CompoundVal>(V))
@@ -1447,7 +1447,7 @@
FieldRegion* FR = MRMgr.getFieldRegion(*FI, R);
if (Loc::IsLocType(FTy) || FTy->isIntegerType())
- state = Bind(state, ValMgr.makeLoc(FR), *VI);
+ state = Bind(state, ValMgr.makeLoc(FR), *VI);
else if (FTy->isArrayType())
state = BindArray(state, FR, *VI);
else if (FTy->isStructureType())
@@ -1484,7 +1484,7 @@
const MemRegion* R, SVal V) {
return state->set<RegionDefaultValue>(R, V);
}
-
+
const GRState*
RegionStoreManager::CopyLazyBindings(nonloc::LazyCompoundVal V,
const GRState *state,
@@ -1496,46 +1496,46 @@
RegionDefaultBindings::Factory &DVMFactory =
state->get_context<RegionDefaultValue>();
- llvm::OwningPtr<RegionStoreSubRegionMap>
+ llvm::OwningPtr<RegionStoreSubRegionMap>
SubRegions(getRegionStoreSubRegionMap(state));
- // B and DVM are updated after the call to RemoveSubRegionBindings.
+ // B and DVM are updated after the call to RemoveSubRegionBindings.
RemoveSubRegionBindings(B, DVM, DVMFactory, R, *SubRegions.get());
-
+
// Now copy the bindings. This amounts to just binding 'V' to 'R'. This
// results in a zero-copy algorithm.
return state->makeWithStore(RBFactory.Add(B, R, V).getRoot());
}
-
+
//===----------------------------------------------------------------------===//
// State pruning.
//===----------------------------------------------------------------------===//
-
+
static void UpdateLiveSymbols(SVal X, SymbolReaper& SymReaper) {
if (loc::MemRegionVal *XR = dyn_cast<loc::MemRegionVal>(&X)) {
const MemRegion *R = XR->getRegion();
-
+
while (R) {
if (const SymbolicRegion *SR = dyn_cast<SymbolicRegion>(R)) {
SymReaper.markLive(SR->getSymbol());
return;
}
-
+
if (const SubRegion *SR = dyn_cast<SubRegion>(R)) {
R = SR->getSuperRegion();
continue;
}
-
+
break;
}
-
+
return;
}
-
+
for (SVal::symbol_iterator SI=X.symbol_begin(), SE=X.symbol_end();SI!=SE;++SI)
SymReaper.markLive(*SI);
}
-
+
namespace {
class VISIBILITY_HIDDEN TreeScanner {
RegionBindings B;
@@ -1558,71 +1558,71 @@
: B(b), DB(db), SymReaper(symReaper), Marked(marked),
ScannedLazyVals(scannedLazyVals), M(m),
RS(rs), RegionRoots(regionRoots), MarkKeys(markKeys) {}
-
+
void scanTree(const MemRegion *R);
};
} // end anonymous namespace
-
-
+
+
void TreeScanner::scanTree(const MemRegion *R) {
if (MarkKeys) {
if (Marked.count(R))
- return;
-
+ return;
+
Marked.insert(R);
}
-
+
// Mark the symbol for any live SymbolicRegion as "live". This means we
// should continue to track that symbol.
if (const SymbolicRegion* SymR = dyn_cast<SymbolicRegion>(R))
SymReaper.markLive(SymR->getSymbol());
-
+
// Get the data binding for R (if any).
const SVal* Xptr = B.lookup(R);
-
+
// Check for lazy bindings.
if (const nonloc::LazyCompoundVal *V =
dyn_cast_or_null<nonloc::LazyCompoundVal>(Xptr)) {
-
- const LazyCompoundValData *D = V->getCVData();
+
+ const LazyCompoundValData *D = V->getCVData();
if (!ScannedLazyVals.count(D)) {
// Scan the bindings in the LazyCompoundVal.
ScannedLazyVals.insert(D);
-
+
// FIXME: Cache subregion maps.
const GRState *lazyState = D->getState();
llvm::OwningPtr<RegionStoreSubRegionMap>
lazySM(RS.getRegionStoreSubRegionMap(lazyState));
-
+
Store lazyStore = lazyState->getStore();
RegionBindings lazyB = RS.GetRegionBindings(lazyStore);
-
+
RegionDefaultBindings lazyDB = lazyState->get<RegionDefaultValue>();
-
+
// Scan the bindings.
TreeScanner scan(lazyB, lazyDB, SymReaper, Marked, ScannedLazyVals,
*lazySM.get(), RS, RegionRoots, false);
-
+
scan.scanTree(D->getRegion());
}
}
- else {
- // No direct binding? Get the default binding for R (if any).
+ else {
+ // No direct binding? Get the default binding for R (if any).
if (!Xptr)
Xptr = DB.lookup(R);
-
+
// Direct or default binding?
if (Xptr) {
SVal X = *Xptr;
UpdateLiveSymbols(X, SymReaper); // Update the set of live symbols.
-
+
// If X is a region, then add it to the RegionRoots.
if (const MemRegion *RX = X.getAsRegion()) {
RegionRoots.push_back(RX);
// Mark the super region of the RX as live.
- // e.g.: int x; char *y = (char*) &x; if (*y) ...
+ // e.g.: int x; char *y = (char*) &x; if (*y) ...
// 'y' => element region. 'x' is its super region.
if (const SubRegion *SR = dyn_cast<SubRegion>(RX)) {
RegionRoots.push_back(SR->getSuperRegion());
@@ -1630,39 +1630,39 @@
}
}
}
-
- RegionStoreSubRegionMap::iterator I, E;
+
+ RegionStoreSubRegionMap::iterator I, E;
for (llvm::tie(I, E) = M.begin_end(R); I != E; ++I)
scanTree(*I);
}
-void RegionStoreManager::RemoveDeadBindings(GRState &state, Stmt* Loc,
+void RegionStoreManager::RemoveDeadBindings(GRState &state, Stmt* Loc,
SymbolReaper& SymReaper,
llvm::SmallVectorImpl<const MemRegion*>& RegionRoots)
-{
+{
Store store = state.getStore();
RegionBindings B = GetRegionBindings(store);
-
+
// Lazily constructed backmap from MemRegions to SubRegions.
typedef llvm::ImmutableSet<const MemRegion*> SubRegionsTy;
typedef llvm::ImmutableMap<const MemRegion*, SubRegionsTy> SubRegionsMapTy;
-
+
// The backmap from regions to subregions.
llvm::OwningPtr<RegionStoreSubRegionMap>
SubRegions(getRegionStoreSubRegionMap(&state));
-
+
// Do a pass over the regions in the store. For VarRegions we check if
// the variable is still live and if so add it to the list of live roots.
- // For other regions we populate our region backmap.
+ // For other regions we populate our region backmap.
llvm::SmallVector<const MemRegion*, 10> IntermediateRoots;
-
+
// Scan the direct bindings for "intermediate" roots.
for (RegionBindings::iterator I = B.begin(), E = B.end(); I != E; ++I) {
const MemRegion *R = I.getKey();
IntermediateRoots.push_back(R);
}
-
+
// Scan the default bindings for "intermediate" roots.
RegionDefaultBindings DVM = state.get<RegionDefaultValue>();
for (RegionDefaultBindings::iterator I = DVM.begin(), E = DVM.end();
@@ -1672,18 +1672,18 @@
}
// Process the "intermediate" roots to find if they are referenced by
- // real roots.
+ // real roots.
while (!IntermediateRoots.empty()) {
const MemRegion* R = IntermediateRoots.back();
IntermediateRoots.pop_back();
-
+
if (const VarRegion* VR = dyn_cast<VarRegion>(R)) {
if (SymReaper.isLive(Loc, VR->getDecl())) {
RegionRoots.push_back(VR); // This is a live "root".
}
continue;
}
-
+
if (const SymbolicRegion* SR = dyn_cast<SymbolicRegion>(R)) {
if (SymReaper.isLive(SR->getSymbol()))
RegionRoots.push_back(SR);
@@ -1695,9 +1695,9 @@
dyn_cast<SubRegion>(cast<SubRegion>(R)->getSuperRegion()))
IntermediateRoots.push_back(superR);
}
-
+
// Process the worklist of RegionRoots. This performs a "mark-and-sweep"
- // of the store. We want to find all live symbols and dead regions.
+ // of the store. We want to find all live symbols and dead regions.
llvm::DenseSet<const MemRegion*> Marked;
llvm::DenseSet<const LazyCompoundValData*> LazyVals;
TreeScanner TS(B, DVM, SymReaper, Marked, LazyVals, *SubRegions.get(),
@@ -1707,59 +1707,59 @@
const MemRegion *R = RegionRoots.back();
RegionRoots.pop_back();
TS.scanTree(R);
- }
-
+ }
+
// We have now scanned the store, marking reachable regions and symbols
// as live. We now remove all the regions that are dead from the store
- // as well as update DSymbols with the set symbols that are now dead.
+  // as well as update DSymbols with the set of symbols that are now dead.
for (RegionBindings::iterator I = B.begin(), E = B.end(); I != E; ++I) {
const MemRegion* R = I.getKey();
    // Is this region live? If so, none of its symbols are dead.
if (Marked.count(R))
continue;
-
+
// Remove this dead region from the store.
store = Remove(store, ValMgr.makeLoc(R));
-
+
// Mark all non-live symbols that this region references as dead.
if (const SymbolicRegion* SymR = dyn_cast<SymbolicRegion>(R))
SymReaper.maybeDead(SymR->getSymbol());
-
+
SVal X = I.getData();
SVal::symbol_iterator SI = X.symbol_begin(), SE = X.symbol_end();
for (; SI != SE; ++SI)
SymReaper.maybeDead(*SI);
}
-
- // Remove dead 'default' bindings.
+
+ // Remove dead 'default' bindings.
RegionDefaultBindings NewDVM = DVM;
- RegionDefaultBindings::Factory &DVMFactory =
+ RegionDefaultBindings::Factory &DVMFactory =
state.get_context<RegionDefaultValue>();
-
+
for (RegionDefaultBindings::iterator I = DVM.begin(), E = DVM.end();
I != E; ++I) {
const MemRegion *R = I.getKey();
-
+
    // Is this region live? If so, none of its symbols are dead.
if (Marked.count(R))
continue;
-
+
// Remove this dead region.
NewDVM = DVMFactory.Remove(NewDVM, R);
-
+
// Mark all non-live symbols that this region references as dead.
if (const SymbolicRegion* SymR = dyn_cast<SymbolicRegion>(R))
SymReaper.maybeDead(SymR->getSymbol());
-
+
SVal X = I.getData();
SVal::symbol_iterator SI = X.symbol_begin(), SE = X.symbol_end();
for (; SI != SE; ++SI)
SymReaper.maybeDead(*SI);
}
-
+
// Write the store back.
state.setStore(store);
-
+
// Write the updated default bindings back.
// FIXME: Right now this involves a fetching of a persistent state.
// We can do better.
@@ -1775,7 +1775,7 @@
const char* nl, const char *sep) {
RegionBindings B = GetRegionBindings(store);
OS << "Store (direct bindings):" << nl;
-
+
for (RegionBindings::iterator I = B.begin(), E = B.end(); I != E; ++I)
- OS << ' ' << I.getKey() << " : " << I.getData() << nl;
+ OS << ' ' << I.getKey() << " : " << I.getData() << nl;
}
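
RemoveDeadBindings above is a mark-and-sweep over the store: the region roots are marked, every region reachable through a binding is marked in turn, and bindings of unmarked regions are swept while their symbols are reported to the SymbolReaper. The same shape on a toy region-to-region binding map (std::map and std::string stand in for the real region and value types; none of this is the analyzer's API):

    #include <iostream>
    #include <map>
    #include <set>
    #include <string>
    #include <vector>

    typedef std::map<std::string, std::string> Bindings; // region -> bound region

    // Keep only the bindings of regions reachable from the roots.
    static Bindings removeDeadBindings(const Bindings &B,
                                       std::vector<std::string> Roots) {
      std::set<std::string> Marked;
      while (!Roots.empty()) {                    // mark phase
        std::string R = Roots.back();
        Roots.pop_back();
        if (!Marked.insert(R).second)
          continue;                               // already visited
        Bindings::const_iterator I = B.find(R);
        if (I != B.end())
          Roots.push_back(I->second);             // the bound value is a region too
      }
      Bindings Live;                              // sweep phase
      for (Bindings::const_iterator I = B.begin(), E = B.end(); I != E; ++I)
        if (Marked.count(I->first))
          Live.insert(*I);
      return Live;
    }

    int main() {
      Bindings B;
      B["x"] = "heapA";                // x holds a pointer to heapA
      B["heapA"] = "heapB";            // heapA holds a pointer to heapB
      B["stale"] = "heapC";            // nothing live refers to 'stale'

      std::vector<std::string> Roots(1, "x");
      Bindings Live = removeDeadBindings(B, Roots);
      for (Bindings::iterator I = Live.begin(), E = Live.end(); I != E; ++I)
        std::cout << I->first << " -> " << I->second << "\n"; // keeps heapA and x
    }
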
diff --git a/lib/Analysis/SVals.cpp b/lib/Analysis/SVals.cpp
index 91674b8..688b7ff 100644
--- a/lib/Analysis/SVals.cpp
+++ b/lib/Analysis/SVals.cpp
@@ -58,7 +58,7 @@
return NULL;
}
-/// getAsLocSymbol - If this SVal is a location (subclasses Loc) and
+/// getAsLocSymbol - If this SVal is a location (subclasses Loc) and
/// wraps a symbol, return that SymbolRef. Otherwise return 0.
// FIXME: should we consider SymbolRef wrapped in CodeTextRegion?
SymbolRef SVal::getAsLocSymbol() const {
@@ -76,11 +76,11 @@
SymbolRef SVal::getAsSymbol() const {
if (const nonloc::SymbolVal *X = dyn_cast<nonloc::SymbolVal>(this))
return X->getSymbol();
-
+
if (const nonloc::SymExprVal *X = dyn_cast<nonloc::SymExprVal>(this))
if (SymbolRef Y = dyn_cast<SymbolData>(X->getSymbolicExpression()))
return Y;
-
+
return getAsLocSymbol();
}
@@ -89,7 +89,7 @@
const SymExpr *SVal::getAsSymbolicExpression() const {
if (const nonloc::SymExprVal *X = dyn_cast<nonloc::SymExprVal>(this))
return X->getSymbolicExpression();
-
+
return getAsSymbol();
}
@@ -115,13 +115,13 @@
SVal::symbol_iterator::symbol_iterator(const SymExpr *SE) {
itr.push_back(SE);
- while (!isa<SymbolData>(itr.back())) expand();
+ while (!isa<SymbolData>(itr.back())) expand();
}
SVal::symbol_iterator& SVal::symbol_iterator::operator++() {
assert(!itr.empty() && "attempting to iterate on an 'end' iterator");
assert(isa<SymbolData>(itr.back()));
- itr.pop_back();
+ itr.pop_back();
if (!itr.empty())
while (!isa<SymbolData>(itr.back())) expand();
return *this;
@@ -135,17 +135,17 @@
void SVal::symbol_iterator::expand() {
const SymExpr *SE = itr.back();
itr.pop_back();
-
+
if (const SymIntExpr *SIE = dyn_cast<SymIntExpr>(SE)) {
itr.push_back(SIE->getLHS());
return;
- }
+ }
else if (const SymSymExpr *SSE = dyn_cast<SymSymExpr>(SE)) {
itr.push_back(SSE->getLHS());
itr.push_back(SSE->getRHS());
return;
}
-
+
assert(false && "unhandled expansion case");
}
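`SVal::symbol_iterator` walks a symbolic expression as a tree: `expand()` pops a non-leaf node (`SymIntExpr`, `SymSymExpr`) off an explicit stack and pushes its symbolic operands, repeating until the top of the stack is a `SymbolData` leaf. The sketch below is an eager version of the same traversal over a toy expression type; the names are illustrative, not the clang classes:

```cpp
#include <vector>

// Toy symbolic expression: a leaf symbol, or a node with one or two
// symbolic operands (mirroring SymIntExpr / SymSymExpr).
struct Expr {
  const Expr *lhs = nullptr;   // null for leaves
  const Expr *rhs = nullptr;   // null for leaves and sym-int style nodes
  bool isLeaf() const { return !lhs && !rhs; }
};

// Collect every leaf reachable from E, depth-first, with an explicit stack,
// which is the shape of SVal::symbol_iterator's expand() loop.
std::vector<const Expr *> collectLeaves(const Expr *E) {
  std::vector<const Expr *> stack{E}, leaves;
  while (!stack.empty()) {
    const Expr *top = stack.back();
    stack.pop_back();
    if (top->isLeaf()) {
      leaves.push_back(top);
      continue;
    }
    if (top->lhs) stack.push_back(top->lhs);
    if (top->rhs) stack.push_back(top->rhs);
  }
  return leaves;
}
```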
@@ -189,10 +189,10 @@
SVal nonloc::ConcreteInt::evalBinOp(ValueManager &ValMgr,
BinaryOperator::Opcode Op,
- const nonloc::ConcreteInt& R) const {
+ const nonloc::ConcreteInt& R) const {
const llvm::APSInt* X =
ValMgr.getBasicValueFactory().EvaluateAPSInt(Op, getValue(), R.getValue());
-
+
if (X)
return nonloc::ConcreteInt(*X);
else
@@ -215,12 +215,12 @@
SVal loc::ConcreteInt::EvalBinOp(BasicValueFactory& BasicVals,
BinaryOperator::Opcode Op,
const loc::ConcreteInt& R) const {
-
+
assert (Op == BinaryOperator::Add || Op == BinaryOperator::Sub ||
(Op >= BinaryOperator::LT && Op <= BinaryOperator::NE));
-
+
const llvm::APSInt* X = BasicVals.EvaluateAPSInt(Op, getValue(), R.getValue());
-
+
if (X)
return loc::ConcreteInt(*X);
else
@@ -234,40 +234,40 @@
void SVal::dump() const { dumpToStream(llvm::errs()); }
void SVal::dumpToStream(llvm::raw_ostream& os) const {
- switch (getBaseKind()) {
+ switch (getBaseKind()) {
case UnknownKind:
os << "Invalid";
- break;
+ break;
case NonLocKind:
cast<NonLoc>(this)->dumpToStream(os);
- break;
+ break;
case LocKind:
cast<Loc>(this)->dumpToStream(os);
- break;
+ break;
case UndefinedKind:
os << "Undefined";
- break;
+ break;
default:
assert (false && "Invalid SVal.");
}
}
void NonLoc::dumpToStream(llvm::raw_ostream& os) const {
- switch (getSubKind()) {
+ switch (getSubKind()) {
case nonloc::ConcreteIntKind:
os << cast<nonloc::ConcreteInt>(this)->getValue().getZExtValue();
if (cast<nonloc::ConcreteInt>(this)->getValue().isUnsigned())
- os << 'U';
- break;
+ os << 'U';
+ break;
case nonloc::SymbolValKind:
os << '$' << cast<nonloc::SymbolVal>(this)->getSymbol();
- break;
+ break;
case nonloc::SymExprValKind: {
const nonloc::SymExprVal& C = *cast<nonloc::SymExprVal>(this);
const SymExpr *SE = C.getSymbolicExpression();
os << SE;
break;
- }
+ }
case nonloc::LocAsIntegerKind: {
const nonloc::LocAsInteger& C = *cast<nonloc::LocAsInteger>(this);
os << C.getLoc() << " [as " << C.getNumBits() << " bit integer]";
@@ -278,7 +278,7 @@
os << "compoundVal{";
bool first = true;
for (nonloc::CompoundVal::iterator I=C.begin(), E=C.end(); I!=E; ++I) {
- if (first) {
+ if (first) {
os << ' '; first = false;
}
else
@@ -294,24 +294,24 @@
os << "lazyCompoundVal{" << (void*) C.getState() << ',' << C.getRegion()
<< '}';
break;
- }
+ }
default:
assert (false && "Pretty-printed not implemented for this NonLoc.");
break;
}
}
-void Loc::dumpToStream(llvm::raw_ostream& os) const {
- switch (getSubKind()) {
+void Loc::dumpToStream(llvm::raw_ostream& os) const {
+ switch (getSubKind()) {
case loc::ConcreteIntKind:
os << cast<loc::ConcreteInt>(this)->getValue().getZExtValue() << " (Loc)";
- break;
+ break;
case loc::GotoLabelKind:
os << "&&" << cast<loc::GotoLabel>(this)->getLabel()->getID()->getName();
break;
case loc::MemRegionKind:
os << '&' << cast<loc::MemRegionVal>(this)->getRegion()->getString();
- break;
+ break;
default:
assert(false && "Pretty-printing not implemented for this Loc.");
break;
diff --git a/lib/Analysis/SValuator.cpp b/lib/Analysis/SValuator.cpp
index 2542cfd..383fe45 100644
--- a/lib/Analysis/SValuator.cpp
+++ b/lib/Analysis/SValuator.cpp
@@ -23,94 +23,94 @@
if (L.isUndef() || R.isUndef())
return UndefinedVal();
-
+
if (L.isUnknown() || R.isUnknown())
return UnknownVal();
-
+
if (isa<Loc>(L)) {
if (isa<Loc>(R))
return EvalBinOpLL(Op, cast<Loc>(L), cast<Loc>(R), T);
return EvalBinOpLN(ST, Op, cast<Loc>(L), cast<NonLoc>(R), T);
}
-
+
if (isa<Loc>(R)) {
// Support pointer arithmetic where the increment/decrement operand
- // is on the left and the pointer on the right.
+ // is on the left and the pointer on the right.
assert(Op == BinaryOperator::Add || Op == BinaryOperator::Sub);
-
+
// Commute the operands.
return EvalBinOpLN(ST, Op, cast<Loc>(R), cast<NonLoc>(L), T);
}
- return EvalBinOpNN(Op, cast<NonLoc>(L), cast<NonLoc>(R), T);
+ return EvalBinOpNN(Op, cast<NonLoc>(L), cast<NonLoc>(R), T);
}
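The dispatch above depends on one normalization: if the pointer operand appears on the right of the arithmetic (e.g. `1 + p`), the operands are commuted so the pointer-aware path only ever sees "pointer op integer". A small sketch of that normalization for the addition case, with plain structs standing in for Loc and NonLoc (illustrative names only):

```cpp
#include <cstdint>

// Stand-ins for Loc (pointer-like) and NonLoc (integer-like) operands.
struct PtrVal { std::uint64_t addr; };
struct IntVal { std::int64_t  value; };

// The only shape the pointer-arithmetic path has to handle directly:
// pointer on the left, integer on the right.
PtrVal evalPtrPlusInt(PtrVal p, IntVal n) {
  return {p.addr + static_cast<std::uint64_t>(n.value)};
}

// "1 + p": the integer shows up on the left, so commute and reuse the
// pointer-on-the-left path, much as EvalBinOp forwards to EvalBinOpLN
// with its operands swapped.
PtrVal evalIntPlusPtr(IntVal n, PtrVal p) {
  return evalPtrPlusInt(p, n);
}
```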
-SValuator::CastResult SValuator::EvalCast(SVal val, const GRState *state,
+SValuator::CastResult SValuator::EvalCast(SVal val, const GRState *state,
QualType castTy, QualType originalTy){
-
+
if (val.isUnknownOrUndef() || castTy == originalTy)
return CastResult(state, val);
-
+
ASTContext &C = ValMgr.getContext();
-
+
// For const casts, just propagate the value.
- if (C.getCanonicalType(castTy).getUnqualifiedType() ==
+ if (C.getCanonicalType(castTy).getUnqualifiedType() ==
C.getCanonicalType(originalTy).getUnqualifiedType())
return CastResult(state, val);
-
+
// Check for casts from pointers to integers.
if (castTy->isIntegerType() && Loc::IsLocType(originalTy))
return CastResult(state, EvalCastL(cast<Loc>(val), castTy));
-
+
// Check for casts from integers to pointers.
if (Loc::IsLocType(castTy) && originalTy->isIntegerType()) {
if (nonloc::LocAsInteger *LV = dyn_cast<nonloc::LocAsInteger>(&val)) {
// Just unpackage the lval and return it.
return CastResult(state, LV->getLoc());
}
-
+
goto DispatchCast;
}
-
+
// Just pass through function and block pointers.
if (originalTy->isBlockPointerType() || originalTy->isFunctionPointerType()) {
assert(Loc::IsLocType(castTy));
return CastResult(state, val);
}
-
+
// Check for casts from array type to another type.
if (originalTy->isArrayType()) {
// We will always decay to a pointer.
val = ValMgr.getStateManager().ArrayToPointer(cast<Loc>(val));
-
+
// Are we casting from an array to a pointer? If so just pass on
// the decayed value.
if (castTy->isPointerType())
return CastResult(state, val);
-
+
// Are we casting from an array to an integer? If so, cast the decayed
// pointer value to an integer.
assert(castTy->isIntegerType());
-
+
// FIXME: Keep these here for now in case we decide soon that we
// need the original decayed type.
// QualType elemTy = cast<ArrayType>(originalTy)->getElementType();
// QualType pointerTy = C.getPointerType(elemTy);
return CastResult(state, EvalCastL(cast<Loc>(val), castTy));
}
-
+
// Check for casts from a region to a specific type.
if (const MemRegion *R = val.getAsRegion()) {
// FIXME: We should handle the case where we strip off view layers to get
// to a desugared type.
-
+
assert(Loc::IsLocType(castTy));
// We get a symbolic function pointer for a dereference of a function
// pointer, but it is of function type. Example:
-
+
// struct FPRec {
- // void (*my_func)(int * x);
+ // void (*my_func)(int * x);
// };
//
// int bar(int x);
@@ -120,29 +120,29 @@
// (*foo->my_func)(&x);
// return bar(x)+1; // no-warning
// }
-
+
assert(Loc::IsLocType(originalTy) || originalTy->isFunctionType() ||
originalTy->isBlockPointerType());
-
+
StoreManager &storeMgr = ValMgr.getStateManager().getStoreManager();
-
+
// Delegate to store manager to get the result of casting a region
// to a different type.
const StoreManager::CastResult& Res = storeMgr.CastRegion(state, R, castTy);
-
+
// Inspect the result. If the MemRegion* returned is NULL, this
// expression evaluates to UnknownVal.
R = Res.getRegion();
-
+
if (R)
return CastResult(Res.getState(), loc::MemRegionVal(R));
-
+
return CastResult(Res.getState(), UnknownVal());
}
-
- // All other cases.
+
+ // All other cases.
DispatchCast:
return CastResult(state,
- isa<Loc>(val) ? EvalCastL(cast<Loc>(val), castTy)
+ isa<Loc>(val) ? EvalCastL(cast<Loc>(val), castTy)
: EvalCastNL(cast<NonLoc>(val), castTy));
}
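EvalCast reads as a decision ladder: pass-through cases first (unknown or undefined values, identical or merely requalified types), then pointer-to-integer, integer-to-pointer, function and block pointers, array decay, and finally region casts delegated to the StoreManager, with everything else falling to the generic Loc/NonLoc cast. A compressed sketch of that ordering over a toy type model; the enums and the helper are illustrative, not the clang types:

```cpp
enum class TypeKind { Pointer, Integer, Array, Function, Other };
enum class CastAction {
  Propagate,      // value passes through unchanged
  PointerToInt,   // handled by the Loc cast path
  IntToPointer,   // unwrap a LocAsInteger or fall back to a generic cast
  ArrayDecay,     // decay to the element pointer first
  RegionCast,     // delegate to the store manager's CastRegion
  Generic         // generic Loc / NonLoc cast
};

// Earlier rules win, mirroring the order of the checks in EvalCast.
CastAction classifyCast(TypeKind from, TypeKind to, bool valueIsRegion) {
  if (from == to)
    return CastAction::Propagate;
  if (to == TypeKind::Integer && from == TypeKind::Pointer)
    return CastAction::PointerToInt;
  if (to == TypeKind::Pointer && from == TypeKind::Integer)
    return CastAction::IntToPointer;
  if (from == TypeKind::Function)   // function/block pointers pass through
    return CastAction::Propagate;
  if (from == TypeKind::Array)
    return CastAction::ArrayDecay;
  if (valueIsRegion)
    return CastAction::RegionCast;
  return CastAction::Generic;
}
```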
diff --git a/lib/Analysis/SimpleConstraintManager.cpp b/lib/Analysis/SimpleConstraintManager.cpp
index 82801eb..db3d68a 100644
--- a/lib/Analysis/SimpleConstraintManager.cpp
+++ b/lib/Analysis/SimpleConstraintManager.cpp
@@ -23,10 +23,10 @@
bool SimpleConstraintManager::canReasonAbout(SVal X) const {
if (nonloc::SymExprVal *SymVal = dyn_cast<nonloc::SymExprVal>(&X)) {
const SymExpr *SE = SymVal->getSymbolicExpression();
-
+
if (isa<SymbolData>(SE))
return true;
-
+
if (const SymIntExpr *SIE = dyn_cast<SymIntExpr>(SE)) {
switch (SIE->getOpcode()) {
// We don't reason yet about bitwise-constraints on symbolic values.
@@ -46,7 +46,7 @@
// All other cases.
default:
return true;
- }
+ }
}
return false;
@@ -54,7 +54,7 @@
return true;
}
-
+
const GRState *SimpleConstraintManager::Assume(const GRState *state,
SVal Cond, bool Assumption) {
if (Cond.isUnknown()) {
@@ -74,14 +74,14 @@
// EvalAssume is used to call into the GRTransferFunction object to perform
// any checker-specific update of the state based on this assumption being
- // true or false.
+ // true or false.
return state ? state->getTransferFuncs().EvalAssume(state, Cond, Assumption)
: NULL;
}
const GRState *SimpleConstraintManager::AssumeAux(const GRState *state,
Loc Cond, bool Assumption) {
-
+
BasicValueFactory &BasicVals = state->getBasicVals();
switch (Cond.getSubKind()) {
@@ -91,7 +91,7 @@
case loc::MemRegionKind: {
// FIXME: Should this go into the storemanager?
-
+
const MemRegion *R = cast<loc::MemRegionVal>(Cond).getRegion();
const SubRegion *SubR = dyn_cast<SubRegion>(R);
@@ -99,7 +99,7 @@
// FIXME: now we only find the first symbolic region.
if (const SymbolicRegion *SymR = dyn_cast<SymbolicRegion>(SubR)) {
if (Assumption)
- return AssumeSymNE(state, SymR->getSymbol(),
+ return AssumeSymNE(state, SymR->getSymbol(),
BasicVals.getZeroWithPtrWidth());
else
return AssumeSymEQ(state, SymR->getSymbol(),
@@ -107,15 +107,15 @@
}
SubR = dyn_cast<SubRegion>(SubR->getSuperRegion());
}
-
+
// FALL-THROUGH.
}
-
+
case loc::GotoLabelKind:
return Assumption ? state : NULL;
case loc::ConcreteIntKind: {
- bool b = cast<loc::ConcreteInt>(Cond).getValue() != 0;
+ bool b = cast<loc::ConcreteInt>(Cond).getValue() != 0;
bool isFeasible = b ? Assumption : !Assumption;
return isFeasible ? state : NULL;
}
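For a `loc::MemRegionVal` condition the code walks the chain of super-regions looking for the first `SymbolicRegion`; assuming the pointer non-null then becomes `sym != 0`, and assuming it null becomes `sym == 0`. A standalone sketch of that walk over a toy region chain (names are illustrative, not the clang region hierarchy):

```cpp
#include <optional>

// Toy region: possibly symbolic, with a link to its super (parent) region.
struct Region {
  const Region *super = nullptr;
  int symbol = -1;                   // >= 0 marks a symbolic region
  bool isSymbolic() const { return symbol >= 0; }
};

// Walk towards the base region and return the first underlying symbol,
// mirroring the SubRegion loop in AssumeAux(Loc).
std::optional<int> findUnderlyingSymbol(const Region *R) {
  for (const Region *Cur = R; Cur; Cur = Cur->super)
    if (Cur->isSymbolic())
      return Cur->symbol;
  return std::nullopt;
}

// Assuming the pointer non-null constrains the symbol to be non-zero;
// assuming it null constrains it to be zero.
struct Constraint { int symbol; bool mustBeNonZero; };
std::optional<Constraint> assumeRegionNonNull(const Region *R, bool assumption) {
  if (auto sym = findUnderlyingSymbol(R))
    return Constraint{*sym, assumption};
  return std::nullopt;               // no symbol found: nothing to record
}
```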
@@ -130,7 +130,7 @@
// EvalAssume is used to call into the GRTransferFunction object to perform
// any checker-specific update of the state based on this assumption being
- // true or false.
+ // true or false.
return state ? state->getTransferFuncs().EvalAssume(state, Cond, Assumption)
: NULL;
}
@@ -138,13 +138,13 @@
const GRState *SimpleConstraintManager::AssumeAux(const GRState *state,
NonLoc Cond,
bool Assumption) {
-
+
// We cannot reason about SymIntExpr and SymSymExpr.
if (!canReasonAbout(Cond)) {
// Just return the current state indicating that the path is feasible.
// This may be an over-approximation of what is possible.
return state;
- }
+ }
BasicValueFactory &BasicVals = state->getBasicVals();
SymbolManager &SymMgr = state->getSymbolManager();
@@ -156,7 +156,7 @@
case nonloc::SymbolValKind: {
nonloc::SymbolVal& SV = cast<nonloc::SymbolVal>(Cond);
SymbolRef sym = SV.getSymbol();
- QualType T = SymMgr.getType(sym);
+ QualType T = SymMgr.getType(sym);
const llvm::APSInt &zero = BasicVals.getValue(0, T);
return Assumption ? AssumeSymNE(state, sym, zero)
@@ -167,7 +167,7 @@
nonloc::SymExprVal V = cast<nonloc::SymExprVal>(Cond);
if (const SymIntExpr *SE = dyn_cast<SymIntExpr>(V.getSymbolicExpression()))
return AssumeSymInt(state, Assumption, SE);
-
+
// For all other symbolic expressions, over-approximate and consider
// the constraint feasible.
return state;
@@ -194,7 +194,7 @@
// rest of the constraint manager logic.
SymbolRef Sym = cast<SymbolData>(SE->getLHS());
const llvm::APSInt &Int = SE->getRHS();
-
+
switch (SE->getOpcode()) {
default:
// No logic yet for other operators. Assume the constraint is feasible.
@@ -218,7 +218,7 @@
case BinaryOperator::LT:
return Assumption ? AssumeSymLT(state, Sym, Int)
: AssumeSymGE(state, Sym, Int);
-
+
case BinaryOperator::LE:
return Assumption ? AssumeSymLE(state, Sym, Int)
: AssumeSymGT(state, Sym, Int);
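The switch above encodes a simple duality: asserting `sym op int` on the true branch is the same as asserting the negated comparison on the false branch (EQ and NE swap, LT pairs with GE, GT pairs with LE). A small sketch of that mapping with illustrative enums rather than the clang `BinaryOperator` opcodes:

```cpp
enum class Cmp { EQ, NE, LT, GT, LE, GE };

// The comparison that must hold when the branch condition is false,
// i.e. the pairing AssumeSymInt uses to pick the AssumeSymXX call.
Cmp negate(Cmp op) {
  switch (op) {
  case Cmp::EQ: return Cmp::NE;
  case Cmp::NE: return Cmp::EQ;
  case Cmp::LT: return Cmp::GE;
  case Cmp::GE: return Cmp::LT;
  case Cmp::GT: return Cmp::LE;
  case Cmp::LE: return Cmp::GT;
  }
  return op;   // unreachable; keeps the compiler quiet
}

// 'assumption' selects between the comparison itself and its negation.
Cmp constraintFor(Cmp op, bool assumption) {
  return assumption ? op : negate(op);
}
```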
@@ -226,9 +226,9 @@
}
const GRState *SimpleConstraintManager::AssumeInBound(const GRState *state,
- SVal Idx,
+ SVal Idx,
SVal UpperBound,
- bool Assumption) {
+ bool Assumption) {
// Only support ConcreteInt for now.
if (!(isa<nonloc::ConcreteInt>(Idx) && isa<nonloc::ConcreteInt>(UpperBound)))
diff --git a/lib/Analysis/SimpleConstraintManager.h b/lib/Analysis/SimpleConstraintManager.h
index 1e1a10d..d626dfe 100644
--- a/lib/Analysis/SimpleConstraintManager.h
+++ b/lib/Analysis/SimpleConstraintManager.h
@@ -22,8 +22,8 @@
class SimpleConstraintManager : public ConstraintManager {
public:
SimpleConstraintManager() {}
- virtual ~SimpleConstraintManager();
-
+ virtual ~SimpleConstraintManager();
+
//===------------------------------------------------------------------===//
// Common implementation for the interface provided by ConstraintManager.
//===------------------------------------------------------------------===//
@@ -38,16 +38,16 @@
const GRState *AssumeSymInt(const GRState *state, bool Assumption,
const SymIntExpr *SE);
-
+
const GRState *AssumeInBound(const GRState *state, SVal Idx, SVal UpperBound,
bool Assumption);
-
+
protected:
-
+
//===------------------------------------------------------------------===//
// Interface that subclasses must implement.
//===------------------------------------------------------------------===//
-
+
virtual const GRState *AssumeSymNE(const GRState *state, SymbolRef sym,
const llvm::APSInt& V) = 0;
@@ -65,13 +65,13 @@
virtual const GRState *AssumeSymGE(const GRState *state, SymbolRef sym,
const llvm::APSInt& V) = 0;
-
+
//===------------------------------------------------------------------===//
// Internal implementation.
//===------------------------------------------------------------------===//
-
+
const GRState *AssumeAux(const GRState *state, Loc Cond,bool Assumption);
-
+
const GRState *AssumeAux(const GRState *state, NonLoc Cond, bool Assumption);
};
diff --git a/lib/Analysis/SimpleSValuator.cpp b/lib/Analysis/SimpleSValuator.cpp
index 9850b2e..442845a 100644
--- a/lib/Analysis/SimpleSValuator.cpp
+++ b/lib/Analysis/SimpleSValuator.cpp
@@ -20,22 +20,22 @@
namespace {
class VISIBILITY_HIDDEN SimpleSValuator : public SValuator {
protected:
- virtual SVal EvalCastNL(NonLoc val, QualType castTy);
- virtual SVal EvalCastL(Loc val, QualType castTy);
+ virtual SVal EvalCastNL(NonLoc val, QualType castTy);
+ virtual SVal EvalCastL(Loc val, QualType castTy);
public:
SimpleSValuator(ValueManager &valMgr) : SValuator(valMgr) {}
virtual ~SimpleSValuator() {}
-
- virtual SVal EvalMinus(NonLoc val);
- virtual SVal EvalComplement(NonLoc val);
+
+ virtual SVal EvalMinus(NonLoc val);
+ virtual SVal EvalComplement(NonLoc val);
virtual SVal EvalBinOpNN(BinaryOperator::Opcode op, NonLoc lhs, NonLoc rhs,
QualType resultTy);
virtual SVal EvalBinOpLL(BinaryOperator::Opcode op, Loc lhs, Loc rhs,
QualType resultTy);
virtual SVal EvalBinOpLN(const GRState *state, BinaryOperator::Opcode op,
Loc lhs, NonLoc rhs, QualType resultTy);
-};
+};
} // end anonymous namespace
SValuator *clang::CreateSimpleSValuator(ValueManager &valMgr) {
@@ -47,19 +47,19 @@
//===----------------------------------------------------------------------===//
SVal SimpleSValuator::EvalCastNL(NonLoc val, QualType castTy) {
-
+
bool isLocType = Loc::IsLocType(castTy);
-
+
if (nonloc::LocAsInteger *LI = dyn_cast<nonloc::LocAsInteger>(&val)) {
if (isLocType)
return LI->getLoc();
-
- ASTContext &Ctx = ValMgr.getContext();
-
+
+ ASTContext &Ctx = ValMgr.getContext();
+
// FIXME: Support promotions/truncations.
if (Ctx.getTypeSize(castTy) == Ctx.getTypeSize(Ctx.VoidPtrTy))
return val;
-
+
return UnknownVal();
}
@@ -68,17 +68,17 @@
QualType T = Ctx.getCanonicalType(se->getType(Ctx));
if (T == Ctx.getCanonicalType(castTy))
return val;
-
+
return UnknownVal();
}
-
+
if (!isa<nonloc::ConcreteInt>(val))
return UnknownVal();
-
+
// Only handle casts from integers to integers.
if (!isLocType && !castTy->isIntegerType())
return UnknownVal();
-
+
llvm::APSInt i = cast<nonloc::ConcreteInt>(val).getValue();
i.setIsUnsigned(castTy->isUnsignedIntegerType() || Loc::IsLocType(castTy));
i.extOrTrunc(ValMgr.getContext().getTypeSize(castTy));
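The integer-to-integer path above amounts to: copy the value, force its signedness to match the destination (pointer-sized destinations count as unsigned), and extend or truncate to the destination's bit width. A standalone sketch of that effect using a plain 64-bit container instead of `llvm::APSInt` (illustrative only):

```cpp
#include <cstdint>

// Truncate or extend 'value' to 'bits' and reinterpret it as signed or
// unsigned, roughly the effect of APSInt::setIsUnsigned() + extOrTrunc().
std::int64_t castInteger(std::int64_t value, unsigned bits, bool toUnsigned) {
  if (bits == 0 || bits >= 64)
    return value;                                 // nothing to truncate
  const std::uint64_t mask = (std::uint64_t(1) << bits) - 1;
  std::uint64_t truncated = std::uint64_t(value) & mask;
  if (!toUnsigned) {
    // Sign-extend from 'bits' back up to the 64-bit container.
    const std::uint64_t signBit = std::uint64_t(1) << (bits - 1);
    if (truncated & signBit)
      truncated |= ~mask;
  }
  return std::int64_t(truncated);
}
```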
@@ -90,7 +90,7 @@
}
SVal SimpleSValuator::EvalCastL(Loc val, QualType castTy) {
-
+
// Casts from pointers -> pointers, just return the lval.
//
// Casts from pointers -> references, just return the lval. These
@@ -98,21 +98,21 @@
// casting from va_list* to __builtin_va_list&.
//
assert(!val.isUnknownOrUndef());
-
+
if (Loc::IsLocType(castTy) || castTy->isReferenceType())
return val;
-
+
// FIXME: Handle transparent unions where a value can be "transparently"
// lifted into a union type.
if (castTy->isUnionType())
return UnknownVal();
-
+
assert(castTy->isIntegerType());
unsigned BitWidth = ValMgr.getContext().getTypeSize(castTy);
if (!isa<loc::ConcreteInt>(val))
return ValMgr.makeLocAsInteger(val, BitWidth);
-
+
llvm::APSInt i = cast<loc::ConcreteInt>(val).getValue();
i.setIsUnsigned(castTy->isUnsignedIntegerType() || Loc::IsLocType(castTy));
i.extOrTrunc(BitWidth);
@@ -124,7 +124,7 @@
//===----------------------------------------------------------------------===//
SVal SimpleSValuator::EvalMinus(NonLoc val) {
- switch (val.getSubKind()) {
+ switch (val.getSubKind()) {
case nonloc::ConcreteIntKind:
return cast<nonloc::ConcreteInt>(val).evalMinus(ValMgr);
default:
@@ -158,18 +158,18 @@
}
}
-// Equality operators for Locs.
+// Equality operators for Locs.
// FIXME: All this logic will be revamped when we have MemRegion::getLocation()
// implemented.
static SVal EvalEquality(ValueManager &ValMgr, Loc lhs, Loc rhs, bool isEqual,
QualType resultTy) {
-
+
switch (lhs.getSubKind()) {
default:
assert(false && "EQ/NE not implemented for this Loc.");
return UnknownVal();
-
+
case loc::ConcreteIntKind: {
if (SymbolRef rSym = rhs.getAsSymbol())
return ValMgr.makeNonLoc(rSym,
@@ -178,7 +178,7 @@
cast<loc::ConcreteInt>(lhs).getValue(),
resultTy);
break;
- }
+ }
case loc::MemRegionKind: {
if (SymbolRef lSym = lhs.getAsLocSymbol()) {
if (isa<loc::ConcreteInt>(rhs)) {
@@ -191,11 +191,11 @@
}
break;
}
-
+
case loc::GotoLabelKind:
break;
}
-
+
return ValMgr.makeTruthVal(isEqual ? lhs == rhs : lhs != rhs, resultTy);
}
@@ -220,17 +220,17 @@
case BinaryOperator::NE:
return ValMgr.makeTruthVal(false, resultTy);
}
-
+
while (1) {
switch (lhs.getSubKind()) {
default:
- return UnknownVal();
+ return UnknownVal();
case nonloc::LocAsIntegerKind: {
- Loc lhsL = cast<nonloc::LocAsInteger>(lhs).getLoc();
+ Loc lhsL = cast<nonloc::LocAsInteger>(lhs).getLoc();
switch (rhs.getSubKind()) {
case nonloc::LocAsIntegerKind:
return EvalBinOpLL(op, lhsL, cast<nonloc::LocAsInteger>(rhs).getLoc(),
- resultTy);
+ resultTy);
case nonloc::ConcreteIntKind: {
// Transform the integer into a location and compare.
ASTContext& Ctx = ValMgr.getContext();
@@ -239,7 +239,7 @@
i.extOrTrunc(Ctx.getTypeSize(Ctx.VoidPtrTy));
return EvalBinOpLL(op, lhsL, ValMgr.makeLoc(i), resultTy);
}
- default:
+ default:
switch (op) {
case BinaryOperator::EQ:
return ValMgr.makeTruthVal(false, resultTy);
@@ -250,15 +250,15 @@
return UnknownVal();
}
}
- }
+ }
case nonloc::SymExprValKind: {
- // Logical not?
+ // Logical not?
if (!(op == BinaryOperator::EQ && rhs.isZeroConstant()))
return UnknownVal();
const SymExpr *symExpr =
cast<nonloc::SymExprVal>(lhs).getSymbolicExpression();
-
+
// Only handle ($sym op constant) for now.
if (const SymIntExpr *symIntExpr = dyn_cast<SymIntExpr>(symExpr)) {
BinaryOperator::Opcode opc = symIntExpr->getOpcode();
@@ -301,7 +301,7 @@
case BinaryOperator::GT:
case BinaryOperator::LE:
case BinaryOperator::GE:
- case BinaryOperator::EQ:
+ case BinaryOperator::EQ:
case BinaryOperator::NE:
opc = NegateComparison(opc);
assert(symIntExpr->getType(ValMgr.getContext()) == resultTy);
@@ -310,7 +310,7 @@
}
}
}
- case nonloc::ConcreteIntKind: {
+ case nonloc::ConcreteIntKind: {
if (isa<nonloc::ConcreteInt>(rhs)) {
const nonloc::ConcreteInt& lhsInt = cast<nonloc::ConcreteInt>(lhs);
return lhsInt.evalBinOp(ValMgr, op, cast<nonloc::ConcreteInt>(rhs));
@@ -322,7 +322,7 @@
NonLoc tmp = rhs;
rhs = lhs;
lhs = tmp;
-
+
switch (op) {
case BinaryOperator::LT: op = BinaryOperator::GT; continue;
case BinaryOperator::GT: op = BinaryOperator::LT; continue;
@@ -335,7 +335,7 @@
continue;
default:
return UnknownVal();
- }
+ }
}
}
case nonloc::SymbolValKind: {
@@ -352,7 +352,7 @@
}
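When the concrete integer ends up on the left and the symbolic value on the right, EvalBinOpNN swaps the operands and loops again, reversing the direction of ordered comparisons (LT becomes GT and so on) while symmetric operators keep their opcode. A small sketch of the opcode adjustment with illustrative enums:

```cpp
#include <optional>

enum class BinOp { LT, GT, LE, GE, EQ, NE, Add, Mul };

// Opcode to continue with after swapping the two operands of 'op'.
// Ordered comparisons flip direction; symmetric operators are unchanged;
// anything else would fall back to "unknown" in the real code.
std::optional<BinOp> reverseAfterSwap(BinOp op) {
  switch (op) {
  case BinOp::LT: return BinOp::GT;
  case BinOp::GT: return BinOp::LT;
  case BinOp::LE: return BinOp::GE;
  case BinOp::GE: return BinOp::LE;
  case BinOp::EQ:
  case BinOp::NE:
  case BinOp::Add:
  case BinOp::Mul:
    return op;
  }
  return std::nullopt;
}
```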
SVal SimpleSValuator::EvalBinOpLL(BinaryOperator::Opcode op, Loc lhs, Loc rhs,
- QualType resultTy) {
+ QualType resultTy) {
switch (op) {
default:
return UnknownVal();
@@ -364,7 +364,7 @@
SVal SimpleSValuator::EvalBinOpLN(const GRState *state,
BinaryOperator::Opcode op,
- Loc lhs, NonLoc rhs, QualType resultTy) {
+ Loc lhs, NonLoc rhs, QualType resultTy) {
// Special case: 'rhs' is an integer that has the same width as a pointer and
// we are using the integer location in a comparison. Normally this cannot be
// triggered, but transfer functions like those for OSCompareAndSwapBarrier32
@@ -377,13 +377,13 @@
if (ctx.getTypeSize(ctx.VoidPtrTy) == x->getBitWidth()) {
// Convert the signedness of the integer (if necessary).
if (x->isSigned())
- x = &ValMgr.getBasicValueFactory().getValue(*x, true);
+ x = &ValMgr.getBasicValueFactory().getValue(*x, true);
return EvalBinOpLL(op, lhs, loc::ConcreteInt(*x), resultTy);
}
}
}
-
+
// Delegate pointer arithmetic to the StoreManager.
return state->getStateManager().getStoreManager().EvalBinOp(state, op, lhs,
rhs, resultTy);
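The special case above tests whether the right-hand integer has exactly the width of a pointer; if so it is normalized to unsigned, wrapped as a concrete location, and compared on the Loc/Loc path, otherwise the operation is delegated to the store manager. A toy version of the width test (the pointer width and type names are assumptions for illustration):

```cpp
#include <cstdint>
#include <optional>

constexpr unsigned kPointerBits = 64;   // assumed target pointer width

struct ConcreteInt { std::uint64_t value; unsigned bits; };
struct ConcreteLoc { std::uint64_t addr; };

// If the integer is pointer-sized, reinterpret it as a location so a
// Loc-vs-Loc comparison can be used; otherwise report "not applicable"
// (the real code then delegates to the StoreManager).
std::optional<ConcreteLoc> asLocation(const ConcreteInt &i) {
  if (i.bits != kPointerBits)
    return std::nullopt;
  return ConcreteLoc{i.value};          // signedness already normalized away
}
```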
diff --git a/lib/Analysis/Store.cpp b/lib/Analysis/Store.cpp
index f0ecda7..d1abd57 100644
--- a/lib/Analysis/Store.cpp
+++ b/lib/Analysis/Store.cpp
@@ -27,7 +27,7 @@
// Create a new ElementRegion.
SVal idx = ValMgr.makeArrayIndex(index);
return CastResult(state, MRMgr.getElementRegion(pointeeTy, idx, region,
- ValMgr.getContext()));
+ ValMgr.getContext()));
}
// FIXME: Merge with the implementation of the same method in MemRegion.cpp
@@ -37,16 +37,16 @@
if (!D->getDefinition(Ctx))
return false;
}
-
+
return true;
}
StoreManager::CastResult
StoreManager::CastRegion(const GRState *state, const MemRegion* R,
QualType CastToTy) {
-
+
ASTContext& Ctx = StateMgr.getContext();
-
+
// Handle casts to Objective-C objects.
if (CastToTy->isObjCObjectPointerType())
return CastResult(state, R->getBaseRegion());
@@ -55,7 +55,7 @@
// FIXME: We may need different solutions, depending on the symbol
// involved. Blocks can be cast to/from 'id', as they can be treated
// as Objective-C objects. This could possibly be handled by enhancing
- // our reasoning of downcasts of symbolic objects.
+ // our reasoning of downcasts of symbolic objects.
if (isa<CodeTextRegion>(R) || isa<SymbolicRegion>(R))
return CastResult(state, R);
@@ -72,7 +72,7 @@
// Handle casts to void*. We just pass the region through.
if (CanonPointeeTy.getUnqualifiedType() == Ctx.VoidTy)
return CastResult(state, R);
-
+
// Handle casts from compatible types.
if (R->isBoundable())
if (const TypedRegion *TR = dyn_cast<TypedRegion>(R)) {
@@ -90,7 +90,7 @@
case MemRegion::END_TYPED_REGIONS: {
assert(0 && "Invalid region cast");
break;
- }
+ }
case MemRegion::CodeTextRegionKind: {
// CodeTextRegion should be cast to only a function or block pointer type,
// although they can in practice be cast to anything, e.g., void*,
@@ -98,7 +98,7 @@
// Just pass the region through.
break;
}
-
+
case MemRegion::StringRegionKind:
case MemRegion::ObjCObjectRegionKind:
// FIXME: Need to handle arbitrary downcasts.
@@ -107,9 +107,9 @@
case MemRegion::CompoundLiteralRegionKind:
case MemRegion::FieldRegionKind:
case MemRegion::ObjCIvarRegionKind:
- case MemRegion::VarRegionKind:
+ case MemRegion::VarRegionKind:
return MakeElementRegion(state, R, PointeeTy, CastToTy);
-
+
case MemRegion::ElementRegionKind: {
// If we are casting from an ElementRegion to another type, the
// algorithm is as follows:
@@ -117,51 +117,51 @@
// (1) Compute the "raw offset" of the ElementRegion from the
// base region. This is done by calling 'getAsRawOffset()'.
//
- // (2a) If we get a 'RegionRawOffset' after calling
+ // (2a) If we get a 'RegionRawOffset' after calling
// 'getAsRawOffset()', determine if the absolute offset
- // can be exactly divided into chunks of the size of the
- // casted-pointee type. If so, create a new ElementRegion with
+ // can be exactly divided into chunks of the size of the
+ // casted-pointee type. If so, create a new ElementRegion with
// the pointee-cast type as the new ElementType and the index
// being the offset divided by the chunk size. If not, create
// a new ElementRegion at offset 0 off the raw offset region.
//
// (2b) If we don't get a 'RegionRawOffset' after calling
// 'getAsRawOffset()', it means that we are at offset 0.
- //
+ //
// FIXME: Handle symbolic raw offsets.
-
+
const ElementRegion *elementR = cast<ElementRegion>(R);
const RegionRawOffset &rawOff = elementR->getAsRawOffset();
const MemRegion *baseR = rawOff.getRegion();
-
+
// If we cannot compute a raw offset, throw up our hands and return
// a NULL MemRegion*.
if (!baseR)
return CastResult(state, NULL);
-
+
int64_t off = rawOff.getByteOffset();
-
+
if (off == 0) {
// Edge case: we are at 0 bytes off the beginning of baseR. We
// check to see if the type we are casting to is the same as the base
- // region. If so, just return the base region.
+ // region. If so, just return the base region.
if (const TypedRegion *TR = dyn_cast<TypedRegion>(baseR)) {
QualType ObjTy = Ctx.getCanonicalType(TR->getValueType(Ctx));
QualType CanonPointeeTy = Ctx.getCanonicalType(PointeeTy);
if (CanonPointeeTy == ObjTy)
return CastResult(state, baseR);
}
-
+
// Otherwise, create a new ElementRegion at offset 0.
return MakeElementRegion(state, baseR, PointeeTy, CastToTy, 0);
}
-
+
// We have a non-zero offset from the base region. We want to determine
// if the offset can be evenly divided by sizeof(PointeeTy). If so,
// we create an ElementRegion whose index is that value. Otherwise, we
// create two ElementRegions, one that reflects a raw offset and the other
// that reflects the cast.
-
+
// Compute the index for the new ElementRegion.
int64_t newIndex = 0;
const MemRegion *newSuperR = 0;
@@ -179,18 +179,18 @@
newSuperR = baseR;
}
}
-
+
if (!newSuperR) {
// Create an intermediate ElementRegion to represent the raw byte.
// This will be the super region of the final ElementRegion.
SVal idx = ValMgr.makeArrayIndex(off);
newSuperR = MRMgr.getElementRegion(Ctx.CharTy, idx, baseR, Ctx);
}
-
+
return MakeElementRegion(state, newSuperR, PointeeTy, CastToTy, newIndex);
}
}
-
+
return CastResult(state, R);
}
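The ElementRegion branch of CastRegion is, arithmetically, a small computation: take the element region's raw byte offset from its base; if that offset divides evenly by the size of the cast-to pointee type, the new element index is the offset divided by that size; otherwise the new region sits at index 0 on top of an intermediate char-typed region placed at the raw byte offset. A standalone sketch of just that index computation (types are illustrative; `pointeeSize` is assumed to be a byte size already known at this point):

```cpp
#include <cstdint>

struct Recast {
  std::int64_t index;        // element index in the re-typed region
  bool needsRawByteRegion;   // true: index 0 on an intermediate char region
};

// Re-express a raw byte offset as an element index of a differently-typed
// region, mirroring the ElementRegion case in StoreManager::CastRegion.
Recast recastOffset(std::int64_t byteOffset, std::int64_t pointeeSize) {
  if (byteOffset == 0)
    return {0, false};                        // stay on the base region
  if (pointeeSize > 0 && byteOffset % pointeeSize == 0)
    return {byteOffset / pointeeSize, false};
  return {0, true};                           // not evenly divisible
}
```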
@@ -204,8 +204,8 @@
QualType castTy) {
if (castTy.isNull())
return SValuator::CastResult(state, V);
-
- ASTContext &Ctx = ValMgr.getContext();
+
+ ASTContext &Ctx = ValMgr.getContext();
return ValMgr.getSValuator().EvalCast(V, state, castTy, R->getValueType(Ctx));
}
diff --git a/lib/Analysis/SymbolManager.cpp b/lib/Analysis/SymbolManager.cpp
index d2a82fd..22e1101 100644
--- a/lib/Analysis/SymbolManager.cpp
+++ b/lib/Analysis/SymbolManager.cpp
@@ -22,7 +22,7 @@
dumpToStream(llvm::errs());
}
-static void print(llvm::raw_ostream& os, BinaryOperator::Opcode Op) {
+static void print(llvm::raw_ostream& os, BinaryOperator::Opcode Op) {
switch (Op) {
default:
assert(false && "operator printing not implemented");
@@ -37,13 +37,13 @@
case BinaryOperator::LT: os << "<" ; break;
case BinaryOperator::GT: os << '>' ; break;
case BinaryOperator::LE: os << "<=" ; break;
- case BinaryOperator::GE: os << ">=" ; break;
+ case BinaryOperator::GE: os << ">=" ; break;
case BinaryOperator::EQ: os << "==" ; break;
case BinaryOperator::NE: os << "!=" ; break;
case BinaryOperator::And: os << '&' ; break;
case BinaryOperator::Xor: os << '^' ; break;
case BinaryOperator::Or: os << '|' ; break;
- }
+ }
}
void SymIntExpr::dumpToStream(llvm::raw_ostream& os) const {
@@ -54,14 +54,14 @@
os << ' ' << getRHS().getZExtValue();
if (getRHS().isUnsigned()) os << 'U';
}
-
+
void SymSymExpr::dumpToStream(llvm::raw_ostream& os) const {
os << '(';
getLHS()->dumpToStream(os);
os << ") ";
os << '(';
getRHS()->dumpToStream(os);
- os << ')';
+ os << ')';
}
void SymbolConjured::dumpToStream(llvm::raw_ostream& os) const {
@@ -77,60 +77,60 @@
os << "reg_$" << getSymbolID() << "<" << R << ">";
}
-const SymbolRegionValue*
+const SymbolRegionValue*
SymbolManager::getRegionValueSymbol(const MemRegion* R, QualType T) {
llvm::FoldingSetNodeID profile;
SymbolRegionValue::Profile(profile, R, T);
- void* InsertPos;
- SymExpr *SD = DataSet.FindNodeOrInsertPos(profile, InsertPos);
- if (!SD) {
+ void* InsertPos;
+ SymExpr *SD = DataSet.FindNodeOrInsertPos(profile, InsertPos);
+ if (!SD) {
SD = (SymExpr*) BPAlloc.Allocate<SymbolRegionValue>();
- new (SD) SymbolRegionValue(SymbolCounter, R, T);
+ new (SD) SymbolRegionValue(SymbolCounter, R, T);
DataSet.InsertNode(SD, InsertPos);
++SymbolCounter;
}
-
+
return cast<SymbolRegionValue>(SD);
}
const SymbolConjured*
SymbolManager::getConjuredSymbol(const Stmt* E, QualType T, unsigned Count,
const void* SymbolTag) {
-
+
llvm::FoldingSetNodeID profile;
SymbolConjured::Profile(profile, E, T, Count, SymbolTag);
- void* InsertPos;
- SymExpr *SD = DataSet.FindNodeOrInsertPos(profile, InsertPos);
- if (!SD) {
+ void* InsertPos;
+ SymExpr *SD = DataSet.FindNodeOrInsertPos(profile, InsertPos);
+ if (!SD) {
SD = (SymExpr*) BPAlloc.Allocate<SymbolConjured>();
- new (SD) SymbolConjured(SymbolCounter, E, T, Count, SymbolTag);
- DataSet.InsertNode(SD, InsertPos);
+ new (SD) SymbolConjured(SymbolCounter, E, T, Count, SymbolTag);
+ DataSet.InsertNode(SD, InsertPos);
++SymbolCounter;
}
-
+
return cast<SymbolConjured>(SD);
}
const SymbolDerived*
SymbolManager::getDerivedSymbol(SymbolRef parentSymbol,
const TypedRegion *R) {
-
+
llvm::FoldingSetNodeID profile;
SymbolDerived::Profile(profile, parentSymbol, R);
- void* InsertPos;
- SymExpr *SD = DataSet.FindNodeOrInsertPos(profile, InsertPos);
- if (!SD) {
+ void* InsertPos;
+ SymExpr *SD = DataSet.FindNodeOrInsertPos(profile, InsertPos);
+ if (!SD) {
SD = (SymExpr*) BPAlloc.Allocate<SymbolDerived>();
new (SD) SymbolDerived(SymbolCounter, parentSymbol, R);
- DataSet.InsertNode(SD, InsertPos);
+ DataSet.InsertNode(SD, InsertPos);
++SymbolCounter;
}
-
+
return cast<SymbolDerived>(SD);
}
const SymIntExpr *SymbolManager::getSymIntExpr(const SymExpr *lhs,
- BinaryOperator::Opcode op,
+ BinaryOperator::Opcode op,
const llvm::APSInt& v,
QualType t) {
llvm::FoldingSetNodeID ID;
@@ -143,7 +143,7 @@
new (data) SymIntExpr(lhs, op, v, t);
DataSet.InsertNode(data, InsertPos);
}
-
+
return cast<SymIntExpr>(data);
}
@@ -161,7 +161,7 @@
new (data) SymSymExpr(lhs, op, rhs, t);
DataSet.InsertNode(data, InsertPos);
}
-
+
return cast<SymSymExpr>(data);
}
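Each factory method in SymbolManager follows the same uniquing recipe: profile the would-be node into a FoldingSetNodeID, probe the set with FindNodeOrInsertPos, and only on a miss allocate from the BumpPtrAllocator, placement-new the node, and insert it at the remembered position. A minimal standalone sketch of the same "profile, probe, create on miss" idea, using an ordinary map as the index (illustrative; the real code uses llvm::FoldingSet and a bump allocator):

```cpp
#include <map>
#include <memory>
#include <tuple>
#include <vector>

struct SymExprNode {
  int lhsSymbol;
  int opcode;
  long long rhsValue;
};

class SymExprFactory {
  // Profile -> uniqued node; the key plays the role of FoldingSetNodeID.
  std::map<std::tuple<int, int, long long>, const SymExprNode *> index;
  std::vector<std::unique_ptr<SymExprNode>> storage;   // stands in for BPAlloc

public:
  // Return the unique node for (lhs, op, rhs), creating it on first use.
  const SymExprNode *getSymIntExpr(int lhs, int op, long long rhs) {
    auto key = std::make_tuple(lhs, op, rhs);
    auto it = index.find(key);
    if (it != index.end())
      return it->second;                               // hit: reuse the node
    storage.push_back(std::make_unique<SymExprNode>(SymExprNode{lhs, op, rhs}));
    const SymExprNode *node = storage.back().get();
    index.emplace(key, node);                          // remember for next time
    return node;
  }
};
```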
@@ -180,7 +180,7 @@
if (const TypedRegion* TR = dyn_cast<TypedRegion>(R))
return TR->getValueType(C);
-
+
return QualType();
}
@@ -198,7 +198,7 @@
bool SymbolReaper::maybeDead(SymbolRef sym) {
if (isLive(sym))
return false;
-
+
TheDead.insert(sym);
return true;
}
@@ -206,7 +206,7 @@
bool SymbolReaper::isLive(SymbolRef sym) {
if (TheLiving.count(sym))
return true;
-
+
if (const SymbolDerived *derived = dyn_cast<SymbolDerived>(sym)) {
if (isLive(derived->getParentSymbol())) {
markLive(sym);
@@ -214,7 +214,7 @@
}
return false;
}
-
+
// Interrogate the symbol. It may derive from an input value to
// the analyzed function/method.
return isa<SymbolRegionValue>(sym);
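The liveness rules here are short: a symbol is live if it was explicitly marked live, if it derives from a live parent symbol (in which case it is marked live as well), or if it is a region-value symbol and so may come straight from the inputs of the analyzed function. A toy restatement of both isLive and maybeDead (illustrative, not the clang classes):

```cpp
#include <set>

enum class SymKind { RegionValue, Conjured, Derived };

struct Sym {
  int id;
  SymKind kind;
  const Sym *parent = nullptr;   // set only for Derived symbols
};

// Mirrors SymbolReaper::isLive: explicit liveness, liveness inherited from
// a derived symbol's parent, or a region-value symbol (a possible input).
bool isLive(const Sym &s, std::set<int> &living) {
  if (living.count(s.id))
    return true;
  if (s.kind == SymKind::Derived && s.parent && isLive(*s.parent, living)) {
    living.insert(s.id);         // cache the result, like markLive(sym)
    return true;
  }
  return s.kind == SymKind::RegionValue;
}

// maybeDead: anything not live is recorded as a candidate for removal.
bool maybeDead(const Sym &s, std::set<int> &living, std::set<int> &dead) {
  if (isLive(s, living))
    return false;
  dead.insert(s.id);
  return true;
}
```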
diff --git a/lib/Analysis/UninitializedValues.cpp b/lib/Analysis/UninitializedValues.cpp
index 014ea82..8e7b158 100644
--- a/lib/Analysis/UninitializedValues.cpp
+++ b/lib/Analysis/UninitializedValues.cpp
@@ -25,21 +25,21 @@
//===----------------------------------------------------------------------===//
// Dataflow initialization logic.
-//===----------------------------------------------------------------------===//
+//===----------------------------------------------------------------------===//
namespace {
class VISIBILITY_HIDDEN RegisterDecls
- : public CFGRecStmtDeclVisitor<RegisterDecls> {
+ : public CFGRecStmtDeclVisitor<RegisterDecls> {
UninitializedValues::AnalysisDataTy& AD;
public:
RegisterDecls(UninitializedValues::AnalysisDataTy& ad) : AD(ad) {}
-
+
void VisitVarDecl(VarDecl* VD) { AD.Register(VD); }
CFG& getCFG() { return AD.getCFG(); }
};
-
+
} // end anonymous namespace
void UninitializedValues::InitializeValues(const CFG& cfg) {
@@ -49,25 +49,25 @@
//===----------------------------------------------------------------------===//
// Transfer functions.
-//===----------------------------------------------------------------------===//
+//===----------------------------------------------------------------------===//
namespace {
class VISIBILITY_HIDDEN TransferFuncs
: public CFGStmtVisitor<TransferFuncs,bool> {
-
+
UninitializedValues::ValTy V;
UninitializedValues::AnalysisDataTy& AD;
public:
TransferFuncs(UninitializedValues::AnalysisDataTy& ad) : AD(ad) {}
-
+
UninitializedValues::ValTy& getVal() { return V; }
CFG& getCFG() { return AD.getCFG(); }
-
+
void SetTopValue(UninitializedValues::ValTy& X) {
X.setDeclValues(AD);
X.resetBlkExprValues(AD);
}
-
+
bool VisitDeclRefExpr(DeclRefExpr* DR);
bool VisitBinaryOperator(BinaryOperator* B);
bool VisitUnaryOperator(UnaryOperator* U);
@@ -76,24 +76,24 @@
bool VisitDeclStmt(DeclStmt* D);
bool VisitConditionalOperator(ConditionalOperator* C);
bool BlockStmt_VisitObjCForCollectionStmt(ObjCForCollectionStmt* S);
-
+
bool Visit(Stmt *S);
bool BlockStmt_VisitExpr(Expr* E);
-
+
void VisitTerminator(CFGBlock* B) { }
};
-
+
static const bool Initialized = false;
-static const bool Uninitialized = true;
+static const bool Uninitialized = true;
bool TransferFuncs::VisitDeclRefExpr(DeclRefExpr* DR) {
-
+
if (VarDecl* VD = dyn_cast<VarDecl>(DR->getDecl()))
if (VD->isBlockVarDecl()) {
-
+
if (AD.Observer)
AD.Observer->ObserveDeclRefExpr(V, AD, DR, VD);
-
+
// Pseudo-hack to prevent a cascade of warnings. If an accessed variable
// is uninitialized, then we are already going to flag a warning for
// this variable, which is a "source" of uninitialized values.
@@ -103,17 +103,17 @@
if (AD.FullUninitTaint)
return V(VD,AD);
}
-
+
return Initialized;
}
static VarDecl* FindBlockVarDecl(Expr* E) {
-
+
// Blast through casts and parentheses to find any DeclRefExprs that
// refer to a block VarDecl.
-
+
if (DeclRefExpr* DR = dyn_cast<DeclRefExpr>(E->IgnoreParenCasts()))
- if (VarDecl* VD = dyn_cast<VarDecl>(DR->getDecl()))
+ if (VarDecl* VD = dyn_cast<VarDecl>(DR->getDecl()))
if (VD->isBlockVarDecl()) return VD;
return NULL;
@@ -136,7 +136,7 @@
for (DeclStmt::decl_iterator I=S->decl_begin(), E=S->decl_end(); I!=E; ++I) {
VarDecl *VD = dyn_cast<VarDecl>(*I);
if (VD && VD->isBlockVarDecl()) {
- if (Stmt* I = VD->getInit())
+ if (Stmt* I = VD->getInit())
V(VD,AD) = AD.FullUninitTaint ? V(cast<Expr>(I),AD) : Initialized;
else {
// Special case for declarations of array types. For things like:
@@ -145,20 +145,20 @@
//
// we should treat "x" as being initialized, because the variable
// "x" really refers to the memory block. Clearly x[1] is
- // uninitialized, but expressions like "(char *) x" really do refer to
- // an initialized value. This simple dataflow analysis does not reason
+ // uninitialized, but expressions like "(char *) x" really do refer to
+ // an initialized value. This simple dataflow analysis does not reason
// about the contents of arrays, although it could be potentially
// extended to do so if the array were of constant size.
if (VD->getType()->isArrayType())
V(VD,AD) = Initialized;
- else
+ else
V(VD,AD) = Uninitialized;
}
}
}
return Uninitialized; // Value is never consumed.
}
-
+
bool TransferFuncs::VisitCallExpr(CallExpr* C) {
VisitChildren(C);
return Initialized;
@@ -172,14 +172,14 @@
return V(VD,AD) = Initialized;
break;
}
-
+
default:
break;
}
return Visit(U->getSubExpr());
}
-
+
bool
TransferFuncs::BlockStmt_VisitObjCForCollectionStmt(ObjCForCollectionStmt* S) {
// This represents a use of the 'collection'
@@ -203,12 +203,12 @@
else
return Visit(ElemExpr);
}
-
+
V(VD,AD) = Initialized;
return Initialized;
}
-
-
+
+
bool TransferFuncs::VisitConditionalOperator(ConditionalOperator* C) {
Visit(C->getCond());
@@ -228,21 +228,21 @@
// or "Initialized" to variables referenced in the other subexpressions.
for (Stmt::child_iterator I=S->child_begin(), E=S->child_end(); I!=E; ++I)
if (*I && Visit(*I) == Uninitialized) x = Uninitialized;
-
+
return x;
}
-
+
bool TransferFuncs::Visit(Stmt *S) {
if (AD.isTracked(static_cast<Expr*>(S))) return V(static_cast<Expr*>(S),AD);
else return static_cast<CFGStmtVisitor<TransferFuncs,bool>*>(this)->Visit(S);
}
bool TransferFuncs::BlockStmt_VisitExpr(Expr* E) {
- bool x = static_cast<CFGStmtVisitor<TransferFuncs,bool>*>(this)->Visit(E);
+ bool x = static_cast<CFGStmtVisitor<TransferFuncs,bool>*>(this)->Visit(E);
if (AD.isTracked(E)) V(E,AD) = x;
return x;
}
-
+
} // end anonymous namespace
//===----------------------------------------------------------------------===//
@@ -255,7 +255,7 @@
// Merges take the same approach, preferring soundness. At a confluence point,
// if any predecessor has a variable marked uninitialized, the value is
// uninitialized at the confluence point.
-//===----------------------------------------------------------------------===//
+//===----------------------------------------------------------------------===//
namespace {
typedef StmtDeclBitVector_Types::Union Merge;
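The setup above makes this a classic forward "may be uninitialized" problem: `Uninitialized` is encoded as `true`, `Initialized` as `false`, and the `Union` merge means a variable counts as uninitialized at a confluence point whenever any predecessor still says so. A small sketch of that merge over per-variable bit vectors (illustrative; the real code uses the bit-vector machinery behind `StmtDeclBitVector_Types`):

```cpp
#include <cstddef>
#include <vector>

// One bit per tracked variable: true == may be uninitialized.
using UninitBits = std::vector<bool>;

// Confluence operator: the union of the predecessors' facts, so a variable
// stays flagged if any incoming edge still considers it uninitialized.
// Assumes both vectors track the same variables in the same order.
UninitBits merge(const UninitBits &a, const UninitBits &b) {
  UninitBits out(a.size());
  for (std::size_t i = 0; i < a.size(); ++i)
    out[i] = a[i] || b[i];
  return out;
}
```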
@@ -264,28 +264,28 @@
//===----------------------------------------------------------------------===//
// Uninitialized values checker. Scan an AST and flag variable uses
-//===----------------------------------------------------------------------===//
+//===----------------------------------------------------------------------===//
UninitializedValues_ValueTypes::ObserverTy::~ObserverTy() {}
namespace {
class VISIBILITY_HIDDEN UninitializedValuesChecker
: public UninitializedValues::ObserverTy {
-
+
ASTContext &Ctx;
Diagnostic &Diags;
llvm::SmallPtrSet<VarDecl*,10> AlreadyWarned;
-
+
public:
UninitializedValuesChecker(ASTContext &ctx, Diagnostic &diags)
: Ctx(ctx), Diags(diags) {}
-
+
virtual void ObserveDeclRefExpr(UninitializedValues::ValTy& V,
UninitializedValues::AnalysisDataTy& AD,
DeclRefExpr* DR, VarDecl* VD) {
assert ( AD.isTracked(VD) && "Unknown VarDecl.");
-
+
if (V(VD,AD) == Uninitialized)
if (AlreadyWarned.insert(VD))
Diags.Report(Ctx.getFullLoc(DR->getSourceRange().getBegin()),
@@ -297,13 +297,13 @@
namespace clang {
void CheckUninitializedValues(CFG& cfg, ASTContext &Ctx, Diagnostic &Diags,
bool FullUninitTaint) {
-
+
// Compute the uninitialized values information.
UninitializedValues U(cfg);
U.getAnalysisData().FullUninitTaint = FullUninitTaint;
Solver S(U);
S.runOnCFG(cfg);
-
+
// Scan for DeclRefExprs that use uninitialized values.
UninitializedValuesChecker Observer(Ctx,Diags);
U.getAnalysisData().Observer = &Observer;
diff --git a/lib/Analysis/ValueManager.cpp b/lib/Analysis/ValueManager.cpp
index 44334ce..9fe16af 100644
--- a/lib/Analysis/ValueManager.cpp
+++ b/lib/Analysis/ValueManager.cpp
@@ -28,10 +28,10 @@
if (T->isIntegerType())
return makeIntVal(0, T);
-
+
// FIXME: Handle floats.
// FIXME: Handle structs.
- return UnknownVal();
+ return UnknownVal();
}
//===----------------------------------------------------------------------===//
@@ -58,14 +58,14 @@
SVal ValueManager::convertToArrayIndex(SVal V) {
if (V.isUnknownOrUndef())
return V;
-
+
// Common case: we have an appropriately sized integer.
if (nonloc::ConcreteInt* CI = dyn_cast<nonloc::ConcreteInt>(&V)) {
const llvm::APSInt& I = CI->getValue();
if (I.getBitWidth() == ArrayIndexWidth && I.isSigned())
return V;
}
-
+
return SVator->EvalCastNL(cast<NonLoc>(V), ArrayIndexTy);
}
@@ -75,24 +75,24 @@
const TypedRegion* TR = cast<TypedRegion>(R);
T = TR->getValueType(SymMgr.getContext());
}
-
+
if (!SymbolManager::canSymbolicate(T))
return UnknownVal();
SymbolRef sym = SymMgr.getRegionValueSymbol(R, T);
-
+
if (Loc::IsLocType(T))
return loc::MemRegionVal(MemMgr.getSymbolicRegion(sym));
-
+
return nonloc::SymbolVal(sym);
}
SVal ValueManager::getConjuredSymbolVal(const Expr *E, unsigned Count) {
QualType T = E->getType();
-
+
if (!SymbolManager::canSymbolicate(T))
return UnknownVal();
-
+
SymbolRef sym = SymMgr.getConjuredSymbol(E, Count);
if (Loc::IsLocType(T))
@@ -103,7 +103,7 @@
SVal ValueManager::getConjuredSymbolVal(const Expr *E, QualType T,
unsigned Count) {
-
+
if (!SymbolManager::canSymbolicate(T))
return UnknownVal();
@@ -122,12 +122,12 @@
if (!SymbolManager::canSymbolicate(T))
return UnknownVal();
-
+
SymbolRef sym = SymMgr.getDerivedSymbol(parentSymbol, R);
-
+
if (Loc::IsLocType(T))
return loc::MemRegionVal(MemMgr.getSymbolicRegion(sym));
-
+
return nonloc::SymbolVal(sym);
}