//== Store.cpp - Interface for maps from Locations to Values ----*- C++ -*--==//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the types Store and StoreManager.
//
//===----------------------------------------------------------------------===//

#include "clang/Checker/PathSensitive/Store.h"
#include "clang/Checker/PathSensitive/GRState.h"
#include "clang/AST/CharUnits.h"

using namespace clang;

StoreManager::StoreManager(GRStateManager &stateMgr)
  : ValMgr(stateMgr.getValueManager()), StateMgr(stateMgr),
    MRMgr(ValMgr.getRegionManager()), Ctx(stateMgr.getContext()) {}

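// MakeElementRegion - Helper that wraps 'Base' in an ElementRegion of element
// type 'EleTy' whose index is the concrete value 'index'.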
const MemRegion *StoreManager::MakeElementRegion(const MemRegion *Base,
                                                 QualType EleTy,
                                                 uint64_t index) {
  SVal idx = ValMgr.makeArrayIndex(index);
  return MRMgr.getElementRegion(EleTy, idx, Base, ValMgr.getContext());
}

// FIXME: Merge with the implementation of the same method in MemRegion.cpp
static bool IsCompleteType(ASTContext &Ctx, QualType Ty) {
  if (const RecordType *RT = Ty->getAs<RecordType>()) {
    const RecordDecl *D = RT->getDecl();
    if (!D->getDefinition(Ctx))
      return false;
  }

  return true;
}

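/// CastRegion - Compute the region obtained by casting the region 'R' to the
/// type 'CastToTy'.  Returns NULL when the cast cannot be modeled; callers
/// interpret a NULL result as UnknownVal.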
const MemRegion *StoreManager::CastRegion(const MemRegion *R, QualType CastToTy) {

  ASTContext& Ctx = StateMgr.getContext();

  // Handle casts to Objective-C objects.
  if (CastToTy->isObjCObjectPointerType())
    return R->StripCasts();

  if (CastToTy->isBlockPointerType()) {
    // FIXME: We may need different solutions, depending on the symbol
    // involved.  Blocks can be cast to/from 'id', as they can be treated
    // as Objective-C objects.  This could possibly be handled by enhancing
    // our reasoning of downcasts of symbolic objects.
    if (isa<CodeTextRegion>(R) || isa<SymbolicRegion>(R))
      return R;

    // We don't know what to make of it.  Return a NULL region, which
    // will be interpreted as UnknownVal.
    return NULL;
  }

  // Now assume we are casting from pointer to pointer. Other cases should
  // already be handled.
  QualType PointeeTy = CastToTy->getAs<PointerType>()->getPointeeType();
  QualType CanonPointeeTy = Ctx.getCanonicalType(PointeeTy);

  // Handle casts to void*.  We just pass the region through.
  if (CanonPointeeTy.getLocalUnqualifiedType() == Ctx.VoidTy)
    return R;

  // Handle casts from compatible types.
  if (R->isBoundable())
    if (const TypedRegion *TR = dyn_cast<TypedRegion>(R)) {
      QualType ObjTy = Ctx.getCanonicalType(TR->getValueType(Ctx));
      if (CanonPointeeTy == ObjTy)
        return R;
    }

  // Process region cast according to the kind of the region being cast.
  switch (R->getKind()) {
    case MemRegion::CXXThisRegionKind:
    case MemRegion::GenericMemSpaceRegionKind:
    case MemRegion::StackLocalsSpaceRegionKind:
    case MemRegion::StackArgumentsSpaceRegionKind:
    case MemRegion::HeapSpaceRegionKind:
    case MemRegion::UnknownSpaceRegionKind:
    case MemRegion::GlobalsSpaceRegionKind: {
      assert(0 && "Invalid region cast");
      break;
    }

    case MemRegion::FunctionTextRegionKind:
    case MemRegion::BlockTextRegionKind:
    case MemRegion::BlockDataRegionKind: {
      // CodeTextRegion should be cast only to a function or block pointer
      // type, although in practice it can be cast to anything, e.g., void*,
      // char*, etc.
      // Just return the region.
      return R;
    }

    case MemRegion::StringRegionKind:
      // FIXME: Need to handle arbitrary downcasts.
    case MemRegion::SymbolicRegionKind:
    case MemRegion::AllocaRegionKind:
    case MemRegion::CompoundLiteralRegionKind:
    case MemRegion::FieldRegionKind:
    case MemRegion::ObjCIvarRegionKind:
    case MemRegion::VarRegionKind:
    case MemRegion::CXXObjectRegionKind:
      return MakeElementRegion(R, PointeeTy);

    case MemRegion::ElementRegionKind: {
      // If we are casting from an ElementRegion to another type, the
      // algorithm is as follows:
      //
      // (1) Compute the "raw offset" of the ElementRegion from the
      //     base region.  This is done by calling 'getAsRawOffset()'.
      //
      // (2a) If we get a 'RegionRawOffset' after calling
      //      'getAsRawOffset()', determine if the absolute offset
      //      can be exactly divided into chunks of the size of the
      //      cast-to pointee type.  If so, create a new ElementRegion with
      //      the cast-to pointee type as the new ElementType and the index
      //      being the offset divided by the chunk size.  If not, create
      //      a new ElementRegion at offset 0 off the raw offset region.
      //
      // (2b) If we don't get a 'RegionRawOffset' after calling
      //      'getAsRawOffset()', it means that we are at offset 0.
      //
      // FIXME: Handle symbolic raw offsets.
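      //
      // Illustrative example (assuming 4-byte 'int' and 2-byte 'short'):
      // casting an ElementRegion that lies 8 bytes into 'int buf[10]'
      // (e.g., '&buf[2]') to 'short *' yields ElementRegion{short, index 4,
      // buf}, since 8 is an exact multiple of sizeof(short).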

      const ElementRegion *elementR = cast<ElementRegion>(R);
      const RegionRawOffset &rawOff = elementR->getAsRawOffset();
      const MemRegion *baseR = rawOff.getRegion();

      // If we cannot compute a raw offset, throw up our hands and return
      // a NULL MemRegion*.
      if (!baseR)
        return NULL;

      CharUnits off = CharUnits::fromQuantity(rawOff.getByteOffset());

      if (off.isZero()) {
        // Edge case: we are at 0 bytes off the beginning of baseR.  We
        // check to see if the type we are casting to is the same as the base
        // region.  If so, just return the base region.
        if (const TypedRegion *TR = dyn_cast<TypedRegion>(baseR)) {
          QualType ObjTy = Ctx.getCanonicalType(TR->getValueType(Ctx));
          QualType CanonPointeeTy = Ctx.getCanonicalType(PointeeTy);
          if (CanonPointeeTy == ObjTy)
            return baseR;
        }

        // Otherwise, create a new ElementRegion at offset 0.
        return MakeElementRegion(baseR, PointeeTy);
      }

      // We have a non-zero offset from the base region.  We want to determine
      // if the offset can be evenly divided by sizeof(PointeeTy).  If so,
      // we create an ElementRegion whose index is that value.  Otherwise, we
      // create two ElementRegions, one that reflects a raw offset and the
      // other that reflects the cast.
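      //
      // Illustrative example: a raw offset of 3 bytes cast to 'int *' (with
      // 4-byte 'int') is not a multiple of sizeof(int), so the result is
      // ElementRegion{int, 0, ElementRegion{char, 3, baseR}}.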

      // Compute the index for the new ElementRegion.
      int64_t newIndex = 0;
      const MemRegion *newSuperR = 0;

      // We can only compute sizeof(PointeeTy) if it is a complete type.
      if (IsCompleteType(Ctx, PointeeTy)) {
        // Compute the size in **bytes**.
        CharUnits pointeeTySize = Ctx.getTypeSizeInChars(PointeeTy);

        // Is the offset a multiple of the size?  If so, we can layer the
        // ElementRegion (with elementType == PointeeTy) directly on top of
        // the base region.
        if (off % pointeeTySize == 0) {
          newIndex = off / pointeeTySize;
          newSuperR = baseR;
        }
      }

      if (!newSuperR) {
        // Create an intermediate ElementRegion to represent the raw byte
        // offset.  This will be the super region of the final ElementRegion.
        newSuperR = MakeElementRegion(baseR, Ctx.CharTy, off.getQuantity());
      }

      return MakeElementRegion(newSuperR, PointeeTy, newIndex);
    }
  }

  assert(0 && "unreachable");
  return 0;
}


/// CastRetrievedVal - Used by subclasses of StoreManager to implement
/// implicit casts that arise from loads from regions that are reinterpreted
/// as another region.
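/// For example (illustrative), loading an 'int' binding through an lvalue of
/// type 'char' requires casting (truncating) the retrieved value to 'char'.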
SVal StoreManager::CastRetrievedVal(SVal V, const TypedRegion *R,
                                    QualType castTy, bool performTestOnly) {

  if (castTy.isNull())
    return V;

  ASTContext &Ctx = ValMgr.getContext();

  if (performTestOnly) {
    // Automatically translate references to pointers.
    QualType T = R->getValueType(Ctx);
    if (const ReferenceType *RT = T->getAs<ReferenceType>())
      T = Ctx.getPointerType(RT->getPointeeType());

    assert(ValMgr.getContext().hasSameUnqualifiedType(castTy, T));
    return V;
  }

  if (const Loc *L = dyn_cast<Loc>(&V))
    return ValMgr.getSValuator().EvalCastL(*L, castTy);
  else if (const NonLoc *NL = dyn_cast<NonLoc>(&V))
    return ValMgr.getSValuator().EvalCastNL(*NL, castTy);

  return V;
}

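/// InvalidateRegions - Invalidate each region in the range [I, End) by
/// calling InvalidateRegion on it in turn, threading the updated store
/// through each call.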
Store StoreManager::InvalidateRegions(Store store,
                                      const MemRegion * const *I,
                                      const MemRegion * const *End,
                                      const Expr *E, unsigned Count,
                                      InvalidatedSymbols *IS) {
  for ( ; I != End ; ++I)
    store = InvalidateRegion(store, *I, E, Count, IS);

  return store;
}

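/// getLValueFieldOrIvar - Compute the lvalue (as an SVal) of the field or
/// Objective-C ivar 'D' within the object referenced by 'Base'.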
SVal StoreManager::getLValueFieldOrIvar(const Decl* D, SVal Base) {
  if (Base.isUnknownOrUndef())
    return Base;

  Loc BaseL = cast<Loc>(Base);
  const MemRegion* BaseR = 0;

  switch (BaseL.getSubKind()) {
    case loc::MemRegionKind:
      BaseR = cast<loc::MemRegionVal>(BaseL).getRegion();
      break;

    case loc::GotoLabelKind:
      // These are abnormal cases.  Flag an undefined value.
      return UndefinedVal();

    case loc::ConcreteIntKind:
      // While these seem funny, this can happen through casts.
      // FIXME: What we should return is the field offset.  For example,
      // add the field offset to the integer value.  That way funny things
      // like this work properly:  &(((struct foo *) 0xa)->f)
      return Base;

    default:
      assert(0 && "Unhandled Base.");
      return Base;
  }

  // NOTE: We must have this check first because ObjCIvarDecl is a subclass
  // of FieldDecl.
  if (const ObjCIvarDecl *ID = dyn_cast<ObjCIvarDecl>(D))
    return loc::MemRegionVal(MRMgr.getObjCIvarRegion(ID, BaseR));

  return loc::MemRegionVal(MRMgr.getFieldRegion(cast<FieldDecl>(D), BaseR));
}

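/// getLValueElement - Compute the lvalue of the array element at index
/// 'Offset' within the array referenced by 'Base', where the elements have
/// type 'elementType'.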
SVal StoreManager::getLValueElement(QualType elementType, SVal Offset,
                                    SVal Base) {

  // If the base is an unknown or undefined value, just return it back.
  // FIXME: For absolute pointer addresses, we just return that value back as
  // well, although in reality we should return the offset added to that
  // value.
  if (Base.isUnknownOrUndef() || isa<loc::ConcreteInt>(Base))
    return Base;

  // Only handle integer offsets... for now.
  if (!isa<nonloc::ConcreteInt>(Offset))
    return UnknownVal();

  const MemRegion* BaseRegion = cast<loc::MemRegionVal>(Base).getRegion();

  // A pointer of any type can be cast and used as an array base.
  const ElementRegion *ElemR = dyn_cast<ElementRegion>(BaseRegion);

  // Convert the offset to the appropriate size and signedness.
  Offset = ValMgr.convertToArrayIndex(Offset);

  if (!ElemR) {
    //
    // If the base region is not an ElementRegion, create one.
    // This can happen in the following example:
    //
    //   char *p = __builtin_alloca(10);
    //   p[1] = 8;
    //
    // Observe that 'p' binds to an AllocaRegion.
    //
    return loc::MemRegionVal(MRMgr.getElementRegion(elementType, Offset,
                                                    BaseRegion, Ctx));
  }

  SVal BaseIdx = ElemR->getIndex();

  if (!isa<nonloc::ConcreteInt>(BaseIdx))
    return UnknownVal();

  const llvm::APSInt& BaseIdxI = cast<nonloc::ConcreteInt>(BaseIdx).getValue();
  const llvm::APSInt& OffI = cast<nonloc::ConcreteInt>(Offset).getValue();
  assert(BaseIdxI.isSigned());

  // Compute the new index.
  SVal NewIdx = nonloc::ConcreteInt(
      ValMgr.getBasicValueFactory().getValue(BaseIdxI + OffI));

  // Construct the new ElementRegion.
  const MemRegion *ArrayR = ElemR->getSuperRegion();
  return loc::MemRegionVal(MRMgr.getElementRegion(elementType, NewIdx, ArrayR,
                                                  Ctx));
}