//== Store.cpp - Interface for maps from Locations to Values ----*- C++ -*--==//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
//  This file defines the types Store and StoreManager.
//
//===----------------------------------------------------------------------===//

#include "clang/StaticAnalyzer/Core/PathSensitive/Store.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h"
#include "clang/AST/CharUnits.h"

using namespace clang;
using namespace ento;

StoreManager::StoreManager(ProgramStateManager &stateMgr)
  : svalBuilder(stateMgr.getSValBuilder()), StateMgr(stateMgr),
    MRMgr(svalBuilder.getRegionManager()), Ctx(stateMgr.getContext()) {}

StoreRef StoreManager::enterStackFrame(const ProgramState *state,
                                       const LocationContext *callerCtx,
                                       const StackFrameContext *calleeCtx) {
  return StoreRef(state->getStore(), *this);
}

const MemRegion *StoreManager::MakeElementRegion(const MemRegion *Base,
                                                 QualType EleTy,
                                                 uint64_t index) {
  NonLoc idx = svalBuilder.makeArrayIndex(index);
  return MRMgr.getElementRegion(EleTy, idx, Base, svalBuilder.getContext());
}

// FIXME: Merge with the implementation of the same method in MemRegion.cpp
static bool IsCompleteType(ASTContext &Ctx, QualType Ty) {
  if (const RecordType *RT = Ty->getAs<RecordType>()) {
    const RecordDecl *D = RT->getDecl();
    if (!D->getDefinition())
      return false;
  }

  return true;
}

StoreRef StoreManager::BindDefault(Store store, const MemRegion *R, SVal V) {
  return StoreRef(store, *this);
}

const ElementRegion *StoreManager::GetElementZeroRegion(const MemRegion *R,
                                                        QualType T) {
  NonLoc idx = svalBuilder.makeZeroArrayIndex();
  assert(!T.isNull());
  return MRMgr.getElementRegion(T, idx, R, Ctx);
}

const MemRegion *StoreManager::castRegion(const MemRegion *R, QualType CastToTy) {

  ASTContext &Ctx = StateMgr.getContext();

  // Handle casts to Objective-C objects.
  if (CastToTy->isObjCObjectPointerType())
    return R->StripCasts();

  if (CastToTy->isBlockPointerType()) {
    // FIXME: We may need different solutions, depending on the symbol
    // involved.  Blocks can be cast to/from 'id', as they can be treated
    // as Objective-C objects.  This could possibly be handled by enhancing
    // our reasoning about downcasts of symbolic objects.
    if (isa<CodeTextRegion>(R) || isa<SymbolicRegion>(R))
      return R;

    // We don't know what to make of it.  Return a NULL region, which
    // will be interpreted as UnknownVal.
    return NULL;
  }

  // Now assume we are casting from pointer to pointer.  Other cases should
  // already be handled.
  QualType PointeeTy = CastToTy->getPointeeType();
  QualType CanonPointeeTy = Ctx.getCanonicalType(PointeeTy);

  // Handle casts to void*.  We just pass the region through.
  if (CanonPointeeTy.getLocalUnqualifiedType() == Ctx.VoidTy)
    return R;

  // Handle casts from compatible types.
  if (R->isBoundable())
    if (const TypedValueRegion *TR = dyn_cast<TypedValueRegion>(R)) {
      QualType ObjTy = Ctx.getCanonicalType(TR->getValueType());
      if (CanonPointeeTy == ObjTy)
        return R;
    }

  // Process the region cast according to the kind of the region being cast.
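  // For example (illustrative): casting the VarRegion of 'int x' to 'char *'
  // falls into the typed-region cases below and yields an ElementRegion of
  // element type 'char' at index 0 layered over that VarRegion.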
  switch (R->getKind()) {
  case MemRegion::CXXThisRegionKind:
  case MemRegion::GenericMemSpaceRegionKind:
  case MemRegion::StackLocalsSpaceRegionKind:
  case MemRegion::StackArgumentsSpaceRegionKind:
  case MemRegion::HeapSpaceRegionKind:
  case MemRegion::UnknownSpaceRegionKind:
  case MemRegion::StaticGlobalSpaceRegionKind:
  case MemRegion::GlobalInternalSpaceRegionKind:
  case MemRegion::GlobalSystemSpaceRegionKind:
  case MemRegion::GlobalImmutableSpaceRegionKind: {
    llvm_unreachable("Invalid region cast");
  }

  case MemRegion::FunctionTextRegionKind:
  case MemRegion::BlockTextRegionKind:
  case MemRegion::BlockDataRegionKind:
  case MemRegion::StringRegionKind:
    // FIXME: Need to handle arbitrary downcasts.
  case MemRegion::SymbolicRegionKind:
  case MemRegion::AllocaRegionKind:
  case MemRegion::CompoundLiteralRegionKind:
  case MemRegion::FieldRegionKind:
  case MemRegion::ObjCIvarRegionKind:
  case MemRegion::VarRegionKind:
  case MemRegion::CXXTempObjectRegionKind:
  case MemRegion::CXXBaseObjectRegionKind:
    return MakeElementRegion(R, PointeeTy);

  case MemRegion::ElementRegionKind: {
    // If we are casting from an ElementRegion to another type, the
    // algorithm is as follows:
    //
    // (1) Compute the "raw offset" of the ElementRegion from the
    //     base region.  This is done by calling 'getAsRawOffset()'.
    //
    // (2a) If we get a 'RegionRawOffset' after calling
    //      'getAsRawOffset()', determine if the absolute offset
    //      can be exactly divided into chunks of the size of the
    //      casted-pointee type.  If so, create a new ElementRegion with
    //      the pointee-cast type as the new ElementType and the index
    //      being the offset divided by the chunk size.  If not, create
    //      a new ElementRegion at offset 0 off the raw offset region.
    //
    // (2b) If we don't get a 'RegionRawOffset' after calling
    //      'getAsRawOffset()', it means that we are at offset 0.
    //
    // FIXME: Handle symbolic raw offsets.

    const ElementRegion *elementR = cast<ElementRegion>(R);
    const RegionRawOffset &rawOff = elementR->getAsArrayOffset();
    const MemRegion *baseR = rawOff.getRegion();

    // If we cannot compute a raw offset, throw up our hands and return
    // a NULL MemRegion*.
    if (!baseR)
      return NULL;

    CharUnits off = rawOff.getOffset();

    if (off.isZero()) {
      // Edge case: we are at 0 bytes off the beginning of baseR.  We
      // check to see if the type we are casting to is the same as the base
      // region.  If so, just return the base region.
      if (const TypedValueRegion *TR = dyn_cast<TypedValueRegion>(baseR)) {
        QualType ObjTy = Ctx.getCanonicalType(TR->getValueType());
        QualType CanonPointeeTy = Ctx.getCanonicalType(PointeeTy);
        if (CanonPointeeTy == ObjTy)
          return baseR;
      }

      // Otherwise, create a new ElementRegion at offset 0.
      return MakeElementRegion(baseR, PointeeTy);
    }

    // We have a non-zero offset from the base region.  We want to determine
    // if the offset can be evenly divided by sizeof(PointeeTy).  If so,
    // we create an ElementRegion whose index is that value.  Otherwise, we
    // create two ElementRegions, one that reflects a raw offset and the other
    // that reflects the cast.

    // Compute the index for the new ElementRegion.
    int64_t newIndex = 0;
    const MemRegion *newSuperR = 0;

    // We can only compute sizeof(PointeeTy) if it is a complete type.
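    // Illustrative example (assuming a 2-byte 'short'): with off == 6 and
    // PointeeTy == 'short', the offset divides evenly, so newIndex becomes 3
    // and the new ElementRegion is layered directly over baseR.  With
    // off == 5 the division fails, so an intermediate char-typed
    // ElementRegion at raw offset 5 is created below and used as the super
    // region instead.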
    if (IsCompleteType(Ctx, PointeeTy)) {
      // Compute the size in **bytes**.
      CharUnits pointeeTySize = Ctx.getTypeSizeInChars(PointeeTy);
      if (!pointeeTySize.isZero()) {
        // Is the offset a multiple of the size?  If so, we can layer the
        // ElementRegion (with elementType == PointeeTy) directly on top of
        // the base region.
        if (off % pointeeTySize == 0) {
          newIndex = off / pointeeTySize;
          newSuperR = baseR;
        }
      }
    }

    if (!newSuperR) {
      // Create an intermediate ElementRegion to represent the raw byte
      // offset.  This will be the super region of the final ElementRegion.
      newSuperR = MakeElementRegion(baseR, Ctx.CharTy, off.getQuantity());
    }

    return MakeElementRegion(newSuperR, PointeeTy, newIndex);
  }
  }

  llvm_unreachable("unreachable");
}


/// CastRetrievedVal - Used by subclasses of StoreManager to implement
/// implicit casts that arise from loads from regions that are reinterpreted
/// as another region.
SVal StoreManager::CastRetrievedVal(SVal V, const TypedValueRegion *R,
                                    QualType castTy, bool performTestOnly) {

  if (castTy.isNull() || V.isUnknownOrUndef())
    return V;

  ASTContext &Ctx = svalBuilder.getContext();

  if (performTestOnly) {
    // Automatically translate references to pointers.
    QualType T = R->getValueType();
    if (const ReferenceType *RT = T->getAs<ReferenceType>())
      T = Ctx.getPointerType(RT->getPointeeType());

    assert(svalBuilder.getContext().hasSameUnqualifiedType(castTy, T));
    return V;
  }

  return svalBuilder.dispatchCast(V, castTy);
}

SVal StoreManager::getLValueFieldOrIvar(const Decl *D, SVal Base) {
  if (Base.isUnknownOrUndef())
    return Base;

  Loc BaseL = cast<Loc>(Base);
  const MemRegion* BaseR = 0;

  switch (BaseL.getSubKind()) {
  case loc::MemRegionKind:
    BaseR = cast<loc::MemRegionVal>(BaseL).getRegion();
    break;

  case loc::GotoLabelKind:
    // These are abnormal cases.  Flag an undefined value.
    return UndefinedVal();

  case loc::ConcreteIntKind:
    // While these seem funny, this can happen through casts.
    // FIXME: What we should return is the field offset.  For example,
    // add the field offset to the integer value.  That way funny things
    // like this work properly:  &(((struct foo *) 0xa)->f)
    return Base;

  default:
    llvm_unreachable("Unhandled Base.");
  }

  // NOTE: We must have this check first because ObjCIvarDecl is a subclass
  // of FieldDecl.
  if (const ObjCIvarDecl *ID = dyn_cast<ObjCIvarDecl>(D))
    return loc::MemRegionVal(MRMgr.getObjCIvarRegion(ID, BaseR));

  return loc::MemRegionVal(MRMgr.getFieldRegion(cast<FieldDecl>(D), BaseR));
}

SVal StoreManager::getLValueElement(QualType elementType, NonLoc Offset,
                                    SVal Base) {

  // If the base is an unknown or undefined value, just return it back.
  // FIXME: For absolute pointer addresses, we just return that value back as
  // well, although in reality we should return the offset added to that
  // value.
  if (Base.isUnknownOrUndef() || isa<loc::ConcreteInt>(Base))
    return Base;

  const MemRegion* BaseRegion = cast<loc::MemRegionVal>(Base).getRegion();

  // A pointer of any type can be cast and used as an array base.
  const ElementRegion *ElemR = dyn_cast<ElementRegion>(BaseRegion);

  // Convert the offset to the appropriate size and signedness.
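  // (Both the existing index of an ElementRegion and this new offset are kept
  // in the analyzer's canonical array-index type, so the concrete-integer
  // addition below operates on operands of matching width and signedness.)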
  Offset = cast<NonLoc>(svalBuilder.convertToArrayIndex(Offset));

  if (!ElemR) {
    //
    // If the base region is not an ElementRegion, create one.
    // This can happen in the following example:
    //
    //   char *p = __builtin_alloc(10);
    //   p[1] = 8;
    //
    //  Observe that 'p' binds to an AllocaRegion.
    //
    return loc::MemRegionVal(MRMgr.getElementRegion(elementType, Offset,
                                                    BaseRegion, Ctx));
  }

  SVal BaseIdx = ElemR->getIndex();

  if (!isa<nonloc::ConcreteInt>(BaseIdx))
    return UnknownVal();

  const llvm::APSInt& BaseIdxI = cast<nonloc::ConcreteInt>(BaseIdx).getValue();

  // Only allow non-integer offsets if the base region has no offset itself.
  // FIXME: This is a somewhat arbitrary restriction. We should be using
  // SValBuilder here to add the two offsets without checking their types.
  if (!isa<nonloc::ConcreteInt>(Offset)) {
    if (isa<ElementRegion>(BaseRegion->StripCasts()))
      return UnknownVal();

    return loc::MemRegionVal(MRMgr.getElementRegion(elementType, Offset,
                                                    ElemR->getSuperRegion(),
                                                    Ctx));
  }

  const llvm::APSInt& OffI = cast<nonloc::ConcreteInt>(Offset).getValue();
  assert(BaseIdxI.isSigned());

  // Compute the new index.
  nonloc::ConcreteInt NewIdx(svalBuilder.getBasicValueFactory().getValue(BaseIdxI +
                                                                         OffI));

  // Construct the new ElementRegion.
  const MemRegion *ArrayR = ElemR->getSuperRegion();
  return loc::MemRegionVal(MRMgr.getElementRegion(elementType, NewIdx, ArrayR,
                                                  Ctx));
}

StoreManager::BindingsHandler::~BindingsHandler() {}

void SubRegionMap::anchor() { }
void SubRegionMap::Visitor::anchor() { }