//== Store.cpp - Interface for maps from Locations to Values ----*- C++ -*--==//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
//  This file defines the types Store and StoreManager.
//
//===----------------------------------------------------------------------===//

#include "clang/StaticAnalyzer/Core/PathSensitive/Store.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h"
#include "clang/AST/CharUnits.h"

using namespace clang;
using namespace ento;

StoreManager::StoreManager(ProgramStateManager &stateMgr)
  : svalBuilder(stateMgr.getSValBuilder()), StateMgr(stateMgr),
    MRMgr(svalBuilder.getRegionManager()), Ctx(stateMgr.getContext()) {}

StoreRef StoreManager::enterStackFrame(const ProgramState *state,
                                       const StackFrameContext *frame) {
  return StoreRef(state->getStore(), *this);
}

const MemRegion *StoreManager::MakeElementRegion(const MemRegion *Base,
                                                 QualType EleTy,
                                                 uint64_t index) {
  NonLoc idx = svalBuilder.makeArrayIndex(index);
  return MRMgr.getElementRegion(EleTy, idx, Base, svalBuilder.getContext());
}

// FIXME: Merge with the implementation of the same method in MemRegion.cpp
static bool IsCompleteType(ASTContext &Ctx, QualType Ty) {
  if (const RecordType *RT = Ty->getAs<RecordType>()) {
    const RecordDecl *D = RT->getDecl();
    if (!D->getDefinition())
      return false;
  }

  return true;
}

StoreRef StoreManager::BindDefault(Store store, const MemRegion *R, SVal V) {
  return StoreRef(store, *this);
}

const ElementRegion *StoreManager::GetElementZeroRegion(const MemRegion *R,
                                                        QualType T) {
  NonLoc idx = svalBuilder.makeZeroArrayIndex();
  assert(!T.isNull());
  return MRMgr.getElementRegion(T, idx, R, Ctx);
}

const MemRegion *StoreManager::castRegion(const MemRegion *R,
                                          QualType CastToTy) {
  ASTContext &Ctx = StateMgr.getContext();

  // Handle casts to Objective-C objects.
  if (CastToTy->isObjCObjectPointerType())
    return R->StripCasts();

  if (CastToTy->isBlockPointerType()) {
    // FIXME: We may need different solutions, depending on the symbol
    // involved.  Blocks can be cast to/from 'id', as they can be treated
    // as Objective-C objects.  This could possibly be handled by enhancing
    // our reasoning about downcasts of symbolic objects.
    if (isa<CodeTextRegion>(R) || isa<SymbolicRegion>(R))
      return R;

    // We don't know what to make of it.  Return a NULL region, which
    // will be interpreted as UnknownVal.
    return NULL;
  }

  // Now assume we are casting from pointer to pointer.  Other cases should
  // already be handled.
  QualType PointeeTy = CastToTy->getPointeeType();
  QualType CanonPointeeTy = Ctx.getCanonicalType(PointeeTy);

  // Handle casts to void*.  We just pass the region through.
  if (CanonPointeeTy.getLocalUnqualifiedType() == Ctx.VoidTy)
    return R;

  // Handle casts from compatible types.
  if (R->isBoundable())
    if (const TypedValueRegion *TR = dyn_cast<TypedValueRegion>(R)) {
      QualType ObjTy = Ctx.getCanonicalType(TR->getValueType());
      if (CanonPointeeTy == ObjTy)
        return R;
    }

  // Process region cast according to the kind of the region being cast.
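  // For the non-element region kinds that can legally appear here, the result
  // below is simply an ElementRegion of the pointee type layered at index 0
  // on top of R.  As an illustrative example (assuming a local 'struct S s;'
  // whose VarRegion is cast to 'char *'), the resulting region would be
  // element{VarRegion{s}, 0, char}.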
  switch (R->getKind()) {
  case MemRegion::CXXThisRegionKind:
  case MemRegion::GenericMemSpaceRegionKind:
  case MemRegion::StackLocalsSpaceRegionKind:
  case MemRegion::StackArgumentsSpaceRegionKind:
  case MemRegion::HeapSpaceRegionKind:
  case MemRegion::UnknownSpaceRegionKind:
  case MemRegion::StaticGlobalSpaceRegionKind:
  case MemRegion::GlobalInternalSpaceRegionKind:
  case MemRegion::GlobalSystemSpaceRegionKind:
  case MemRegion::GlobalImmutableSpaceRegionKind: {
    llvm_unreachable("Invalid region cast");
  }

  case MemRegion::FunctionTextRegionKind:
  case MemRegion::BlockTextRegionKind:
  case MemRegion::BlockDataRegionKind:
  case MemRegion::StringRegionKind:
    // FIXME: Need to handle arbitrary downcasts.
  case MemRegion::SymbolicRegionKind:
  case MemRegion::AllocaRegionKind:
  case MemRegion::CompoundLiteralRegionKind:
  case MemRegion::FieldRegionKind:
  case MemRegion::ObjCIvarRegionKind:
  case MemRegion::VarRegionKind:
  case MemRegion::CXXTempObjectRegionKind:
  case MemRegion::CXXBaseObjectRegionKind:
    return MakeElementRegion(R, PointeeTy);

  case MemRegion::ElementRegionKind: {
    // If we are casting from an ElementRegion to another type, the
    // algorithm is as follows:
    //
    // (1) Compute the "raw offset" of the ElementRegion from the
    //     base region.  This is done by calling 'getAsRawOffset()'.
    //
    // (2a) If we get a 'RegionRawOffset' after calling
    //      'getAsRawOffset()', determine if the absolute offset
    //      can be exactly divided into chunks of the size of the
    //      casted-pointee type.  If so, create a new ElementRegion with
    //      the pointee-cast type as the new ElementType and the index
    //      being the offset divided by the chunk size.  If not, create
    //      a new ElementRegion at offset 0 off the raw offset region.
    //
    // (2b) If we don't get a 'RegionRawOffset' after calling
    //      'getAsRawOffset()', it means that we are at offset 0.
    //
    // FIXME: Handle symbolic raw offsets.

    const ElementRegion *elementR = cast<ElementRegion>(R);
    const RegionRawOffset &rawOff = elementR->getAsArrayOffset();
    const MemRegion *baseR = rawOff.getRegion();

    // If we cannot compute a raw offset, throw up our hands and return
    // a NULL MemRegion*.
    if (!baseR)
      return NULL;

    CharUnits off = rawOff.getOffset();

    if (off.isZero()) {
      // Edge case: we are at 0 bytes off the beginning of baseR.  We check
      // to see if the type we are casting to is the same as the type of the
      // base region.  If so, just return the base region.
      if (const TypedValueRegion *TR = dyn_cast<TypedValueRegion>(baseR)) {
        QualType ObjTy = Ctx.getCanonicalType(TR->getValueType());
        QualType CanonPointeeTy = Ctx.getCanonicalType(PointeeTy);
        if (CanonPointeeTy == ObjTy)
          return baseR;
      }

      // Otherwise, create a new ElementRegion at offset 0.
      return MakeElementRegion(baseR, PointeeTy);
    }

    // We have a non-zero offset from the base region.  We want to determine
    // if the offset can be evenly divided by sizeof(PointeeTy).  If so,
    // we create an ElementRegion whose index is that value.  Otherwise, we
    // create two ElementRegions, one that reflects a raw offset and the
    // other that reflects the cast.

    // Compute the index for the new ElementRegion.
    int64_t newIndex = 0;
    const MemRegion *newSuperR = 0;

    // We can only compute sizeof(PointeeTy) if it is a complete type.
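    // Illustrative example (assuming a 4-byte 'int' and a cast to 'int *'):
    // a raw offset of 8 bytes divides evenly and yields element{baseR, 2, int},
    // while a raw offset of 6 bytes does not, so we first build the raw byte
    // region element{baseR, 6, char} and then layer element{..., 0, int} on
    // top of it.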
    if (IsCompleteType(Ctx, PointeeTy)) {
      // Compute the size in **bytes**.
      CharUnits pointeeTySize = Ctx.getTypeSizeInChars(PointeeTy);
      if (!pointeeTySize.isZero()) {
        // Is the offset a multiple of the size?  If so, we can layer the
        // ElementRegion (with elementType == PointeeTy) directly on top of
        // the base region.
        if (off % pointeeTySize == 0) {
          newIndex = off / pointeeTySize;
          newSuperR = baseR;
        }
      }
    }

    if (!newSuperR) {
      // Create an intermediate ElementRegion to represent the raw byte
      // offset.  This will be the super region of the final ElementRegion.
      newSuperR = MakeElementRegion(baseR, Ctx.CharTy, off.getQuantity());
    }

    return MakeElementRegion(newSuperR, PointeeTy, newIndex);
  }
  }

  llvm_unreachable("unreachable");
}

/// CastRetrievedVal - Used by subclasses of StoreManager to implement
/// implicit casts that arise from loads from regions that are reinterpreted
/// as another region.
SVal StoreManager::CastRetrievedVal(SVal V, const TypedValueRegion *R,
                                    QualType castTy, bool performTestOnly) {
  if (castTy.isNull() || V.isUnknownOrUndef())
    return V;

  ASTContext &Ctx = svalBuilder.getContext();

  if (performTestOnly) {
    // Automatically translate references to pointers.
    QualType T = R->getValueType();
    if (const ReferenceType *RT = T->getAs<ReferenceType>())
      T = Ctx.getPointerType(RT->getPointeeType());

    assert(svalBuilder.getContext().hasSameUnqualifiedType(castTy, T));
    return V;
  }

  return svalBuilder.dispatchCast(V, castTy);
}

SVal StoreManager::getLValueFieldOrIvar(const Decl *D, SVal Base) {
  if (Base.isUnknownOrUndef())
    return Base;

  Loc BaseL = cast<Loc>(Base);
  const MemRegion* BaseR = 0;

  switch (BaseL.getSubKind()) {
  case loc::MemRegionKind:
    BaseR = cast<loc::MemRegionVal>(BaseL).getRegion();
    break;

  case loc::GotoLabelKind:
    // These are abnormal cases.  Flag an undefined value.
    return UndefinedVal();

  case loc::ConcreteIntKind:
    // While these seem funny, this can happen through casts.
    // FIXME: What we should return is the field offset.  For example,
    // add the field offset to the integer value.  That way funny things
    // like this work properly:  &(((struct foo *) 0xa)->f)
    return Base;

  default:
    llvm_unreachable("Unhandled Base.");
  }

  // NOTE: We must have this check first because ObjCIvarDecl is a subclass
  // of FieldDecl.
  if (const ObjCIvarDecl *ID = dyn_cast<ObjCIvarDecl>(D))
    return loc::MemRegionVal(MRMgr.getObjCIvarRegion(ID, BaseR));

  return loc::MemRegionVal(MRMgr.getFieldRegion(cast<FieldDecl>(D), BaseR));
}

SVal StoreManager::getLValueElement(QualType elementType, NonLoc Offset,
                                    SVal Base) {
  // If the base is an unknown or undefined value, just return it back.
  // FIXME: For absolute pointer addresses, we just return that value back as
  //  well, although in reality we should return the offset added to that
  //  value.
  if (Base.isUnknownOrUndef() || isa<loc::ConcreteInt>(Base))
    return Base;

  const MemRegion* BaseRegion = cast<loc::MemRegionVal>(Base).getRegion();

  // A pointer of any type can be cast and used as an array base.
  const ElementRegion *ElemR = dyn_cast<ElementRegion>(BaseRegion);

  // Convert the offset to the appropriate size and signedness.
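  // convertToArrayIndex() normalizes the offset to the analyzer's canonical
  // array index type, so that the index arithmetic below is performed with a
  // consistent width and signedness.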
  Offset = cast<NonLoc>(svalBuilder.convertToArrayIndex(Offset));

  if (!ElemR) {
    // If the base region is not an ElementRegion, create one.
    // This can happen in the following example:
    //
    //   char *p = __builtin_alloca(10);
    //   p[1] = 8;
    //
    // Observe that 'p' binds to an AllocaRegion.
    return loc::MemRegionVal(MRMgr.getElementRegion(elementType, Offset,
                                                    BaseRegion, Ctx));
  }

  SVal BaseIdx = ElemR->getIndex();

  if (!isa<nonloc::ConcreteInt>(BaseIdx))
    return UnknownVal();

  const llvm::APSInt& BaseIdxI = cast<nonloc::ConcreteInt>(BaseIdx).getValue();

  // Only allow non-integer offsets if the base region has no offset itself.
  // FIXME: This is a somewhat arbitrary restriction.  We should be using
  // SValBuilder here to add the two offsets without checking their types.
  if (!isa<nonloc::ConcreteInt>(Offset)) {
    if (isa<ElementRegion>(BaseRegion->StripCasts()))
      return UnknownVal();

    return loc::MemRegionVal(MRMgr.getElementRegion(elementType, Offset,
                                                    ElemR->getSuperRegion(),
                                                    Ctx));
  }

  const llvm::APSInt& OffI = cast<nonloc::ConcreteInt>(Offset).getValue();
  assert(BaseIdxI.isSigned());

  // Compute the new index.
  nonloc::ConcreteInt NewIdx(svalBuilder.getBasicValueFactory().getValue(BaseIdxI +
                                                                         OffI));

  // Construct the new ElementRegion.
  const MemRegion *ArrayR = ElemR->getSuperRegion();
  return loc::MemRegionVal(MRMgr.getElementRegion(elementType, NewIdx, ArrayR,
                                                  Ctx));
}

StoreManager::BindingsHandler::~BindingsHandler() {}

void SubRegionMap::anchor() { }
void SubRegionMap::Visitor::anchor() { }
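// Note on getLValueElement() above: when both the base region's index and the
// incoming offset are concrete integers, the two indices are simply added and
// a single ElementRegion is built over the same super region.  For example
// (illustrative), a base of element{arr, 1, int} with a concrete offset of 2
// yields element{arr, 3, int} rather than a stack of two ElementRegions.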