//===- MemoryBuiltins.cpp - Identify calls to memory builtins ------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This family of functions identifies calls to builtin functions that allocate
// or free memory.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/TargetFolder.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/Utils/Local.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include <cassert>
#include <cstdint>
#include <iterator>
#include <numeric>
#include <type_traits>
#include <utility>

using namespace llvm;

#define DEBUG_TYPE "memory-builtins"

enum AllocType : uint8_t {
  OpNewLike = 1<<0,        // allocates; never returns null
  MallocLike = 1<<1,       // allocates; may return null
  AlignedAllocLike = 1<<2, // allocates with alignment; may return null
  CallocLike = 1<<3,       // allocates + bzero
  ReallocLike = 1<<4,      // reallocates
  StrDupLike = 1<<5,
  MallocOrOpNewLike = MallocLike | OpNewLike,
  MallocOrCallocLike = MallocLike | OpNewLike | CallocLike | AlignedAllocLike,
  AllocLike = MallocOrCallocLike | StrDupLike,
  AnyAlloc = AllocLike | ReallocLike
};

enum class MallocFamily {
  Malloc,
  CPPNew,             // new(unsigned int)
  CPPNewAligned,      // new(unsigned int, align_val_t)
  CPPNewArray,        // new[](unsigned int)
  CPPNewArrayAligned, // new[](unsigned long, align_val_t)
  MSVCNew,            // new(unsigned int)
  MSVCArrayNew,       // new[](unsigned int)
  VecMalloc,
  KmpcAllocShared,
};

StringRef mangledNameForMallocFamily(const MallocFamily &Family) {
  switch (Family) {
  case MallocFamily::Malloc:
    return "malloc";
  case MallocFamily::CPPNew:
    return "_Znwm";
  case MallocFamily::CPPNewAligned:
    return "_ZnwmSt11align_val_t";
  case MallocFamily::CPPNewArray:
    return "_Znam";
  case MallocFamily::CPPNewArrayAligned:
    return "_ZnamSt11align_val_t";
  case MallocFamily::MSVCNew:
    return "??2@YAPAXI@Z";
  case MallocFamily::MSVCArrayNew:
    return "??_U@YAPAXI@Z";
  case MallocFamily::VecMalloc:
    return "vec_malloc";
  case MallocFamily::KmpcAllocShared:
    return "__kmpc_alloc_shared";
  }
  llvm_unreachable("missing an alloc family");
}

struct AllocFnsTy {
  AllocType AllocTy;
  unsigned NumParams;
  // First and Second size parameters (or -1 if unused)
  int FstParam, SndParam;
  // Alignment parameter for aligned_alloc and aligned new
  int AlignParam;
  // Name of default allocator function to group malloc/free calls by family
  MallocFamily Family;
};

// clang-format off
// FIXME: certain users need more information. E.g., SimplifyLibCalls needs to
// know which functions are nounwind, noalias, nocapture parameters, etc.
static const std::pair<LibFunc, AllocFnsTy> AllocationFnData[] = {
    {LibFunc_malloc, {MallocLike, 1, 0, -1, -1, MallocFamily::Malloc}},
    {LibFunc_vec_malloc, {MallocLike, 1, 0, -1, -1, MallocFamily::VecMalloc}},
    {LibFunc_valloc, {MallocLike, 1, 0, -1, -1, MallocFamily::Malloc}},
    {LibFunc_Znwj, {OpNewLike, 1, 0, -1, -1, MallocFamily::CPPNew}},                                    // new(unsigned int)
    {LibFunc_ZnwjRKSt9nothrow_t, {MallocLike, 2, 0, -1, -1, MallocFamily::CPPNew}},                     // new(unsigned int, nothrow)
    {LibFunc_ZnwjSt11align_val_t, {OpNewLike, 2, 0, -1, 1, MallocFamily::CPPNewAligned}},               // new(unsigned int, align_val_t)
    {LibFunc_ZnwjSt11align_val_tRKSt9nothrow_t, {MallocLike, 3, 0, -1, 1, MallocFamily::CPPNewAligned}}, // new(unsigned int, align_val_t, nothrow)
    {LibFunc_Znwm, {OpNewLike, 1, 0, -1, -1, MallocFamily::CPPNew}},                                    // new(unsigned long)
    {LibFunc_ZnwmRKSt9nothrow_t, {MallocLike, 2, 0, -1, -1, MallocFamily::CPPNew}},                     // new(unsigned long, nothrow)
    {LibFunc_ZnwmSt11align_val_t, {OpNewLike, 2, 0, -1, 1, MallocFamily::CPPNewAligned}},               // new(unsigned long, align_val_t)
    {LibFunc_ZnwmSt11align_val_tRKSt9nothrow_t, {MallocLike, 3, 0, -1, 1, MallocFamily::CPPNewAligned}}, // new(unsigned long, align_val_t, nothrow)
    {LibFunc_Znaj, {OpNewLike, 1, 0, -1, -1, MallocFamily::CPPNewArray}},                               // new[](unsigned int)
    {LibFunc_ZnajRKSt9nothrow_t, {MallocLike, 2, 0, -1, -1, MallocFamily::CPPNewArray}},                // new[](unsigned int, nothrow)
    {LibFunc_ZnajSt11align_val_t, {OpNewLike, 2, 0, -1, 1, MallocFamily::CPPNewArrayAligned}},          // new[](unsigned int, align_val_t)
    {LibFunc_ZnajSt11align_val_tRKSt9nothrow_t, {MallocLike, 3, 0, -1, 1, MallocFamily::CPPNewArrayAligned}}, // new[](unsigned int, align_val_t, nothrow)
    {LibFunc_Znam, {OpNewLike, 1, 0, -1, -1, MallocFamily::CPPNewArray}},                               // new[](unsigned long)
    {LibFunc_ZnamRKSt9nothrow_t, {MallocLike, 2, 0, -1, -1, MallocFamily::CPPNewArray}},                // new[](unsigned long, nothrow)
    {LibFunc_ZnamSt11align_val_t, {OpNewLike, 2, 0, -1, 1, MallocFamily::CPPNewArrayAligned}},          // new[](unsigned long, align_val_t)
    {LibFunc_ZnamSt11align_val_tRKSt9nothrow_t, {MallocLike, 3, 0, -1, 1, MallocFamily::CPPNewArrayAligned}}, // new[](unsigned long, align_val_t, nothrow)
    {LibFunc_msvc_new_int, {OpNewLike, 1, 0, -1, -1, MallocFamily::MSVCNew}},                           // new(unsigned int)
    {LibFunc_msvc_new_int_nothrow, {MallocLike, 2, 0, -1, -1, MallocFamily::MSVCNew}},                  // new(unsigned int, nothrow)
    {LibFunc_msvc_new_longlong, {OpNewLike, 1, 0, -1, -1, MallocFamily::MSVCNew}},                      // new(unsigned long long)
    {LibFunc_msvc_new_longlong_nothrow, {MallocLike, 2, 0, -1, -1, MallocFamily::MSVCNew}},             // new(unsigned long long, nothrow)
    {LibFunc_msvc_new_array_int, {OpNewLike, 1, 0, -1, -1, MallocFamily::MSVCArrayNew}},                // new[](unsigned int)
    {LibFunc_msvc_new_array_int_nothrow, {MallocLike, 2, 0, -1, -1, MallocFamily::MSVCArrayNew}},       // new[](unsigned int, nothrow)
    {LibFunc_msvc_new_array_longlong, {OpNewLike, 1, 0, -1, -1, MallocFamily::MSVCArrayNew}},           // new[](unsigned long long)
    {LibFunc_msvc_new_array_longlong_nothrow, {MallocLike, 2, 0, -1, -1, MallocFamily::MSVCArrayNew}},  // new[](unsigned long long, nothrow)
    {LibFunc_aligned_alloc, {AlignedAllocLike, 2, 1, -1, 0, MallocFamily::Malloc}},
    {LibFunc_memalign, {AlignedAllocLike, 2, 1, -1, 0, MallocFamily::Malloc}},
    {LibFunc_calloc, {CallocLike, 2, 0, 1, -1, MallocFamily::Malloc}},
    {LibFunc_vec_calloc, {CallocLike, 2, 0, 1, -1, MallocFamily::VecMalloc}},
    {LibFunc_realloc, {ReallocLike, 2, 1, -1, -1, MallocFamily::Malloc}},
    {LibFunc_vec_realloc, {ReallocLike, 2, 1, -1, -1, MallocFamily::VecMalloc}},
    {LibFunc_reallocf, {ReallocLike, 2, 1, -1, -1, MallocFamily::Malloc}},
    {LibFunc_strdup, {StrDupLike, 1, -1, -1, -1, MallocFamily::Malloc}},
    {LibFunc_dunder_strdup, {StrDupLike, 1, -1, -1, -1, MallocFamily::Malloc}},
    {LibFunc_strndup, {StrDupLike, 2, 1, -1, -1, MallocFamily::Malloc}},
    {LibFunc_dunder_strndup, {StrDupLike, 2, 1, -1, -1, MallocFamily::Malloc}},
    {LibFunc___kmpc_alloc_shared, {MallocLike, 1, 0, -1, -1, MallocFamily::KmpcAllocShared}},
    // TODO: Handle "int posix_memalign(void **, size_t, size_t)"
};
// clang-format on

static const Function *getCalledFunction(const Value *V,
                                         bool &IsNoBuiltin) {
  // Don't care about intrinsics in this case.
  if (isa<IntrinsicInst>(V))
    return nullptr;

  const auto *CB = dyn_cast<CallBase>(V);
  if (!CB)
    return nullptr;

  IsNoBuiltin = CB->isNoBuiltin();

  if (const Function *Callee = CB->getCalledFunction())
    return Callee;
  return nullptr;
}

/// Returns the allocation data for the given value if it's a call to a known
/// allocation function.
static Optional<AllocFnsTy>
getAllocationDataForFunction(const Function *Callee, AllocType AllocTy,
                             const TargetLibraryInfo *TLI) {
  // Make sure that the function is available.
  LibFunc TLIFn;
  if (!TLI || !TLI->getLibFunc(*Callee, TLIFn) || !TLI->has(TLIFn))
    return None;

  const auto *Iter = find_if(
      AllocationFnData, [TLIFn](const std::pair<LibFunc, AllocFnsTy> &P) {
        return P.first == TLIFn;
      });

  if (Iter == std::end(AllocationFnData))
    return None;

  const AllocFnsTy *FnData = &Iter->second;
  if ((FnData->AllocTy & AllocTy) != FnData->AllocTy)
    return None;

  // Check function prototype.
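  // The table above only records parameter positions, so verify the expected
  // shape here: the callee must return i8*, take exactly NumParams parameters,
  // and any recorded size parameter must be a 32- or 64-bit integer;
  // otherwise it is not treated as a known allocator.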
  int FstParam = FnData->FstParam;
  int SndParam = FnData->SndParam;
  FunctionType *FTy = Callee->getFunctionType();

  if (FTy->getReturnType() == Type::getInt8PtrTy(FTy->getContext()) &&
      FTy->getNumParams() == FnData->NumParams &&
      (FstParam < 0 ||
       (FTy->getParamType(FstParam)->isIntegerTy(32) ||
        FTy->getParamType(FstParam)->isIntegerTy(64))) &&
      (SndParam < 0 ||
       FTy->getParamType(SndParam)->isIntegerTy(32) ||
       FTy->getParamType(SndParam)->isIntegerTy(64)))
    return *FnData;
  return None;
}

static Optional<AllocFnsTy> getAllocationData(const Value *V, AllocType AllocTy,
                                              const TargetLibraryInfo *TLI) {
  bool IsNoBuiltinCall;
  if (const Function *Callee = getCalledFunction(V, IsNoBuiltinCall))
    if (!IsNoBuiltinCall)
      return getAllocationDataForFunction(Callee, AllocTy, TLI);
  return None;
}

static Optional<AllocFnsTy>
getAllocationData(const Value *V, AllocType AllocTy,
                  function_ref<const TargetLibraryInfo &(Function &)> GetTLI) {
  bool IsNoBuiltinCall;
  if (const Function *Callee = getCalledFunction(V, IsNoBuiltinCall))
    if (!IsNoBuiltinCall)
      return getAllocationDataForFunction(
          Callee, AllocTy, &GetTLI(const_cast<Function &>(*Callee)));
  return None;
}

static Optional<AllocFnsTy> getAllocationSize(const Value *V,
                                              const TargetLibraryInfo *TLI) {
  bool IsNoBuiltinCall;
  const Function *Callee = getCalledFunction(V, IsNoBuiltinCall);
  if (!Callee)
    return None;

  // Prefer to use existing information over allocsize. This will give us an
  // accurate AllocTy.
  if (!IsNoBuiltinCall)
    if (Optional<AllocFnsTy> Data =
            getAllocationDataForFunction(Callee, AnyAlloc, TLI))
      return Data;

  Attribute Attr = Callee->getFnAttribute(Attribute::AllocSize);
  if (Attr == Attribute())
    return None;

  std::pair<unsigned, Optional<unsigned>> Args = Attr.getAllocSizeArgs();

  AllocFnsTy Result;
  // Because allocsize only tells us how many bytes are allocated, we're not
  // really allowed to assume anything, so we use MallocLike.
  Result.AllocTy = MallocLike;
  Result.NumParams = Callee->getNumOperands();
  Result.FstParam = Args.first;
  Result.SndParam = Args.second.getValueOr(-1);
  // Allocsize has no way to specify an alignment argument
  Result.AlignParam = -1;
  return Result;
}

/// Tests if a value is a call or invoke to a library function that
/// allocates or reallocates memory (either malloc, calloc, realloc, or strdup
/// like).
bool llvm::isAllocationFn(const Value *V, const TargetLibraryInfo *TLI) {
  return getAllocationData(V, AnyAlloc, TLI).hasValue();
}
bool llvm::isAllocationFn(
    const Value *V,
    function_ref<const TargetLibraryInfo &(Function &)> GetTLI) {
  return getAllocationData(V, AnyAlloc, GetTLI).hasValue();
}

/// Tests if a value is a call or invoke to a library function that
/// allocates uninitialized memory (such as malloc).
static bool isMallocLikeFn(const Value *V, const TargetLibraryInfo *TLI) {
  return getAllocationData(V, MallocOrOpNewLike, TLI).hasValue();
}

/// Tests if a value is a call or invoke to a library function that
/// allocates uninitialized memory with alignment (such as aligned_alloc).
static bool isAlignedAllocLikeFn(const Value *V, const TargetLibraryInfo *TLI) {
  return getAllocationData(V, AlignedAllocLike, TLI).hasValue();
}

/// Tests if a value is a call or invoke to a library function that
/// allocates zero-filled memory (such as calloc).
static bool isCallocLikeFn(const Value *V, const TargetLibraryInfo *TLI) {
  return getAllocationData(V, CallocLike, TLI).hasValue();
}

/// Tests if a value is a call or invoke to a library function that
/// allocates memory similar to malloc or calloc.
bool llvm::isMallocOrCallocLikeFn(const Value *V,
                                  const TargetLibraryInfo *TLI) {
  return getAllocationData(V, MallocOrCallocLike, TLI).hasValue();
}

/// Tests if a value is a call or invoke to a library function that
/// allocates memory (either malloc, calloc, or strdup like).
bool llvm::isAllocLikeFn(const Value *V, const TargetLibraryInfo *TLI) {
  return getAllocationData(V, AllocLike, TLI).hasValue();
}

/// Tests if a value is a call or invoke to a library function that
/// reallocates memory (e.g., realloc).
bool llvm::isReallocLikeFn(const Value *V, const TargetLibraryInfo *TLI) {
  return getAllocationData(V, ReallocLike, TLI).hasValue();
}

/// Tests if a function is a call or invoke to a library function that
/// reallocates memory (e.g., realloc).
bool llvm::isReallocLikeFn(const Function *F, const TargetLibraryInfo *TLI) {
  return getAllocationDataForFunction(F, ReallocLike, TLI).hasValue();
}

bool llvm::isAllocRemovable(const CallBase *CB, const TargetLibraryInfo *TLI) {
  assert(isAllocationFn(CB, TLI));

  // Note: Removability is highly dependent on the source language. For
  // example, recent C++ requires direct calls to the global allocation
  // [basic.stc.dynamic.allocation] to be observable unless part of a new
  // expression [expr.new paragraph 13].

  // Historically we've treated the C family allocation routines as removable
  return isAllocLikeFn(CB, TLI);
}

Value *llvm::getAllocAlignment(const CallBase *V,
                               const TargetLibraryInfo *TLI) {
  assert(isAllocationFn(V, TLI));

  const Optional<AllocFnsTy> FnData = getAllocationData(V, AnyAlloc, TLI);
  if (FnData.hasValue() && FnData->AlignParam >= 0) {
    return V->getOperand(FnData->AlignParam);
  }
  unsigned AllocAlignParam;
  if (V->getAttributes().hasAttrSomewhere(Attribute::AllocAlign,
                                          &AllocAlignParam)) {
    return V->getOperand(AllocAlignParam - 1);
  }
  return nullptr;
}

/// When we're compiling N-bit code, and the user uses parameters that are
/// greater than N bits (e.g. uint64_t on a 32-bit build), we can run into
/// trouble with APInt size issues. This function handles resizing + overflow
/// checks for us. Check and zext or trunc \p I depending on IntTyBits and
/// I's value.
static bool CheckedZextOrTrunc(APInt &I, unsigned IntTyBits) {
  // More bits than we can handle. Checking the bit width isn't necessary, but
  // it's faster than checking active bits, and should give `false` in the
  // vast majority of cases.
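  // For example, a 64-bit APInt can still be narrowed to a 32-bit index type
  // as long as its active bits fit in 32 bits.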
  if (I.getBitWidth() > IntTyBits && I.getActiveBits() > IntTyBits)
    return false;
  if (I.getBitWidth() != IntTyBits)
    I = I.zextOrTrunc(IntTyBits);
  return true;
}

Optional<APInt>
llvm::getAllocSize(const CallBase *CB,
                   const TargetLibraryInfo *TLI,
                   std::function<const Value *(const Value *)> Mapper) {
  // Note: This handles both explicitly listed allocation functions and
  // allocsize. The code structure could stand to be cleaned up a bit.
  Optional<AllocFnsTy> FnData = getAllocationSize(CB, TLI);
  if (!FnData)
    return None;

  // Get the index type for this address space; results and intermediate
  // computations are performed at that width.
  auto &DL = CB->getModule()->getDataLayout();
  const unsigned IntTyBits = DL.getIndexTypeSizeInBits(CB->getType());

  // Handle strdup-like functions separately.
  if (FnData->AllocTy == StrDupLike) {
    APInt Size(IntTyBits, GetStringLength(Mapper(CB->getArgOperand(0))));
    if (!Size)
      return None;

    // Strndup limits strlen.
    if (FnData->FstParam > 0) {
      const ConstantInt *Arg =
          dyn_cast<ConstantInt>(Mapper(CB->getArgOperand(FnData->FstParam)));
      if (!Arg)
        return None;

      APInt MaxSize = Arg->getValue().zextOrSelf(IntTyBits);
      if (Size.ugt(MaxSize))
        Size = MaxSize + 1;
    }
    return Size;
  }

  const ConstantInt *Arg =
      dyn_cast<ConstantInt>(Mapper(CB->getArgOperand(FnData->FstParam)));
  if (!Arg)
    return None;

  APInt Size = Arg->getValue();
  if (!CheckedZextOrTrunc(Size, IntTyBits))
    return None;

  // Size is determined by just 1 parameter.
  if (FnData->SndParam < 0)
    return Size;

  Arg = dyn_cast<ConstantInt>(Mapper(CB->getArgOperand(FnData->SndParam)));
  if (!Arg)
    return None;

  APInt NumElems = Arg->getValue();
  if (!CheckedZextOrTrunc(NumElems, IntTyBits))
    return None;

  bool Overflow;
  Size = Size.umul_ov(NumElems, Overflow);
  if (Overflow)
    return None;
  return Size;
}

Constant *llvm::getInitialValueOfAllocation(const CallBase *Alloc,
                                            const TargetLibraryInfo *TLI,
                                            Type *Ty) {
  assert(isAllocationFn(Alloc, TLI));

  // malloc and aligned_alloc are uninitialized (undef)
  if (isMallocLikeFn(Alloc, TLI) || isAlignedAllocLikeFn(Alloc, TLI))
    return UndefValue::get(Ty);

  // calloc zero initializes
  if (isCallocLikeFn(Alloc, TLI))
    return Constant::getNullValue(Ty);

  return nullptr;
}

struct FreeFnsTy {
  unsigned NumParams;
  // Name of default allocator function to group malloc/free calls by family
  MallocFamily Family;
};

// clang-format off
static const std::pair<LibFunc, FreeFnsTy> FreeFnData[] = {
    {LibFunc_free, {1, MallocFamily::Malloc}},
    {LibFunc_ZdlPv, {1, MallocFamily::CPPNew}},                          // operator delete(void*)
    {LibFunc_ZdaPv, {1, MallocFamily::CPPNewArray}},                     // operator delete[](void*)
    {LibFunc_msvc_delete_ptr32, {1, MallocFamily::MSVCNew}},             // operator delete(void*)
    {LibFunc_msvc_delete_ptr64, {1, MallocFamily::MSVCNew}},             // operator delete(void*)
    {LibFunc_msvc_delete_array_ptr32, {1, MallocFamily::MSVCArrayNew}},  // operator delete[](void*)
    {LibFunc_msvc_delete_array_ptr64, {1, MallocFamily::MSVCArrayNew}},  // operator delete[](void*)
    {LibFunc_ZdlPvj, {2, MallocFamily::CPPNew}},                         // delete(void*, uint)
    {LibFunc_ZdlPvm, {2, MallocFamily::CPPNew}},                         // delete(void*, ulong)
    {LibFunc_ZdlPvRKSt9nothrow_t, {2, MallocFamily::CPPNew}},            // delete(void*, nothrow)
    {LibFunc_ZdlPvSt11align_val_t, {2, MallocFamily::CPPNewAligned}},    // delete(void*, align_val_t)
    {LibFunc_ZdaPvj, {2, MallocFamily::CPPNewArray}},                    // delete[](void*, uint)
    {LibFunc_ZdaPvm, {2, MallocFamily::CPPNewArray}},                    // delete[](void*, ulong)
    {LibFunc_ZdaPvRKSt9nothrow_t, {2, MallocFamily::CPPNewArray}},       // delete[](void*, nothrow)
    {LibFunc_ZdaPvSt11align_val_t, {2, MallocFamily::CPPNewArrayAligned}}, // delete[](void*, align_val_t)
    {LibFunc_msvc_delete_ptr32_int, {2, MallocFamily::MSVCNew}},         // delete(void*, uint)
    {LibFunc_msvc_delete_ptr64_longlong, {2, MallocFamily::MSVCNew}},    // delete(void*, ulonglong)
    {LibFunc_msvc_delete_ptr32_nothrow, {2, MallocFamily::MSVCNew}},     // delete(void*, nothrow)
    {LibFunc_msvc_delete_ptr64_nothrow, {2, MallocFamily::MSVCNew}},     // delete(void*, nothrow)
    {LibFunc_msvc_delete_array_ptr32_int, {2, MallocFamily::MSVCArrayNew}},      // delete[](void*, uint)
    {LibFunc_msvc_delete_array_ptr64_longlong, {2, MallocFamily::MSVCArrayNew}}, // delete[](void*, ulonglong)
    {LibFunc_msvc_delete_array_ptr32_nothrow, {2, MallocFamily::MSVCArrayNew}},  // delete[](void*, nothrow)
    {LibFunc_msvc_delete_array_ptr64_nothrow, {2, MallocFamily::MSVCArrayNew}},  // delete[](void*, nothrow)
    {LibFunc___kmpc_free_shared, {2, MallocFamily::KmpcAllocShared}},    // OpenMP Offloading RTL free
    {LibFunc_ZdlPvSt11align_val_tRKSt9nothrow_t, {3, MallocFamily::CPPNewAligned}},      // delete(void*, align_val_t, nothrow)
    {LibFunc_ZdaPvSt11align_val_tRKSt9nothrow_t, {3, MallocFamily::CPPNewArrayAligned}}, // delete[](void*, align_val_t, nothrow)
    {LibFunc_ZdlPvjSt11align_val_t, {3, MallocFamily::CPPNewAligned}},   // delete(void*, unsigned int, align_val_t)
    {LibFunc_ZdlPvmSt11align_val_t, {3, MallocFamily::CPPNewAligned}},   // delete(void*, unsigned long, align_val_t)
    {LibFunc_ZdaPvjSt11align_val_t, {3, MallocFamily::CPPNewArrayAligned}}, // delete[](void*, unsigned int, align_val_t)
    {LibFunc_ZdaPvmSt11align_val_t, {3, MallocFamily::CPPNewArrayAligned}}, // delete[](void*, unsigned long, align_val_t)
};
// clang-format on

Optional<FreeFnsTy> getFreeFunctionDataForFunction(const Function *Callee,
                                                   const LibFunc TLIFn) {
  const auto *Iter =
      find_if(FreeFnData, [TLIFn](const std::pair<LibFunc, FreeFnsTy> &P) {
        return P.first == TLIFn;
      });
  if (Iter == std::end(FreeFnData))
    return None;
  return Iter->second;
}

Optional<StringRef> llvm::getAllocationFamily(const Value *I,
                                              const TargetLibraryInfo *TLI) {
  bool IsNoBuiltin;
  const Function *Callee = getCalledFunction(I, IsNoBuiltin);
  if (Callee == nullptr)
    return None;
  LibFunc TLIFn;
  if (!TLI || !TLI->getLibFunc(*Callee, TLIFn) || !TLI->has(TLIFn))
    return None;
  const auto AllocData = getAllocationDataForFunction(Callee, AnyAlloc, TLI);
  if (AllocData.hasValue())
    return mangledNameForMallocFamily(AllocData.getValue().Family);
  const auto FreeData = getFreeFunctionDataForFunction(Callee, TLIFn);
  if (FreeData.hasValue())
    return mangledNameForMallocFamily(FreeData.getValue().Family);
  return None;
}

/// isLibFreeFunction - Returns true if the function is a builtin free()
bool llvm::isLibFreeFunction(const Function *F, const LibFunc TLIFn) {
  Optional<FreeFnsTy> FnData = getFreeFunctionDataForFunction(F, TLIFn);
  if (!FnData.hasValue())
    return false;

  // Check free prototype.
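  // A free-like function is expected to return void and take a pointer (i8*)
  // as its first parameter; the expected parameter count comes from FreeFnData.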
  // FIXME: workaround for PR5130, this will be obsolete when a nobuiltin
  // attribute will exist.
  FunctionType *FTy = F->getFunctionType();
  if (!FTy->getReturnType()->isVoidTy())
    return false;
  if (FTy->getNumParams() != FnData->NumParams)
    return false;
  if (FTy->getParamType(0) != Type::getInt8PtrTy(F->getContext()))
    return false;

  return true;
}

/// isFreeCall - Returns non-null if the value is a call to the builtin free()
const CallInst *llvm::isFreeCall(const Value *I, const TargetLibraryInfo *TLI) {
  bool IsNoBuiltinCall;
  const Function *Callee = getCalledFunction(I, IsNoBuiltinCall);
  if (Callee == nullptr || IsNoBuiltinCall)
    return nullptr;

  LibFunc TLIFn;
  if (!TLI || !TLI->getLibFunc(*Callee, TLIFn) || !TLI->has(TLIFn))
    return nullptr;

  return isLibFreeFunction(Callee, TLIFn) ? dyn_cast<CallInst>(I) : nullptr;
}


//===----------------------------------------------------------------------===//
//  Utility functions to compute size of objects.
//
static APInt getSizeWithOverflow(const SizeOffsetType &Data) {
  if (Data.second.isNegative() || Data.first.ult(Data.second))
    return APInt(Data.first.getBitWidth(), 0);
  return Data.first - Data.second;
}

/// Compute the size of the object pointed to by Ptr. Returns true and the
/// object size in Size if successful, and false otherwise.
/// If RoundToAlign is true, then Size is rounded up to the alignment of
/// allocas, byval arguments, and global variables.
bool llvm::getObjectSize(const Value *Ptr, uint64_t &Size, const DataLayout &DL,
                         const TargetLibraryInfo *TLI, ObjectSizeOpts Opts) {
  ObjectSizeOffsetVisitor Visitor(DL, TLI, Ptr->getContext(), Opts);
  SizeOffsetType Data = Visitor.compute(const_cast<Value *>(Ptr));
  if (!Visitor.bothKnown(Data))
    return false;

  Size = getSizeWithOverflow(Data).getZExtValue();
  return true;
}

Value *llvm::lowerObjectSizeCall(IntrinsicInst *ObjectSize,
                                 const DataLayout &DL,
                                 const TargetLibraryInfo *TLI,
                                 bool MustSucceed) {
  assert(ObjectSize->getIntrinsicID() == Intrinsic::objectsize &&
         "ObjectSize must be a call to llvm.objectsize!");

  bool MaxVal = cast<ConstantInt>(ObjectSize->getArgOperand(1))->isZero();
  ObjectSizeOpts EvalOptions;
  // Unless we have to fold this to something, try to be as accurate as
  // possible.
  if (MustSucceed)
    EvalOptions.EvalMode =
        MaxVal ? ObjectSizeOpts::Mode::Max : ObjectSizeOpts::Mode::Min;
  else
    EvalOptions.EvalMode = ObjectSizeOpts::Mode::Exact;

  EvalOptions.NullIsUnknownSize =
      cast<ConstantInt>(ObjectSize->getArgOperand(2))->isOne();

  auto *ResultType = cast<IntegerType>(ObjectSize->getType());
  bool StaticOnly = cast<ConstantInt>(ObjectSize->getArgOperand(3))->isZero();
  if (StaticOnly) {
    // FIXME: Does it make sense to just return a failure value if the size
    // won't fit in the output and `!MustSucceed`?
    uint64_t Size;
    if (getObjectSize(ObjectSize->getArgOperand(0), Size, DL, TLI,
                      EvalOptions) &&
        isUIntN(ResultType->getBitWidth(), Size))
      return ConstantInt::get(ResultType, Size);
  } else {
    LLVMContext &Ctx = ObjectSize->getFunction()->getContext();
    ObjectSizeOffsetEvaluator Eval(DL, TLI, Ctx, EvalOptions);
    SizeOffsetEvalType SizeOffsetPair =
        Eval.compute(ObjectSize->getArgOperand(0));

    if (SizeOffsetPair != ObjectSizeOffsetEvaluator::unknown()) {
      IRBuilder<TargetFolder> Builder(Ctx, TargetFolder(DL));
      Builder.SetInsertPoint(ObjectSize);

      // If we're outside the end of the object, then we can always access
      // exactly 0 bytes.
      Value *ResultSize =
          Builder.CreateSub(SizeOffsetPair.first, SizeOffsetPair.second);
      Value *UseZero =
          Builder.CreateICmpULT(SizeOffsetPair.first, SizeOffsetPair.second);
      ResultSize = Builder.CreateZExtOrTrunc(ResultSize, ResultType);
      Value *Ret = Builder.CreateSelect(
          UseZero, ConstantInt::get(ResultType, 0), ResultSize);

      // The non-constant size expression cannot evaluate to -1.
      if (!isa<Constant>(SizeOffsetPair.first) ||
          !isa<Constant>(SizeOffsetPair.second))
        Builder.CreateAssumption(
            Builder.CreateICmpNE(Ret, ConstantInt::get(ResultType, -1)));

      return Ret;
    }
  }

  if (!MustSucceed)
    return nullptr;

  return ConstantInt::get(ResultType, MaxVal ? -1ULL : 0);
}

STATISTIC(ObjectVisitorArgument,
          "Number of arguments with unsolved size and offset");
STATISTIC(ObjectVisitorLoad,
          "Number of load instructions with unsolved size and offset");

APInt ObjectSizeOffsetVisitor::align(APInt Size, MaybeAlign Alignment) {
  if (Options.RoundToAlign && Alignment)
    return APInt(IntTyBits, alignTo(Size.getZExtValue(), Alignment));
  return Size;
}

ObjectSizeOffsetVisitor::ObjectSizeOffsetVisitor(const DataLayout &DL,
                                                 const TargetLibraryInfo *TLI,
                                                 LLVMContext &Context,
                                                 ObjectSizeOpts Options)
    : DL(DL), TLI(TLI), Options(Options) {
  // Pointer size must be rechecked for each object visited since it could have
  // a different address space.
}

SizeOffsetType ObjectSizeOffsetVisitor::compute(Value *V) {
  unsigned InitialIntTyBits = DL.getIndexTypeSizeInBits(V->getType());

  // Stripping pointer casts can strip address space casts which can change the
  // index type size. The invariant is that we use the value type to determine
  // the index type size and if we stripped address space casts we have to
  // readjust the APInt as we pass it upwards in order for the APInt to match
  // the type the caller passed in.
  APInt Offset(InitialIntTyBits, 0);
  V = V->stripAndAccumulateConstantOffsets(
      DL, Offset, /* AllowNonInbounds */ true, /* AllowInvariantGroup */ true);

  // Later we use the index type size and zero but it will match the type of
  // the value that is passed to computeImpl.
  IntTyBits = DL.getIndexTypeSizeInBits(V->getType());
  Zero = APInt::getZero(IntTyBits);

  bool IndexTypeSizeChanged = InitialIntTyBits != IntTyBits;
  if (!IndexTypeSizeChanged && Offset.isZero())
    return computeImpl(V);

  // We stripped an address space cast that changed the index type size or we
  // accumulated some constant offset (or both). Readjust the bit width to
  // match the argument index type size and apply the offset, as required.
  SizeOffsetType SOT = computeImpl(V);
  if (IndexTypeSizeChanged) {
    if (knownSize(SOT) && !::CheckedZextOrTrunc(SOT.first, InitialIntTyBits))
      SOT.first = APInt();
    if (knownOffset(SOT) && !::CheckedZextOrTrunc(SOT.second, InitialIntTyBits))
      SOT.second = APInt();
  }
  // If the computed offset is "unknown" we cannot add the stripped offset.
  return {SOT.first,
          SOT.second.getBitWidth() > 1 ? SOT.second + Offset : SOT.second};
}

SizeOffsetType ObjectSizeOffsetVisitor::computeImpl(Value *V) {
  if (Instruction *I = dyn_cast<Instruction>(V)) {
    // If we have already seen this instruction, bail out. Cycles can happen in
    // unreachable code after constant propagation.
    if (!SeenInsts.insert(I).second)
      return unknown();

    return visit(*I);
  }
  if (Argument *A = dyn_cast<Argument>(V))
    return visitArgument(*A);
  if (ConstantPointerNull *P = dyn_cast<ConstantPointerNull>(V))
    return visitConstantPointerNull(*P);
  if (GlobalAlias *GA = dyn_cast<GlobalAlias>(V))
    return visitGlobalAlias(*GA);
  if (GlobalVariable *GV = dyn_cast<GlobalVariable>(V))
    return visitGlobalVariable(*GV);
  if (UndefValue *UV = dyn_cast<UndefValue>(V))
    return visitUndefValue(*UV);

  LLVM_DEBUG(dbgs() << "ObjectSizeOffsetVisitor::compute() unhandled value: "
                    << *V << '\n');
  return unknown();
}

bool ObjectSizeOffsetVisitor::CheckedZextOrTrunc(APInt &I) {
  return ::CheckedZextOrTrunc(I, IntTyBits);
}

SizeOffsetType ObjectSizeOffsetVisitor::visitAllocaInst(AllocaInst &I) {
  if (!I.getAllocatedType()->isSized())
    return unknown();

  TypeSize ElemSize = DL.getTypeAllocSize(I.getAllocatedType());
  if (ElemSize.isScalable() && Options.EvalMode != ObjectSizeOpts::Mode::Min)
    return unknown();
  APInt Size(IntTyBits, ElemSize.getKnownMinSize());
  if (!I.isArrayAllocation())
    return std::make_pair(align(Size, I.getAlign()), Zero);

  Value *ArraySize = I.getArraySize();
  if (const ConstantInt *C = dyn_cast<ConstantInt>(ArraySize)) {
    APInt NumElems = C->getValue();
    if (!CheckedZextOrTrunc(NumElems))
      return unknown();

    bool Overflow;
    Size = Size.umul_ov(NumElems, Overflow);
    return Overflow ? unknown()
                    : std::make_pair(align(Size, I.getAlign()), Zero);
  }
  return unknown();
}

SizeOffsetType ObjectSizeOffsetVisitor::visitArgument(Argument &A) {
  Type *MemoryTy = A.getPointeeInMemoryValueType();
  // No interprocedural analysis is done at the moment.
  if (!MemoryTy || !MemoryTy->isSized()) {
    ++ObjectVisitorArgument;
    return unknown();
  }

  APInt Size(IntTyBits, DL.getTypeAllocSize(MemoryTy));
  return std::make_pair(align(Size, A.getParamAlign()), Zero);
}

SizeOffsetType ObjectSizeOffsetVisitor::visitCallBase(CallBase &CB) {
  auto Mapper = [](const Value *V) { return V; };
  if (Optional<APInt> Size = getAllocSize(&CB, TLI, Mapper))
    return std::make_pair(*Size, Zero);
  return unknown();
}

SizeOffsetType
ObjectSizeOffsetVisitor::visitConstantPointerNull(ConstantPointerNull &CPN) {
  // If null is unknown, there's nothing we can do. Additionally, non-zero
  // address spaces can make use of null, so we don't presume to know anything
  // about that.
  //
  // TODO: How should this work with address space casts? We currently just
  // drop them on the floor, but it's unclear what we should do when a NULL
  // from addrspace(1) gets casted to addrspace(0) (or vice-versa).
  if (Options.NullIsUnknownSize || CPN.getType()->getAddressSpace())
    return unknown();
  return std::make_pair(Zero, Zero);
}

SizeOffsetType
ObjectSizeOffsetVisitor::visitExtractElementInst(ExtractElementInst &) {
  return unknown();
}

SizeOffsetType
ObjectSizeOffsetVisitor::visitExtractValueInst(ExtractValueInst &) {
  // Easy cases were already folded by previous passes.
  return unknown();
}

SizeOffsetType ObjectSizeOffsetVisitor::visitGlobalAlias(GlobalAlias &GA) {
  if (GA.isInterposable())
    return unknown();
  return compute(GA.getAliasee());
}

SizeOffsetType ObjectSizeOffsetVisitor::visitGlobalVariable(GlobalVariable &GV) {
  if (!GV.hasDefinitiveInitializer())
    return unknown();

  APInt Size(IntTyBits, DL.getTypeAllocSize(GV.getValueType()));
  return std::make_pair(align(Size, GV.getAlign()), Zero);
}

SizeOffsetType ObjectSizeOffsetVisitor::visitIntToPtrInst(IntToPtrInst &) {
  // clueless
  return unknown();
}

SizeOffsetType ObjectSizeOffsetVisitor::visitLoadInst(LoadInst &) {
  ++ObjectVisitorLoad;
  return unknown();
}

SizeOffsetType ObjectSizeOffsetVisitor::combineSizeOffset(SizeOffsetType LHS,
                                                          SizeOffsetType RHS) {
  if (!bothKnown(LHS) || !bothKnown(RHS))
    return unknown();

  switch (Options.EvalMode) {
  case ObjectSizeOpts::Mode::Min:
    return (getSizeWithOverflow(LHS).slt(getSizeWithOverflow(RHS))) ? LHS : RHS;
  case ObjectSizeOpts::Mode::Max:
    return (getSizeWithOverflow(LHS).sgt(getSizeWithOverflow(RHS))) ? LHS : RHS;
  case ObjectSizeOpts::Mode::Exact:
    return (getSizeWithOverflow(LHS).eq(getSizeWithOverflow(RHS))) ? LHS
                                                                   : unknown();
  }
  llvm_unreachable("missing an eval mode");
}

SizeOffsetType ObjectSizeOffsetVisitor::visitPHINode(PHINode &PN) {
  auto IncomingValues = PN.incoming_values();
  return std::accumulate(IncomingValues.begin() + 1, IncomingValues.end(),
                         compute(*IncomingValues.begin()),
                         [this](SizeOffsetType LHS, Value *VRHS) {
                           return combineSizeOffset(LHS, compute(VRHS));
                         });
}

SizeOffsetType ObjectSizeOffsetVisitor::visitSelectInst(SelectInst &I) {
  return combineSizeOffset(compute(I.getTrueValue()),
                           compute(I.getFalseValue()));
}

SizeOffsetType ObjectSizeOffsetVisitor::visitUndefValue(UndefValue &) {
  return std::make_pair(Zero, Zero);
}

SizeOffsetType ObjectSizeOffsetVisitor::visitInstruction(Instruction &I) {
  LLVM_DEBUG(dbgs() << "ObjectSizeOffsetVisitor unknown instruction:" << I
                    << '\n');
  return unknown();
}

ObjectSizeOffsetEvaluator::ObjectSizeOffsetEvaluator(
    const DataLayout &DL, const TargetLibraryInfo *TLI, LLVMContext &Context,
    ObjectSizeOpts EvalOpts)
    : DL(DL), TLI(TLI), Context(Context),
      Builder(Context, TargetFolder(DL),
              IRBuilderCallbackInserter(
                  [&](Instruction *I) { InsertedInstructions.insert(I); })),
      EvalOpts(EvalOpts) {
  // IntTy and Zero must be set for each compute() since the address space may
  // be different for later objects.
}

SizeOffsetEvalType ObjectSizeOffsetEvaluator::compute(Value *V) {
  // XXX - Are vectors of pointers possible here?
  IntTy = cast<IntegerType>(DL.getIndexType(V->getType()));
  Zero = ConstantInt::get(IntTy, 0);

  SizeOffsetEvalType Result = compute_(V);

  if (!bothKnown(Result)) {
    // Erase everything that was computed in this iteration from the cache, so
    // that no dangling references are left behind. We could be a bit smarter
    // if we kept a dependency graph. It's probably not worth the complexity.
    for (const Value *SeenVal : SeenVals) {
      CacheMapTy::iterator CacheIt = CacheMap.find(SeenVal);
      // non-computable results can be safely cached
      if (CacheIt != CacheMap.end() && anyKnown(CacheIt->second))
        CacheMap.erase(CacheIt);
    }

    // Erase any instructions we inserted as part of the traversal.
    for (Instruction *I : InsertedInstructions) {
      I->replaceAllUsesWith(UndefValue::get(I->getType()));
      I->eraseFromParent();
    }
  }

  SeenVals.clear();
  InsertedInstructions.clear();
  return Result;
}

SizeOffsetEvalType ObjectSizeOffsetEvaluator::compute_(Value *V) {
  ObjectSizeOffsetVisitor Visitor(DL, TLI, Context, EvalOpts);
  SizeOffsetType Const = Visitor.compute(V);
  if (Visitor.bothKnown(Const))
    return std::make_pair(ConstantInt::get(Context, Const.first),
                          ConstantInt::get(Context, Const.second));

  V = V->stripPointerCasts();

  // Check cache.
  CacheMapTy::iterator CacheIt = CacheMap.find(V);
  if (CacheIt != CacheMap.end())
    return CacheIt->second;

  // Always generate code immediately before the instruction being
  // processed, so that the generated code dominates the same BBs.
  BuilderTy::InsertPointGuard Guard(Builder);
  if (Instruction *I = dyn_cast<Instruction>(V))
    Builder.SetInsertPoint(I);

  // Now compute the size and offset.
  SizeOffsetEvalType Result;

  // Record the pointers that were handled in this run, so that they can be
  // cleaned later if something fails. We also use this set to break cycles
  // that can occur in dead code.
  if (!SeenVals.insert(V).second) {
    Result = unknown();
  } else if (GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
    Result = visitGEPOperator(*GEP);
  } else if (Instruction *I = dyn_cast<Instruction>(V)) {
    Result = visit(*I);
  } else if (isa<Argument>(V) ||
             (isa<ConstantExpr>(V) &&
              cast<ConstantExpr>(V)->getOpcode() == Instruction::IntToPtr) ||
             isa<GlobalAlias>(V) ||
             isa<GlobalVariable>(V)) {
    // Ignore values where we cannot do more than ObjectSizeVisitor.
    Result = unknown();
  } else {
    LLVM_DEBUG(
        dbgs() << "ObjectSizeOffsetEvaluator::compute() unhandled value: " << *V
               << '\n');
    Result = unknown();
  }

  // Don't reuse CacheIt since it may be invalid at this point.
  CacheMap[V] = Result;
  return Result;
}

SizeOffsetEvalType ObjectSizeOffsetEvaluator::visitAllocaInst(AllocaInst &I) {
  if (!I.getAllocatedType()->isSized())
    return unknown();

  // must be a VLA
  assert(I.isArrayAllocation());

  // If needed, adjust the alloca's operand size to match the pointer size.
  // Subsequent math operations expect the types to match.
  Value *ArraySize = Builder.CreateZExtOrTrunc(
      I.getArraySize(), DL.getIntPtrType(I.getContext()));
  assert(ArraySize->getType() == Zero->getType() &&
         "Expected zero constant to have pointer type");

  Value *Size = ConstantInt::get(ArraySize->getType(),
                                 DL.getTypeAllocSize(I.getAllocatedType()));
  Size = Builder.CreateMul(Size, ArraySize);
  return std::make_pair(Size, Zero);
}

SizeOffsetEvalType ObjectSizeOffsetEvaluator::visitCallBase(CallBase &CB) {
  Optional<AllocFnsTy> FnData = getAllocationSize(&CB, TLI);
  if (!FnData)
    return unknown();

  // Handle strdup-like functions separately.
  if (FnData->AllocTy == StrDupLike) {
    // TODO: implement evaluation of strdup/strndup
    return unknown();
  }

  Value *FirstArg = CB.getArgOperand(FnData->FstParam);
  FirstArg = Builder.CreateZExtOrTrunc(FirstArg, IntTy);
  if (FnData->SndParam < 0)
    return std::make_pair(FirstArg, Zero);

  Value *SecondArg = CB.getArgOperand(FnData->SndParam);
  SecondArg = Builder.CreateZExtOrTrunc(SecondArg, IntTy);
  Value *Size = Builder.CreateMul(FirstArg, SecondArg);
  return std::make_pair(Size, Zero);
}

SizeOffsetEvalType
ObjectSizeOffsetEvaluator::visitExtractElementInst(ExtractElementInst &) {
  return unknown();
}

SizeOffsetEvalType
ObjectSizeOffsetEvaluator::visitExtractValueInst(ExtractValueInst &) {
  return unknown();
}

SizeOffsetEvalType
ObjectSizeOffsetEvaluator::visitGEPOperator(GEPOperator &GEP) {
  SizeOffsetEvalType PtrData = compute_(GEP.getPointerOperand());
  if (!bothKnown(PtrData))
    return unknown();

  Value *Offset = EmitGEPOffset(&Builder, DL, &GEP, /*NoAssumptions=*/true);
  Offset = Builder.CreateAdd(PtrData.second, Offset);
  return std::make_pair(PtrData.first, Offset);
}

SizeOffsetEvalType
ObjectSizeOffsetEvaluator::visitIntToPtrInst(IntToPtrInst &) {
  // clueless
  return unknown();
}

SizeOffsetEvalType ObjectSizeOffsetEvaluator::visitLoadInst(LoadInst &) {
  return unknown();
}

SizeOffsetEvalType ObjectSizeOffsetEvaluator::visitPHINode(PHINode &PHI) {
  // Create 2 PHIs: one for size and another for offset.
  PHINode *SizePHI = Builder.CreatePHI(IntTy, PHI.getNumIncomingValues());
  PHINode *OffsetPHI = Builder.CreatePHI(IntTy, PHI.getNumIncomingValues());

  // Insert right away in the cache to handle recursive PHIs.
  CacheMap[&PHI] = std::make_pair(SizePHI, OffsetPHI);

  // Compute offset/size for each PHI incoming pointer.
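  // Code for each incoming value is emitted at the first insertion point of
  // its predecessor block, so the computed size/offset is available on that
  // incoming edge.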
  for (unsigned i = 0, e = PHI.getNumIncomingValues(); i != e; ++i) {
    Builder.SetInsertPoint(&*PHI.getIncomingBlock(i)->getFirstInsertionPt());
    SizeOffsetEvalType EdgeData = compute_(PHI.getIncomingValue(i));

    if (!bothKnown(EdgeData)) {
      OffsetPHI->replaceAllUsesWith(UndefValue::get(IntTy));
      OffsetPHI->eraseFromParent();
      InsertedInstructions.erase(OffsetPHI);
      SizePHI->replaceAllUsesWith(UndefValue::get(IntTy));
      SizePHI->eraseFromParent();
      InsertedInstructions.erase(SizePHI);
      return unknown();
    }
    SizePHI->addIncoming(EdgeData.first, PHI.getIncomingBlock(i));
    OffsetPHI->addIncoming(EdgeData.second, PHI.getIncomingBlock(i));
  }

  Value *Size = SizePHI, *Offset = OffsetPHI;
  if (Value *Tmp = SizePHI->hasConstantValue()) {
    Size = Tmp;
    SizePHI->replaceAllUsesWith(Size);
    SizePHI->eraseFromParent();
    InsertedInstructions.erase(SizePHI);
  }
  if (Value *Tmp = OffsetPHI->hasConstantValue()) {
    Offset = Tmp;
    OffsetPHI->replaceAllUsesWith(Offset);
    OffsetPHI->eraseFromParent();
    InsertedInstructions.erase(OffsetPHI);
  }
  return std::make_pair(Size, Offset);
}

SizeOffsetEvalType ObjectSizeOffsetEvaluator::visitSelectInst(SelectInst &I) {
  SizeOffsetEvalType TrueSide = compute_(I.getTrueValue());
  SizeOffsetEvalType FalseSide = compute_(I.getFalseValue());

  if (!bothKnown(TrueSide) || !bothKnown(FalseSide))
    return unknown();
  if (TrueSide == FalseSide)
    return TrueSide;

  Value *Size = Builder.CreateSelect(I.getCondition(), TrueSide.first,
                                     FalseSide.first);
  Value *Offset = Builder.CreateSelect(I.getCondition(), TrueSide.second,
                                       FalseSide.second);
  return std::make_pair(Size, Offset);
}

SizeOffsetEvalType ObjectSizeOffsetEvaluator::visitInstruction(Instruction &I) {
  LLVM_DEBUG(dbgs() << "ObjectSizeOffsetEvaluator unknown instruction:" << I
                    << '\n');
  return unknown();
}