//===- MemoryBuiltins.cpp - Identify calls to memory builtins -------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This family of functions identifies calls to builtin functions that allocate
// or free memory.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/TargetFolder.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/Utils/Local.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include <cassert>
#include <cstdint>
#include <iterator>
#include <type_traits>
#include <utility>

using namespace llvm;

#define DEBUG_TYPE "memory-builtins"

enum AllocType : uint8_t {
  OpNewLike          = 1<<0, // allocates; never returns null
  MallocLike         = 1<<1, // allocates; may return null
  AlignedAllocLike   = 1<<2, // allocates with alignment; may return null
  CallocLike         = 1<<3, // allocates + bzero
  ReallocLike        = 1<<4, // reallocates
  StrDupLike         = 1<<5,
  MallocOrOpNewLike  = MallocLike | OpNewLike,
  MallocOrCallocLike = MallocLike | OpNewLike | CallocLike | AlignedAllocLike,
  AllocLike          = MallocOrCallocLike | StrDupLike,
  AnyAlloc           = AllocLike | ReallocLike
};

struct AllocFnsTy {
  AllocType AllocTy;
  unsigned NumParams;
  // First and Second size parameters (or -1 if unused)
  int FstParam, SndParam;
  // Alignment parameter for aligned_alloc and aligned new
  int AlignParam;
};

// FIXME: certain users need more information. E.g., SimplifyLibCalls needs to
// know which functions are nounwind, noalias, nocapture parameters, etc.
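// Each entry maps a recognized library function to its AllocType, its number
// of parameters, the indices of its first and second size arguments, and the
// index of its alignment argument (-1 marks an unused field; see AllocFnsTy).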
static const std::pair<LibFunc, AllocFnsTy> AllocationFnData[] = {
    {LibFunc_malloc, {MallocLike, 1, 0, -1, -1}},
    {LibFunc_vec_malloc, {MallocLike, 1, 0, -1, -1}},
    {LibFunc_valloc, {MallocLike, 1, 0, -1, -1}},
    {LibFunc_Znwj, {OpNewLike, 1, 0, -1, -1}},                  // new(unsigned int)
    {LibFunc_ZnwjRKSt9nothrow_t, {MallocLike, 2, 0, -1, -1}},   // new(unsigned int, nothrow)
    {LibFunc_ZnwjSt11align_val_t, {OpNewLike, 2, 0, -1, 1}},    // new(unsigned int, align_val_t)
    {LibFunc_ZnwjSt11align_val_tRKSt9nothrow_t, {MallocLike, 3, 0, -1, 1}}, // new(unsigned int, align_val_t, nothrow)
    {LibFunc_Znwm, {OpNewLike, 1, 0, -1, -1}},                  // new(unsigned long)
    {LibFunc_ZnwmRKSt9nothrow_t, {MallocLike, 2, 0, -1, -1}},   // new(unsigned long, nothrow)
    {LibFunc_ZnwmSt11align_val_t, {OpNewLike, 2, 0, -1, 1}},    // new(unsigned long, align_val_t)
    {LibFunc_ZnwmSt11align_val_tRKSt9nothrow_t, {MallocLike, 3, 0, -1, 1}}, // new(unsigned long, align_val_t, nothrow)
    {LibFunc_Znaj, {OpNewLike, 1, 0, -1, -1}},                  // new[](unsigned int)
    {LibFunc_ZnajRKSt9nothrow_t, {MallocLike, 2, 0, -1, -1}},   // new[](unsigned int, nothrow)
    {LibFunc_ZnajSt11align_val_t, {OpNewLike, 2, 0, -1, 1}},    // new[](unsigned int, align_val_t)
    {LibFunc_ZnajSt11align_val_tRKSt9nothrow_t, {MallocLike, 3, 0, -1, 1}}, // new[](unsigned int, align_val_t, nothrow)
    {LibFunc_Znam, {OpNewLike, 1, 0, -1, -1}},                  // new[](unsigned long)
    {LibFunc_ZnamRKSt9nothrow_t, {MallocLike, 2, 0, -1, -1}},   // new[](unsigned long, nothrow)
    {LibFunc_ZnamSt11align_val_t, {OpNewLike, 2, 0, -1, 1}},    // new[](unsigned long, align_val_t)
    {LibFunc_ZnamSt11align_val_tRKSt9nothrow_t, {MallocLike, 3, 0, -1, 1}}, // new[](unsigned long, align_val_t, nothrow)
    {LibFunc_msvc_new_int, {OpNewLike, 1, 0, -1, -1}},                  // new(unsigned int)
    {LibFunc_msvc_new_int_nothrow, {MallocLike, 2, 0, -1, -1}},         // new(unsigned int, nothrow)
    {LibFunc_msvc_new_longlong, {OpNewLike, 1, 0, -1, -1}},             // new(unsigned long long)
    {LibFunc_msvc_new_longlong_nothrow, {MallocLike, 2, 0, -1, -1}},    // new(unsigned long long, nothrow)
    {LibFunc_msvc_new_array_int, {OpNewLike, 1, 0, -1, -1}},            // new[](unsigned int)
    {LibFunc_msvc_new_array_int_nothrow, {MallocLike, 2, 0, -1, -1}},   // new[](unsigned int, nothrow)
    {LibFunc_msvc_new_array_longlong, {OpNewLike, 1, 0, -1, -1}},       // new[](unsigned long long)
    {LibFunc_msvc_new_array_longlong_nothrow, {MallocLike, 2, 0, -1, -1}}, // new[](unsigned long long, nothrow)
    {LibFunc_aligned_alloc, {AlignedAllocLike, 2, 1, -1, 0}},
    {LibFunc_memalign, {AlignedAllocLike, 2, 1, -1, 0}},
    {LibFunc_calloc, {CallocLike, 2, 0, 1, -1}},
    {LibFunc_vec_calloc, {CallocLike, 2, 0, 1, -1}},
    {LibFunc_realloc, {ReallocLike, 2, 1, -1, -1}},
    {LibFunc_vec_realloc, {ReallocLike, 2, 1, -1, -1}},
    {LibFunc_reallocf, {ReallocLike, 2, 1, -1, -1}},
    {LibFunc_strdup, {StrDupLike, 1, -1, -1, -1}},
    {LibFunc_strndup, {StrDupLike, 2, 1, -1, -1}},
    {LibFunc___kmpc_alloc_shared, {MallocLike, 1, 0, -1, -1}},
    // TODO: Handle "int posix_memalign(void **, size_t, size_t)"
};

static const Function *getCalledFunction(const Value *V,
                                         bool &IsNoBuiltin) {
  // Don't care about intrinsics in this case.
  if (isa<IntrinsicInst>(V))
    return nullptr;

  const auto *CB = dyn_cast<CallBase>(V);
  if (!CB)
    return nullptr;

  IsNoBuiltin = CB->isNoBuiltin();

  if (const Function *Callee = CB->getCalledFunction())
    return Callee;
  return nullptr;
}

/// Returns the allocation data for the given value if it's a call to a known
/// allocation function.
static Optional<AllocFnsTy>
getAllocationDataForFunction(const Function *Callee, AllocType AllocTy,
                             const TargetLibraryInfo *TLI) {
  // Make sure that the function is available.
  LibFunc TLIFn;
  if (!TLI || !TLI->getLibFunc(*Callee, TLIFn) || !TLI->has(TLIFn))
    return None;

  const auto *Iter = find_if(
      AllocationFnData, [TLIFn](const std::pair<LibFunc, AllocFnsTy> &P) {
        return P.first == TLIFn;
      });

  if (Iter == std::end(AllocationFnData))
    return None;

  const AllocFnsTy *FnData = &Iter->second;
  if ((FnData->AllocTy & AllocTy) != FnData->AllocTy)
    return None;

  // Check function prototype.
  int FstParam = FnData->FstParam;
  int SndParam = FnData->SndParam;
  FunctionType *FTy = Callee->getFunctionType();

  if (FTy->getReturnType() == Type::getInt8PtrTy(FTy->getContext()) &&
      FTy->getNumParams() == FnData->NumParams &&
      (FstParam < 0 ||
       (FTy->getParamType(FstParam)->isIntegerTy(32) ||
        FTy->getParamType(FstParam)->isIntegerTy(64))) &&
      (SndParam < 0 ||
       FTy->getParamType(SndParam)->isIntegerTy(32) ||
       FTy->getParamType(SndParam)->isIntegerTy(64)))
    return *FnData;
  return None;
}

static Optional<AllocFnsTy> getAllocationData(const Value *V, AllocType AllocTy,
                                              const TargetLibraryInfo *TLI) {
  bool IsNoBuiltinCall;
  if (const Function *Callee = getCalledFunction(V, IsNoBuiltinCall))
    if (!IsNoBuiltinCall)
      return getAllocationDataForFunction(Callee, AllocTy, TLI);
  return None;
}

static Optional<AllocFnsTy>
getAllocationData(const Value *V, AllocType AllocTy,
                  function_ref<const TargetLibraryInfo &(Function &)> GetTLI) {
  bool IsNoBuiltinCall;
  if (const Function *Callee = getCalledFunction(V, IsNoBuiltinCall))
    if (!IsNoBuiltinCall)
      return getAllocationDataForFunction(
          Callee, AllocTy, &GetTLI(const_cast<Function &>(*Callee)));
  return None;
}

static Optional<AllocFnsTy> getAllocationSize(const Value *V,
                                              const TargetLibraryInfo *TLI) {
  bool IsNoBuiltinCall;
  const Function *Callee =
      getCalledFunction(V, IsNoBuiltinCall);
  if (!Callee)
    return None;

  // Prefer to use existing information over allocsize. This will give us an
  // accurate AllocTy.
  if (!IsNoBuiltinCall)
    if (Optional<AllocFnsTy> Data =
            getAllocationDataForFunction(Callee, AnyAlloc, TLI))
      return Data;

  Attribute Attr = Callee->getFnAttribute(Attribute::AllocSize);
  if (Attr == Attribute())
    return None;

  std::pair<unsigned, Optional<unsigned>> Args = Attr.getAllocSizeArgs();

  AllocFnsTy Result;
  // Because allocsize only tells us how many bytes are allocated, we're not
  // really allowed to assume anything, so we use MallocLike.
  Result.AllocTy = MallocLike;
  Result.NumParams = Callee->getNumOperands();
  Result.FstParam = Args.first;
  Result.SndParam = Args.second.getValueOr(-1);
  // Allocsize has no way to specify an alignment argument
  Result.AlignParam = -1;
  return Result;
}

/// Tests if a value is a call or invoke to a library function that
/// allocates or reallocates memory (either malloc, calloc, realloc, or strdup
/// like).
bool llvm::isAllocationFn(const Value *V, const TargetLibraryInfo *TLI) {
  return getAllocationData(V, AnyAlloc, TLI).hasValue();
}
bool llvm::isAllocationFn(
    const Value *V, function_ref<const TargetLibraryInfo &(Function &)> GetTLI) {
  return getAllocationData(V, AnyAlloc, GetTLI).hasValue();
}

/// Tests if a value is a call or invoke to a library function that
/// allocates uninitialized memory (such as malloc).
static bool isMallocLikeFn(const Value *V, const TargetLibraryInfo *TLI) {
  return getAllocationData(V, MallocOrOpNewLike, TLI).hasValue();
}

/// Tests if a value is a call or invoke to a library function that
/// allocates uninitialized memory with alignment (such as aligned_alloc).
static bool isAlignedAllocLikeFn(const Value *V, const TargetLibraryInfo *TLI) {
  return getAllocationData(V, AlignedAllocLike, TLI)
      .hasValue();
}

/// Tests if a value is a call or invoke to a library function that
/// allocates zero-filled memory (such as calloc).
static bool isCallocLikeFn(const Value *V, const TargetLibraryInfo *TLI) {
  return getAllocationData(V, CallocLike, TLI).hasValue();
}

/// Tests if a value is a call or invoke to a library function that
/// allocates memory similar to malloc or calloc.
bool llvm::isMallocOrCallocLikeFn(const Value *V, const TargetLibraryInfo *TLI) {
  return getAllocationData(V, MallocOrCallocLike, TLI).hasValue();
}

/// Tests if a value is a call or invoke to a library function that
/// allocates memory (either malloc, calloc, or strdup like).
bool llvm::isAllocLikeFn(const Value *V, const TargetLibraryInfo *TLI) {
  return getAllocationData(V, AllocLike, TLI).hasValue();
}

/// Tests if a value is a call or invoke to a library function that
/// reallocates memory (e.g., realloc).
bool llvm::isReallocLikeFn(const Value *V, const TargetLibraryInfo *TLI) {
  return getAllocationData(V, ReallocLike, TLI).hasValue();
}

/// Tests if a function is a library function that reallocates memory
/// (e.g., realloc).
bool llvm::isReallocLikeFn(const Function *F, const TargetLibraryInfo *TLI) {
  return getAllocationDataForFunction(F, ReallocLike, TLI).hasValue();
}

bool llvm::isAllocRemovable(const CallBase *CB, const TargetLibraryInfo *TLI) {
  assert(isAllocationFn(CB, TLI));

  // Note: Removability is highly dependent on the source language. For
  // example, recent C++ requires direct calls to the global allocation
  // [basic.stc.dynamic.allocation] to be observable unless part of a new
  // expression [expr.new paragraph 13].

  // Historically we've treated the C family allocation routines as removable
  return isAllocLikeFn(CB, TLI);
}

Value *llvm::getAllocAlignment(const CallBase *V,
                               const TargetLibraryInfo *TLI) {
  assert(isAllocationFn(V, TLI));

  const Optional<AllocFnsTy> FnData = getAllocationData(V, AnyAlloc, TLI);
  if (!FnData.hasValue() || FnData->AlignParam < 0) {
    return nullptr;
  }
  return V->getOperand(FnData->AlignParam);
}

/// When we're compiling N-bit code, and the user uses parameters that are
/// greater than N bits (e.g. uint64_t on a 32-bit build), we can run into
/// trouble with APInt size issues. This function handles resizing + overflow
/// checks for us. Check and zext or trunc \p I depending on IntTyBits and
/// I's value.
static bool CheckedZextOrTrunc(APInt &I, unsigned IntTyBits) {
  // More bits than we can handle. Checking the bit width isn't necessary, but
  // it's faster than checking active bits, and should give `false` in the
  // vast majority of cases.
  if (I.getBitWidth() > IntTyBits && I.getActiveBits() > IntTyBits)
    return false;
  if (I.getBitWidth() != IntTyBits)
    I = I.zextOrTrunc(IntTyBits);
  return true;
}

Optional<APInt>
llvm::getAllocSize(const CallBase *CB,
                   const TargetLibraryInfo *TLI,
                   std::function<const Value*(const Value*)> Mapper) {
  // Note: This handles both explicitly listed allocation functions and
  // allocsize. The code structure could stand to be cleaned up a bit.
  Optional<AllocFnsTy> FnData = getAllocationSize(CB, TLI);
  if (!FnData)
    return None;

  // Get the index type for this address space, results and intermediate
  // computations are performed at that width.
  auto &DL = CB->getModule()->getDataLayout();
  const unsigned IntTyBits = DL.getIndexTypeSizeInBits(CB->getType());

  // Handle strdup-like functions separately.
  if (FnData->AllocTy == StrDupLike) {
    APInt Size(IntTyBits, GetStringLength(Mapper(CB->getArgOperand(0))));
    if (!Size)
      return None;

    // Strndup limits strlen.
    if (FnData->FstParam > 0) {
      const ConstantInt *Arg =
          dyn_cast<ConstantInt>(Mapper(CB->getArgOperand(FnData->FstParam)));
      if (!Arg)
        return None;

      APInt MaxSize = Arg->getValue().zextOrSelf(IntTyBits);
      if (Size.ugt(MaxSize))
        Size = MaxSize + 1;
    }
    return Size;
  }

  const ConstantInt *Arg =
      dyn_cast<ConstantInt>(Mapper(CB->getArgOperand(FnData->FstParam)));
  if (!Arg)
    return None;

  APInt Size = Arg->getValue();
  if (!CheckedZextOrTrunc(Size, IntTyBits))
    return None;

  // Size is determined by just 1 parameter.
  if (FnData->SndParam < 0)
    return Size;

  Arg = dyn_cast<ConstantInt>(Mapper(CB->getArgOperand(FnData->SndParam)));
  if (!Arg)
    return None;

  APInt NumElems = Arg->getValue();
  if (!CheckedZextOrTrunc(NumElems, IntTyBits))
    return None;

  bool Overflow;
  Size = Size.umul_ov(NumElems, Overflow);
  if (Overflow)
    return None;
  return Size;
}

Constant *llvm::getInitialValueOfAllocation(const CallBase *Alloc,
                                            const TargetLibraryInfo *TLI,
                                            Type *Ty) {
  assert(isAllocationFn(Alloc, TLI));

  // malloc and aligned_alloc are uninitialized (undef)
  if (isMallocLikeFn(Alloc, TLI) || isAlignedAllocLikeFn(Alloc, TLI))
    return UndefValue::get(Ty);

  // calloc zero initializes
  if (isCallocLikeFn(Alloc, TLI))
    return Constant::getNullValue(Ty);

  return nullptr;
}

struct FreeFnsTy {
  unsigned NumParams;
};

// clang-format off
static const std::pair<LibFunc, FreeFnsTy> FreeFnData[] = {
    {LibFunc_free,                               {1}},
    {LibFunc_ZdlPv,                              {1}}, // operator delete(void*)
    {LibFunc_ZdaPv,                              {1}}, // operator delete[](void*)
    {LibFunc_msvc_delete_ptr32,                  {1}}, // operator delete(void*)
    {LibFunc_msvc_delete_ptr64,                  {1}}, // operator delete(void*)
    {LibFunc_msvc_delete_array_ptr32,            {1}}, // operator delete[](void*)
    {LibFunc_msvc_delete_array_ptr64,            {1}}, // operator delete[](void*)
    {LibFunc_ZdlPvj,                             {2}}, // delete(void*, uint)
    {LibFunc_ZdlPvm,                             {2}}, // delete(void*, ulong)
    {LibFunc_ZdlPvRKSt9nothrow_t,                {2}}, // delete(void*, nothrow)
    {LibFunc_ZdlPvSt11align_val_t,               {2}}, // delete(void*, align_val_t)
    {LibFunc_ZdaPvj,                             {2}}, // delete[](void*, uint)
    {LibFunc_ZdaPvm,                             {2}}, // delete[](void*, ulong)
    {LibFunc_ZdaPvRKSt9nothrow_t,                {2}}, // delete[](void*, nothrow)
    {LibFunc_ZdaPvSt11align_val_t,               {2}}, // delete[](void*, align_val_t)
    {LibFunc_msvc_delete_ptr32_int,              {2}}, // delete(void*, uint)
    {LibFunc_msvc_delete_ptr64_longlong,         {2}}, // delete(void*, ulonglong)
    {LibFunc_msvc_delete_ptr32_nothrow,          {2}}, // delete(void*, nothrow)
    {LibFunc_msvc_delete_ptr64_nothrow,          {2}}, // delete(void*, nothrow)
    {LibFunc_msvc_delete_array_ptr32_int,        {2}}, // delete[](void*, uint)
    {LibFunc_msvc_delete_array_ptr64_longlong,   {2}}, // delete[](void*, ulonglong)
    {LibFunc_msvc_delete_array_ptr32_nothrow,    {2}}, // delete[](void*, nothrow)
    {LibFunc_msvc_delete_array_ptr64_nothrow,    {2}}, // delete[](void*, nothrow)
    {LibFunc___kmpc_free_shared,                 {2}}, // OpenMP Offloading RTL free
    {LibFunc_ZdlPvSt11align_val_tRKSt9nothrow_t, {3}}, // delete(void*, align_val_t, nothrow)
    {LibFunc_ZdaPvSt11align_val_tRKSt9nothrow_t, {3}}, // delete[](void*, align_val_t, nothrow)
    {LibFunc_ZdlPvjSt11align_val_t,              {3}}, // delete(void*, unsigned int, align_val_t)
    {LibFunc_ZdlPvmSt11align_val_t,              {3}}, // delete(void*, unsigned long, align_val_t)
    {LibFunc_ZdaPvjSt11align_val_t,              {3}}, // delete[](void*, unsigned int, align_val_t)
    {LibFunc_ZdaPvmSt11align_val_t,              {3}}, // delete[](void*, unsigned long, align_val_t)
};
// clang-format on

/// Returns the free-function data for \p TLIFn if it is registered in
/// FreeFnData above.
Optional<FreeFnsTy> getFreeFunctionDataForFunction(const Function *Callee,
                                                   const LibFunc TLIFn) {
  const auto *Iter =
      find_if(FreeFnData, [TLIFn](const std::pair<LibFunc, FreeFnsTy> &P) {
        return P.first == TLIFn;
      });
  if (Iter == std::end(FreeFnData))
    return None;
  return Iter->second;
}

/// isLibFreeFunction - Returns true if the function is a builtin free()
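/// or a recognized free-like library function (operator delete,
/// __kmpc_free_shared, ...) whose prototype matches the expected signature.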
bool llvm::isLibFreeFunction(const Function *F, const LibFunc TLIFn) {
  Optional<FreeFnsTy> FnData = getFreeFunctionDataForFunction(F, TLIFn);
  if (!FnData.hasValue())
    return false;

  // Check free prototype.
  // FIXME: workaround for PR5130, this will be obsolete when a nobuiltin
  // attribute will exist.
  FunctionType *FTy = F->getFunctionType();
  if (!FTy->getReturnType()->isVoidTy())
    return false;
  if (FTy->getNumParams() != FnData->NumParams)
    return false;
  if (FTy->getParamType(0) != Type::getInt8PtrTy(F->getContext()))
    return false;

  return true;
}

/// isFreeCall - Returns non-null if the value is a call to the builtin free()
const CallInst *llvm::isFreeCall(const Value *I, const TargetLibraryInfo *TLI) {
  bool IsNoBuiltinCall;
  const Function *Callee = getCalledFunction(I, IsNoBuiltinCall);
  if (Callee == nullptr || IsNoBuiltinCall)
    return nullptr;

  LibFunc TLIFn;
  if (!TLI || !TLI->getLibFunc(*Callee, TLIFn) || !TLI->has(TLIFn))
    return nullptr;

  return isLibFreeFunction(Callee, TLIFn) ? dyn_cast<CallInst>(I) : nullptr;
}

//===----------------------------------------------------------------------===//
//  Utility functions to compute size of objects.
//

// Return the number of accessible bytes past the offset (size - offset), or
// zero if the offset is negative or larger than the size.
static APInt getSizeWithOverflow(const SizeOffsetType &Data) {
  if (Data.second.isNegative() || Data.first.ult(Data.second))
    return APInt(Data.first.getBitWidth(), 0);
  return Data.first - Data.second;
}

/// Compute the size of the object pointed to by Ptr. Returns true and the
/// object size in Size if successful, and false otherwise.
/// If RoundToAlign is true, then Size is rounded up to the alignment of
/// allocas, byval arguments, and global variables.
bool llvm::getObjectSize(const Value *Ptr, uint64_t &Size, const DataLayout &DL,
                         const TargetLibraryInfo *TLI, ObjectSizeOpts Opts) {
  ObjectSizeOffsetVisitor Visitor(DL, TLI, Ptr->getContext(), Opts);
  SizeOffsetType Data = Visitor.compute(const_cast<Value *>(Ptr));
  if (!Visitor.bothKnown(Data))
    return false;

  Size = getSizeWithOverflow(Data).getZExtValue();
  return true;
}

Value *llvm::lowerObjectSizeCall(IntrinsicInst *ObjectSize,
                                 const DataLayout &DL,
                                 const TargetLibraryInfo *TLI,
                                 bool MustSucceed) {
  assert(ObjectSize->getIntrinsicID() == Intrinsic::objectsize &&
         "ObjectSize must be a call to llvm.objectsize!");

  bool MaxVal = cast<ConstantInt>(ObjectSize->getArgOperand(1))->isZero();
  ObjectSizeOpts EvalOptions;
  // Unless we have to fold this to something, try to be as accurate as
  // possible.
  if (MustSucceed)
    EvalOptions.EvalMode =
        MaxVal ? ObjectSizeOpts::Mode::Max : ObjectSizeOpts::Mode::Min;
  else
    EvalOptions.EvalMode = ObjectSizeOpts::Mode::Exact;

  EvalOptions.NullIsUnknownSize =
      cast<ConstantInt>(ObjectSize->getArgOperand(2))->isOne();

  auto *ResultType = cast<IntegerType>(ObjectSize->getType());
  bool StaticOnly = cast<ConstantInt>(ObjectSize->getArgOperand(3))->isZero();
  if (StaticOnly) {
    // FIXME: Does it make sense to just return a failure value if the size
    // won't fit in the output and `!MustSucceed`?
    uint64_t Size;
    if (getObjectSize(ObjectSize->getArgOperand(0), Size, DL, TLI,
                      EvalOptions) &&
        isUIntN(ResultType->getBitWidth(), Size))
      return ConstantInt::get(ResultType, Size);
  } else {
    LLVMContext &Ctx = ObjectSize->getFunction()->getContext();
    ObjectSizeOffsetEvaluator Eval(DL, TLI, Ctx, EvalOptions);
    SizeOffsetEvalType SizeOffsetPair =
        Eval.compute(ObjectSize->getArgOperand(0));

    if (SizeOffsetPair != ObjectSizeOffsetEvaluator::unknown()) {
      IRBuilder<TargetFolder> Builder(Ctx, TargetFolder(DL));
      Builder.SetInsertPoint(ObjectSize);

      // If we're outside the end of the object, then we can always access
      // exactly 0 bytes.
      Value *ResultSize =
          Builder.CreateSub(SizeOffsetPair.first, SizeOffsetPair.second);
      Value *UseZero =
          Builder.CreateICmpULT(SizeOffsetPair.first, SizeOffsetPair.second);
      ResultSize = Builder.CreateZExtOrTrunc(ResultSize, ResultType);
      Value *Ret = Builder.CreateSelect(
          UseZero, ConstantInt::get(ResultType, 0), ResultSize);

      // The non-constant size expression cannot evaluate to -1.
      if (!isa<Constant>(SizeOffsetPair.first) ||
          !isa<Constant>(SizeOffsetPair.second))
        Builder.CreateAssumption(
            Builder.CreateICmpNE(Ret, ConstantInt::get(ResultType, -1)));

      return Ret;
    }
  }

  if (!MustSucceed)
    return nullptr;

  return ConstantInt::get(ResultType, MaxVal ? -1ULL : 0);
}

STATISTIC(ObjectVisitorArgument,
          "Number of arguments with unsolved size and offset");
STATISTIC(ObjectVisitorLoad,
          "Number of load instructions with unsolved size and offset");

APInt ObjectSizeOffsetVisitor::align(APInt Size, MaybeAlign Alignment) {
  if (Options.RoundToAlign && Alignment)
    return APInt(IntTyBits, alignTo(Size.getZExtValue(), Alignment));
  return Size;
}

ObjectSizeOffsetVisitor::ObjectSizeOffsetVisitor(const DataLayout &DL,
                                                 const TargetLibraryInfo *TLI,
                                                 LLVMContext &Context,
                                                 ObjectSizeOpts Options)
    : DL(DL), TLI(TLI), Options(Options) {
  // Pointer size must be rechecked for each object visited since it could have
  // a different address space.
}

SizeOffsetType ObjectSizeOffsetVisitor::compute(Value *V) {
  unsigned InitialIntTyBits = DL.getIndexTypeSizeInBits(V->getType());

  // Stripping pointer casts can strip address space casts which can change the
  // index type size. The invariant is that we use the value type to determine
  // the index type size and if we stripped address space casts we have to
  // readjust the APInt as we pass it upwards in order for the APInt to match
  // the type the caller passed in.
  APInt Offset(InitialIntTyBits, 0);
  V = V->stripAndAccumulateConstantOffsets(
      DL, Offset, /* AllowNonInbounds */ true, /* AllowInvariantGroup */ true);

  // Later we use the index type size and zero but it will match the type of
  // the value that is passed to computeImpl.
  IntTyBits = DL.getIndexTypeSizeInBits(V->getType());
  Zero = APInt::getZero(IntTyBits);

  bool IndexTypeSizeChanged = InitialIntTyBits != IntTyBits;
  if (!IndexTypeSizeChanged && Offset.isZero())
    return computeImpl(V);

  // We stripped an address space cast that changed the index type size or we
  // accumulated some constant offset (or both). Readjust the bit width to
  // match the argument index type size and apply the offset, as required.
  SizeOffsetType SOT = computeImpl(V);
  if (IndexTypeSizeChanged) {
    if (knownSize(SOT) && !::CheckedZextOrTrunc(SOT.first, InitialIntTyBits))
      SOT.first = APInt();
    if (knownOffset(SOT) && !::CheckedZextOrTrunc(SOT.second, InitialIntTyBits))
      SOT.second = APInt();
  }
  // If the computed offset is "unknown" we cannot add the stripped offset.
  return {SOT.first,
          SOT.second.getBitWidth() > 1 ? SOT.second + Offset : SOT.second};
}

SizeOffsetType ObjectSizeOffsetVisitor::computeImpl(Value *V) {
  if (Instruction *I = dyn_cast<Instruction>(V)) {
    // If we have already seen this instruction, bail out. Cycles can happen in
    // unreachable code after constant propagation.
    if (!SeenInsts.insert(I).second)
      return unknown();

    return visit(*I);
  }
  if (Argument *A = dyn_cast<Argument>(V))
    return visitArgument(*A);
  if (ConstantPointerNull *P = dyn_cast<ConstantPointerNull>(V))
    return visitConstantPointerNull(*P);
  if (GlobalAlias *GA = dyn_cast<GlobalAlias>(V))
    return visitGlobalAlias(*GA);
  if (GlobalVariable *GV = dyn_cast<GlobalVariable>(V))
    return visitGlobalVariable(*GV);
  if (UndefValue *UV = dyn_cast<UndefValue>(V))
    return visitUndefValue(*UV);

  LLVM_DEBUG(dbgs() << "ObjectSizeOffsetVisitor::compute() unhandled value: "
                    << *V << '\n');
  return unknown();
}

bool ObjectSizeOffsetVisitor::CheckedZextOrTrunc(APInt &I) {
  return ::CheckedZextOrTrunc(I, IntTyBits);
}

SizeOffsetType ObjectSizeOffsetVisitor::visitAllocaInst(AllocaInst &I) {
  if (!I.getAllocatedType()->isSized())
    return unknown();

  TypeSize ElemSize = DL.getTypeAllocSize(I.getAllocatedType());
  if (ElemSize.isScalable() && Options.EvalMode != ObjectSizeOpts::Mode::Min)
    return unknown();
  APInt Size(IntTyBits, ElemSize.getKnownMinSize());
  if (!I.isArrayAllocation())
    return std::make_pair(align(Size, I.getAlign()), Zero);

  Value *ArraySize = I.getArraySize();
  if (const ConstantInt *C = dyn_cast<ConstantInt>(ArraySize)) {
    APInt NumElems = C->getValue();
    if (!CheckedZextOrTrunc(NumElems))
      return unknown();

    bool Overflow;
    Size = Size.umul_ov(NumElems, Overflow);
    return Overflow ? unknown()
                    : std::make_pair(align(Size, I.getAlign()), Zero);
  }
  return unknown();
}

SizeOffsetType ObjectSizeOffsetVisitor::visitArgument(Argument &A) {
  Type *MemoryTy = A.getPointeeInMemoryValueType();
  // No interprocedural analysis is done at the moment.
  if (!MemoryTy || !MemoryTy->isSized()) {
    ++ObjectVisitorArgument;
    return unknown();
  }

  APInt Size(IntTyBits, DL.getTypeAllocSize(MemoryTy));
  return std::make_pair(align(Size, A.getParamAlign()), Zero);
}

SizeOffsetType ObjectSizeOffsetVisitor::visitCallBase(CallBase &CB) {
  auto Mapper = [](const Value *V) { return V; };
  if (Optional<APInt> Size = getAllocSize(&CB, TLI, Mapper))
    return std::make_pair(*Size, Zero);
  return unknown();
}

SizeOffsetType
ObjectSizeOffsetVisitor::visitConstantPointerNull(ConstantPointerNull &CPN) {
  // If null is unknown, there's nothing we can do. Additionally, non-zero
  // address spaces can make use of null, so we don't presume to know anything
  // about that.
  //
  // TODO: How should this work with address space casts?
  // We currently just drop them on the floor, but it's unclear what we should
  // do when a NULL from addrspace(1) gets cast to addrspace(0)
  // (or vice-versa).
  if (Options.NullIsUnknownSize || CPN.getType()->getAddressSpace())
    return unknown();
  return std::make_pair(Zero, Zero);
}

SizeOffsetType
ObjectSizeOffsetVisitor::visitExtractElementInst(ExtractElementInst&) {
  return unknown();
}

SizeOffsetType
ObjectSizeOffsetVisitor::visitExtractValueInst(ExtractValueInst&) {
  // Easy cases were already folded by previous passes.
  return unknown();
}

SizeOffsetType ObjectSizeOffsetVisitor::visitGlobalAlias(GlobalAlias &GA) {
  if (GA.isInterposable())
    return unknown();
  return compute(GA.getAliasee());
}

SizeOffsetType ObjectSizeOffsetVisitor::visitGlobalVariable(GlobalVariable &GV) {
  if (!GV.hasDefinitiveInitializer())
    return unknown();

  APInt Size(IntTyBits, DL.getTypeAllocSize(GV.getValueType()));
  return std::make_pair(align(Size, GV.getAlign()), Zero);
}

SizeOffsetType ObjectSizeOffsetVisitor::visitIntToPtrInst(IntToPtrInst&) {
  // clueless
  return unknown();
}

SizeOffsetType ObjectSizeOffsetVisitor::visitLoadInst(LoadInst&) {
  ++ObjectVisitorLoad;
  return unknown();
}

SizeOffsetType ObjectSizeOffsetVisitor::visitPHINode(PHINode&) {
  // too complex to analyze statically.
  return unknown();
}

SizeOffsetType ObjectSizeOffsetVisitor::visitSelectInst(SelectInst &I) {
  SizeOffsetType TrueSide = compute(I.getTrueValue());
  SizeOffsetType FalseSide = compute(I.getFalseValue());
  if (bothKnown(TrueSide) && bothKnown(FalseSide)) {
    if (TrueSide == FalseSide) {
      return TrueSide;
    }

    APInt TrueResult = getSizeWithOverflow(TrueSide);
    APInt FalseResult = getSizeWithOverflow(FalseSide);

    if (TrueResult == FalseResult) {
      return TrueSide;
    }
    if (Options.EvalMode == ObjectSizeOpts::Mode::Min) {
      if (TrueResult.slt(FalseResult))
        return TrueSide;
      return FalseSide;
    }
    if (Options.EvalMode == ObjectSizeOpts::Mode::Max) {
      if (TrueResult.sgt(FalseResult))
        return TrueSide;
      return FalseSide;
    }
  }
  return unknown();
}

SizeOffsetType ObjectSizeOffsetVisitor::visitUndefValue(UndefValue&) {
  return std::make_pair(Zero, Zero);
}

SizeOffsetType ObjectSizeOffsetVisitor::visitInstruction(Instruction &I) {
  LLVM_DEBUG(dbgs() << "ObjectSizeOffsetVisitor unknown instruction:" << I
                    << '\n');
  return unknown();
}

ObjectSizeOffsetEvaluator::ObjectSizeOffsetEvaluator(
    const DataLayout &DL, const TargetLibraryInfo *TLI, LLVMContext &Context,
    ObjectSizeOpts EvalOpts)
    : DL(DL), TLI(TLI), Context(Context),
      Builder(Context, TargetFolder(DL),
              IRBuilderCallbackInserter(
                  [&](Instruction *I) { InsertedInstructions.insert(I); })),
      EvalOpts(EvalOpts) {
  // IntTy and Zero must be set for each compute() since the address space may
  // be different for later objects.
}

SizeOffsetEvalType ObjectSizeOffsetEvaluator::compute(Value *V) {
  // XXX - Are vectors of pointers possible here?
  IntTy = cast<IntegerType>(DL.getIndexType(V->getType()));
  Zero = ConstantInt::get(IntTy, 0);

  SizeOffsetEvalType Result = compute_(V);

  if (!bothKnown(Result)) {
    // Erase everything that was computed in this iteration from the cache, so
    // that no dangling references are left behind.
    // We could be a bit smarter if we kept a dependency graph. It's probably
    // not worth the complexity.
    for (const Value *SeenVal : SeenVals) {
      CacheMapTy::iterator CacheIt = CacheMap.find(SeenVal);
      // non-computable results can be safely cached
      if (CacheIt != CacheMap.end() && anyKnown(CacheIt->second))
        CacheMap.erase(CacheIt);
    }

    // Erase any instructions we inserted as part of the traversal.
    for (Instruction *I : InsertedInstructions) {
      I->replaceAllUsesWith(UndefValue::get(I->getType()));
      I->eraseFromParent();
    }
  }

  SeenVals.clear();
  InsertedInstructions.clear();
  return Result;
}

SizeOffsetEvalType ObjectSizeOffsetEvaluator::compute_(Value *V) {
  ObjectSizeOffsetVisitor Visitor(DL, TLI, Context, EvalOpts);
  SizeOffsetType Const = Visitor.compute(V);
  if (Visitor.bothKnown(Const))
    return std::make_pair(ConstantInt::get(Context, Const.first),
                          ConstantInt::get(Context, Const.second));

  V = V->stripPointerCasts();

  // Check cache.
  CacheMapTy::iterator CacheIt = CacheMap.find(V);
  if (CacheIt != CacheMap.end())
    return CacheIt->second;

  // Always generate code immediately before the instruction being
  // processed, so that the generated code dominates the same BBs.
  BuilderTy::InsertPointGuard Guard(Builder);
  if (Instruction *I = dyn_cast<Instruction>(V))
    Builder.SetInsertPoint(I);

  // Now compute the size and offset.
  SizeOffsetEvalType Result;

  // Record the pointers that were handled in this run, so that they can be
  // cleaned later if something fails. We also use this set to break cycles
  // that can occur in dead code.
  if (!SeenVals.insert(V).second) {
    Result = unknown();
  } else if (GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
    Result = visitGEPOperator(*GEP);
  } else if (Instruction *I = dyn_cast<Instruction>(V)) {
    Result = visit(*I);
  } else if (isa<Argument>(V) ||
             (isa<ConstantExpr>(V) &&
              cast<ConstantExpr>(V)->getOpcode() == Instruction::IntToPtr) ||
             isa<GlobalAlias>(V) ||
             isa<GlobalVariable>(V)) {
    // Ignore values where we cannot do more than ObjectSizeVisitor.
    Result = unknown();
  } else {
    LLVM_DEBUG(
        dbgs() << "ObjectSizeOffsetEvaluator::compute() unhandled value: " << *V
               << '\n');
    Result = unknown();
  }

  // Don't reuse CacheIt since it may be invalid at this point.
  CacheMap[V] = Result;
  return Result;
}

SizeOffsetEvalType ObjectSizeOffsetEvaluator::visitAllocaInst(AllocaInst &I) {
  if (!I.getAllocatedType()->isSized())
    return unknown();

  // must be a VLA
  assert(I.isArrayAllocation());

  // If needed, adjust the alloca's operand size to match the pointer size.
  // Subsequent math operations expect the types to match.
  Value *ArraySize = Builder.CreateZExtOrTrunc(
      I.getArraySize(), DL.getIntPtrType(I.getContext()));
  assert(ArraySize->getType() == Zero->getType() &&
         "Expected zero constant to have pointer type");

  Value *Size = ConstantInt::get(ArraySize->getType(),
                                 DL.getTypeAllocSize(I.getAllocatedType()));
  Size = Builder.CreateMul(Size, ArraySize);
  return std::make_pair(Size, Zero);
}

SizeOffsetEvalType ObjectSizeOffsetEvaluator::visitCallBase(CallBase &CB) {
  Optional<AllocFnsTy> FnData = getAllocationSize(&CB, TLI);
  if (!FnData)
    return unknown();

  // Handle strdup-like functions separately.
  if (FnData->AllocTy == StrDupLike) {
    // TODO: implement evaluation of strdup/strndup
    return unknown();
  }

  Value *FirstArg = CB.getArgOperand(FnData->FstParam);
  FirstArg = Builder.CreateZExtOrTrunc(FirstArg, IntTy);
  if (FnData->SndParam < 0)
    return std::make_pair(FirstArg, Zero);

  Value *SecondArg = CB.getArgOperand(FnData->SndParam);
  SecondArg = Builder.CreateZExtOrTrunc(SecondArg, IntTy);
  Value *Size = Builder.CreateMul(FirstArg, SecondArg);
  return std::make_pair(Size, Zero);
}

SizeOffsetEvalType
ObjectSizeOffsetEvaluator::visitExtractElementInst(ExtractElementInst&) {
  return unknown();
}

SizeOffsetEvalType
ObjectSizeOffsetEvaluator::visitExtractValueInst(ExtractValueInst&) {
  return unknown();
}

SizeOffsetEvalType
ObjectSizeOffsetEvaluator::visitGEPOperator(GEPOperator &GEP) {
  SizeOffsetEvalType PtrData = compute_(GEP.getPointerOperand());
  if (!bothKnown(PtrData))
    return unknown();

  Value *Offset = EmitGEPOffset(&Builder, DL, &GEP, /*NoAssumptions=*/true);
  Offset = Builder.CreateAdd(PtrData.second, Offset);
  return std::make_pair(PtrData.first, Offset);
}

SizeOffsetEvalType ObjectSizeOffsetEvaluator::visitIntToPtrInst(IntToPtrInst&) {
  // clueless
  return unknown();
}

SizeOffsetEvalType ObjectSizeOffsetEvaluator::visitLoadInst(LoadInst&) {
  return unknown();
}

SizeOffsetEvalType ObjectSizeOffsetEvaluator::visitPHINode(PHINode &PHI) {
  // Create 2 PHIs: one for size and another for offset.
  PHINode *SizePHI = Builder.CreatePHI(IntTy, PHI.getNumIncomingValues());
  PHINode *OffsetPHI = Builder.CreatePHI(IntTy, PHI.getNumIncomingValues());

  // Insert right away in the cache to handle recursive PHIs.
  CacheMap[&PHI] = std::make_pair(SizePHI, OffsetPHI);

  // Compute offset/size for each PHI incoming pointer.
  for (unsigned i = 0, e = PHI.getNumIncomingValues(); i != e; ++i) {
    Builder.SetInsertPoint(&*PHI.getIncomingBlock(i)->getFirstInsertionPt());
    SizeOffsetEvalType EdgeData = compute_(PHI.getIncomingValue(i));

    if (!bothKnown(EdgeData)) {
      OffsetPHI->replaceAllUsesWith(UndefValue::get(IntTy));
      OffsetPHI->eraseFromParent();
      InsertedInstructions.erase(OffsetPHI);
      SizePHI->replaceAllUsesWith(UndefValue::get(IntTy));
      SizePHI->eraseFromParent();
      InsertedInstructions.erase(SizePHI);
      return unknown();
    }
    SizePHI->addIncoming(EdgeData.first, PHI.getIncomingBlock(i));
    OffsetPHI->addIncoming(EdgeData.second, PHI.getIncomingBlock(i));
  }

  Value *Size = SizePHI, *Offset = OffsetPHI;
  if (Value *Tmp = SizePHI->hasConstantValue()) {
    Size = Tmp;
    SizePHI->replaceAllUsesWith(Size);
    SizePHI->eraseFromParent();
    InsertedInstructions.erase(SizePHI);
  }
  if (Value *Tmp = OffsetPHI->hasConstantValue()) {
    Offset = Tmp;
    OffsetPHI->replaceAllUsesWith(Offset);
    OffsetPHI->eraseFromParent();
    InsertedInstructions.erase(OffsetPHI);
  }
  return std::make_pair(Size, Offset);
}

SizeOffsetEvalType ObjectSizeOffsetEvaluator::visitSelectInst(SelectInst &I) {
  SizeOffsetEvalType TrueSide = compute_(I.getTrueValue());
  SizeOffsetEvalType FalseSide = compute_(I.getFalseValue());

  if (!bothKnown(TrueSide) || !bothKnown(FalseSide))
    return unknown();
  if (TrueSide == FalseSide)
    return TrueSide;

  Value *Size = Builder.CreateSelect(I.getCondition(), TrueSide.first,
                                     FalseSide.first);
  Value *Offset = Builder.CreateSelect(I.getCondition(), TrueSide.second,
                                       FalseSide.second);
  return std::make_pair(Size, Offset);
}

SizeOffsetEvalType ObjectSizeOffsetEvaluator::visitInstruction(Instruction &I) {
  LLVM_DEBUG(dbgs() << "ObjectSizeOffsetEvaluator unknown instruction:" << I
                    << '\n');
  return unknown();
}