//===- MemoryBuiltins.cpp - Identify calls to memory builtins -------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This family of functions identifies calls to builtin functions that allocate
// or free memory.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Analysis/TargetFolder.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/Utils/Local.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include <cassert>
#include <cstdint>
#include <iterator>
#include <type_traits>
#include <utility>

using namespace llvm;

#define DEBUG_TYPE "memory-builtins"

// Classification of known allocation routines. Individual bits describe a
// single behavior; the compound values below are masks used by the query
// functions to match several behaviors at once.
enum AllocType : uint8_t {
  OpNewLike          = 1<<0, // allocates; never returns null
  MallocLike         = 1<<1, // allocates; may return null
  AlignedAllocLike   = 1<<2, // allocates with alignment; may return null
  CallocLike         = 1<<3, // allocates + bzero
  ReallocLike        = 1<<4, // reallocates
  StrDupLike         = 1<<5,
  MallocOrOpNewLike  = MallocLike | OpNewLike,
  MallocOrCallocLike = MallocLike | OpNewLike | CallocLike | AlignedAllocLike,
  AllocLike          = MallocOrCallocLike | StrDupLike,
  AnyAlloc           = AllocLike | ReallocLike
};

// Per-function description of an allocation routine: its classification, the
// expected number of parameters (used to validate the prototype), and the
// positions of the size and alignment arguments.
struct AllocFnsTy {
  AllocType AllocTy;
  unsigned NumParams;
  // First and Second size parameters (or -1 if unused)
  int FstParam, SndParam;
  // Alignment parameter for aligned_alloc and aligned new
  int AlignParam;
};

// FIXME: certain users need more information. E.g., SimplifyLibCalls needs to
// know which functions are nounwind, noalias, nocapture parameters, etc.
static const std::pair<LibFunc, AllocFnsTy> AllocationFnData[] = {
    {LibFunc_malloc,                            {MallocLike,       1, 0,  -1, -1}},
    {LibFunc_vec_malloc,                        {MallocLike,       1, 0,  -1, -1}},
    {LibFunc_valloc,                            {MallocLike,       1, 0,  -1, -1}},
    {LibFunc_Znwj,                              {OpNewLike,        1, 0,  -1, -1}}, // new(unsigned int)
    {LibFunc_ZnwjRKSt9nothrow_t,                {MallocLike,       2, 0,  -1, -1}}, // new(unsigned int, nothrow)
    {LibFunc_ZnwjSt11align_val_t,               {OpNewLike,        2, 0,  -1,  1}}, // new(unsigned int, align_val_t)
    {LibFunc_ZnwjSt11align_val_tRKSt9nothrow_t, {MallocLike,       3, 0,  -1,  1}}, // new(unsigned int, align_val_t, nothrow)
    {LibFunc_Znwm,                              {OpNewLike,        1, 0,  -1, -1}}, // new(unsigned long)
    {LibFunc_ZnwmRKSt9nothrow_t,                {MallocLike,       2, 0,  -1, -1}}, // new(unsigned long, nothrow)
    {LibFunc_ZnwmSt11align_val_t,               {OpNewLike,        2, 0,  -1,  1}}, // new(unsigned long, align_val_t)
    {LibFunc_ZnwmSt11align_val_tRKSt9nothrow_t, {MallocLike,       3, 0,  -1,  1}}, // new(unsigned long, align_val_t, nothrow)
    {LibFunc_Znaj,                              {OpNewLike,        1, 0,  -1, -1}}, // new[](unsigned int)
    {LibFunc_ZnajRKSt9nothrow_t,                {MallocLike,       2, 0,  -1, -1}}, // new[](unsigned int, nothrow)
    {LibFunc_ZnajSt11align_val_t,               {OpNewLike,        2, 0,  -1,  1}}, // new[](unsigned int, align_val_t)
    {LibFunc_ZnajSt11align_val_tRKSt9nothrow_t, {MallocLike,       3, 0,  -1,  1}}, // new[](unsigned int, align_val_t, nothrow)
    {LibFunc_Znam,                              {OpNewLike,        1, 0,  -1, -1}}, // new[](unsigned long)
    {LibFunc_ZnamRKSt9nothrow_t,                {MallocLike,       2, 0,  -1, -1}}, // new[](unsigned long, nothrow)
    {LibFunc_ZnamSt11align_val_t,               {OpNewLike,        2, 0,  -1,  1}}, // new[](unsigned long, align_val_t)
    {LibFunc_ZnamSt11align_val_tRKSt9nothrow_t, {MallocLike,       3, 0,  -1,  1}}, // new[](unsigned long, align_val_t, nothrow)
    {LibFunc_msvc_new_int,                      {OpNewLike,        1, 0,  -1, -1}}, // new(unsigned int)
    {LibFunc_msvc_new_int_nothrow,              {MallocLike,       2, 0,  -1, -1}}, // new(unsigned int, nothrow)
    {LibFunc_msvc_new_longlong,                 {OpNewLike,        1, 0,  -1, -1}}, // new(unsigned long long)
    {LibFunc_msvc_new_longlong_nothrow,         {MallocLike,       2, 0,  -1, -1}}, // new(unsigned long long, nothrow)
    {LibFunc_msvc_new_array_int,                {OpNewLike,        1, 0,  -1, -1}}, // new[](unsigned int)
    {LibFunc_msvc_new_array_int_nothrow,        {MallocLike,       2, 0,  -1, -1}}, // new[](unsigned int, nothrow)
    {LibFunc_msvc_new_array_longlong,           {OpNewLike,        1, 0,  -1, -1}}, // new[](unsigned long long)
    {LibFunc_msvc_new_array_longlong_nothrow,   {MallocLike,       2, 0,  -1, -1}}, // new[](unsigned long long, nothrow)
    {LibFunc_aligned_alloc,                     {AlignedAllocLike, 2, 1,  -1,  0}},
    {LibFunc_memalign,                          {AlignedAllocLike, 2, 1,  -1,  0}},
    {LibFunc_calloc,                            {CallocLike,       2, 0,   1, -1}},
    {LibFunc_vec_calloc,                        {CallocLike,       2, 0,   1, -1}},
    {LibFunc_realloc,                           {ReallocLike,      2, 1,  -1, -1}},
    {LibFunc_vec_realloc,                       {ReallocLike,      2, 1,  -1, -1}},
    {LibFunc_reallocf,                          {ReallocLike,      2, 1,  -1, -1}},
    {LibFunc_strdup,                            {StrDupLike,       1, -1, -1, -1}},
    {LibFunc_strndup,                           {StrDupLike,       2, 1,  -1, -1}},
    {LibFunc___kmpc_alloc_shared,               {MallocLike,       1, 0,  -1, -1}},
    // TODO: Handle "int posix_memalign(void **, size_t, size_t)"
};

// Returns the called function behind \p V if it is a direct, non-intrinsic
// call or invoke; also reports whether the call site carries "nobuiltin".
static const Function *getCalledFunction(const Value *V,
                                         bool &IsNoBuiltin) {
  // Don't care about intrinsics in this case.
  if (isa<IntrinsicInst>(V))
    return nullptr;

  const auto *CB = dyn_cast<CallBase>(V);
  if (!CB)
    return nullptr;

  IsNoBuiltin = CB->isNoBuiltin();

  if (const Function *Callee = CB->getCalledFunction())
    return Callee;
  return nullptr;
}

/// Returns the allocation data for the given value if it's a call to a known
/// allocation function.
static Optional<AllocFnsTy>
getAllocationDataForFunction(const Function *Callee, AllocType AllocTy,
                             const TargetLibraryInfo *TLI) {
  // Make sure that the function is available.
  LibFunc TLIFn;
  if (!TLI || !TLI->getLibFunc(*Callee, TLIFn) || !TLI->has(TLIFn))
    return None;

  const auto *Iter = find_if(
      AllocationFnData, [TLIFn](const std::pair<LibFunc, AllocFnsTy> &P) {
        return P.first == TLIFn;
      });

  if (Iter == std::end(AllocationFnData))
    return None;

  // The table entry must describe every behavior the caller asked about;
  // e.g. a ReallocLike entry does not satisfy an AllocLike query.
  const AllocFnsTy *FnData = &Iter->second;
  if ((FnData->AllocTy & AllocTy) != FnData->AllocTy)
    return None;

  // Check function prototype.
  // The routine must return i8* and each size parameter (when present) must
  // be a 32- or 64-bit integer; otherwise the declaration does not match the
  // library function we think it is.
  int FstParam = FnData->FstParam;
  int SndParam = FnData->SndParam;
  FunctionType *FTy = Callee->getFunctionType();

  if (FTy->getReturnType() == Type::getInt8PtrTy(FTy->getContext()) &&
      FTy->getNumParams() == FnData->NumParams &&
      (FstParam < 0 ||
       (FTy->getParamType(FstParam)->isIntegerTy(32) ||
        FTy->getParamType(FstParam)->isIntegerTy(64))) &&
      (SndParam < 0 ||
       FTy->getParamType(SndParam)->isIntegerTy(32) ||
       FTy->getParamType(SndParam)->isIntegerTy(64)))
    return *FnData;
  return None;
}

// Like getAllocationDataForFunction, but starts from an arbitrary value and
// skips call sites explicitly marked "nobuiltin".
static Optional<AllocFnsTy> getAllocationData(const Value *V, AllocType AllocTy,
                                              const TargetLibraryInfo *TLI) {
  bool IsNoBuiltinCall;
  if (const Function *Callee = getCalledFunction(V, IsNoBuiltinCall))
    if (!IsNoBuiltinCall)
      return getAllocationDataForFunction(Callee, AllocTy, TLI);
  return None;
}

// Overload taking a TLI getter, for callers that have per-function TLI.
static Optional<AllocFnsTy>
getAllocationData(const Value *V, AllocType AllocTy,
                  function_ref<const TargetLibraryInfo &(Function &)> GetTLI) {
  bool IsNoBuiltinCall;
  if (const Function *Callee = getCalledFunction(V, IsNoBuiltinCall))
    if (!IsNoBuiltinCall)
      return getAllocationDataForFunction(
          Callee, AllocTy, &GetTLI(const_cast<Function &>(*Callee)));
  return None;
}

// Returns size-argument information for \p V, from the built-in table when
// possible and otherwise from the callee's "allocsize" attribute.
static Optional<AllocFnsTy> getAllocationSize(const Value *V,
                                              const TargetLibraryInfo *TLI) {
  bool IsNoBuiltinCall;
  const Function *Callee =
      getCalledFunction(V, IsNoBuiltinCall);
  if (!Callee)
    return None;

  // Prefer to use existing information over allocsize. This will give us an
  // accurate AllocTy.
  if (!IsNoBuiltinCall)
    if (Optional<AllocFnsTy> Data =
            getAllocationDataForFunction(Callee, AnyAlloc, TLI))
      return Data;

  Attribute Attr = Callee->getFnAttribute(Attribute::AllocSize);
  if (Attr == Attribute())
    return None;

  std::pair<unsigned, Optional<unsigned>> Args = Attr.getAllocSizeArgs();

  AllocFnsTy Result;
  // Because allocsize only tells us how many bytes are allocated, we're not
  // really allowed to assume anything, so we use MallocLike.
  Result.AllocTy = MallocLike;
  // NOTE(review): NumParams comes from getNumOperands() on the Function here,
  // not from the function type's parameter count — confirm this is intended.
  Result.NumParams = Callee->getNumOperands();
  Result.FstParam = Args.first;
  Result.SndParam = Args.second.getValueOr(-1);
  // Allocsize has no way to specify an alignment argument
  Result.AlignParam = -1;
  return Result;
}

/// Tests if a value is a call or invoke to a library function that
/// allocates or reallocates memory (either malloc, calloc, realloc, or strdup
/// like).
bool llvm::isAllocationFn(const Value *V, const TargetLibraryInfo *TLI) {
  return getAllocationData(V, AnyAlloc, TLI).hasValue();
}
bool llvm::isAllocationFn(
    const Value *V, function_ref<const TargetLibraryInfo &(Function &)> GetTLI) {
  return getAllocationData(V, AnyAlloc, GetTLI).hasValue();
}

/// Tests if a value is a call or invoke to a library function that
/// allocates uninitialized memory (such as malloc).
static bool isMallocLikeFn(const Value *V, const TargetLibraryInfo *TLI) {
  return getAllocationData(V, MallocOrOpNewLike, TLI).hasValue();
}

/// Tests if a value is a call or invoke to a library function that
/// allocates uninitialized memory with alignment (such as aligned_alloc).
246 static bool isAlignedAllocLikeFn(const Value *V, const TargetLibraryInfo *TLI) { 247 return getAllocationData(V, AlignedAllocLike, TLI) 248 .hasValue(); 249 } 250 251 /// Tests if a value is a call or invoke to a library function that 252 /// allocates zero-filled memory (such as calloc). 253 static bool isCallocLikeFn(const Value *V, const TargetLibraryInfo *TLI) { 254 return getAllocationData(V, CallocLike, TLI).hasValue(); 255 } 256 257 /// Tests if a value is a call or invoke to a library function that 258 /// allocates memory similar to malloc or calloc. 259 bool llvm::isMallocOrCallocLikeFn(const Value *V, const TargetLibraryInfo *TLI) { 260 return getAllocationData(V, MallocOrCallocLike, TLI).hasValue(); 261 } 262 263 /// Tests if a value is a call or invoke to a library function that 264 /// allocates memory (either malloc, calloc, or strdup like). 265 bool llvm::isAllocLikeFn(const Value *V, const TargetLibraryInfo *TLI) { 266 return getAllocationData(V, AllocLike, TLI).hasValue(); 267 } 268 269 /// Tests if a value is a call or invoke to a library function that 270 /// reallocates memory (e.g., realloc). 271 bool llvm::isReallocLikeFn(const Value *V, const TargetLibraryInfo *TLI) { 272 return getAllocationData(V, ReallocLike, TLI).hasValue(); 273 } 274 275 /// Tests if a functions is a call or invoke to a library function that 276 /// reallocates memory (e.g., realloc). 277 bool llvm::isReallocLikeFn(const Function *F, const TargetLibraryInfo *TLI) { 278 return getAllocationDataForFunction(F, ReallocLike, TLI).hasValue(); 279 } 280 281 bool llvm::isAllocRemovable(const CallBase *CB, const TargetLibraryInfo *TLI) { 282 assert(isAllocationFn(CB, TLI)); 283 284 // Note: Removability is highly dependent on the source language. For 285 // example, recent C++ requires direct calls to the global allocation 286 // [basic.stc.dynamic.allocation] to be observable unless part of a new 287 // expression [expr.new paragraph 13]. 
288 289 // Historically we've treated the C family allocation routines as removable 290 return isAllocLikeFn(CB, TLI); 291 } 292 293 Value *llvm::getAllocAlignment(const CallBase *V, 294 const TargetLibraryInfo *TLI) { 295 assert(isAllocationFn(V, TLI)); 296 297 const Optional<AllocFnsTy> FnData = getAllocationData(V, AnyAlloc, TLI); 298 if (!FnData.hasValue() || FnData->AlignParam < 0) { 299 return nullptr; 300 } 301 return V->getOperand(FnData->AlignParam); 302 } 303 304 /// When we're compiling N-bit code, and the user uses parameters that are 305 /// greater than N bits (e.g. uint64_t on a 32-bit build), we can run into 306 /// trouble with APInt size issues. This function handles resizing + overflow 307 /// checks for us. Check and zext or trunc \p I depending on IntTyBits and 308 /// I's value. 309 static bool CheckedZextOrTrunc(APInt &I, unsigned IntTyBits) { 310 // More bits than we can handle. Checking the bit width isn't necessary, but 311 // it's faster than checking active bits, and should give `false` in the 312 // vast majority of cases. 313 if (I.getBitWidth() > IntTyBits && I.getActiveBits() > IntTyBits) 314 return false; 315 if (I.getBitWidth() != IntTyBits) 316 I = I.zextOrTrunc(IntTyBits); 317 return true; 318 } 319 320 Optional<APInt> 321 llvm::getAllocSize(const CallBase *CB, 322 const TargetLibraryInfo *TLI, 323 std::function<const Value*(const Value*)> Mapper) { 324 // Note: This handles both explicitly listed allocation functions and 325 // allocsize. The code structure could stand to be cleaned up a bit. 326 Optional<AllocFnsTy> FnData = getAllocationSize(CB, TLI); 327 if (!FnData) 328 return None; 329 330 // Get the index type for this address space, results and intermediate 331 // computations are performed at that width. 332 auto &DL = CB->getModule()->getDataLayout(); 333 const unsigned IntTyBits = DL.getIndexTypeSizeInBits(CB->getType()); 334 335 // Handle strdup-like functions separately. 
336 if (FnData->AllocTy == StrDupLike) { 337 APInt Size(IntTyBits, GetStringLength(Mapper(CB->getArgOperand(0)))); 338 if (!Size) 339 return None; 340 341 // Strndup limits strlen. 342 if (FnData->FstParam > 0) { 343 const ConstantInt *Arg = 344 dyn_cast<ConstantInt>(Mapper(CB->getArgOperand(FnData->FstParam))); 345 if (!Arg) 346 return None; 347 348 APInt MaxSize = Arg->getValue().zextOrSelf(IntTyBits); 349 if (Size.ugt(MaxSize)) 350 Size = MaxSize + 1; 351 } 352 return Size; 353 } 354 355 const ConstantInt *Arg = 356 dyn_cast<ConstantInt>(Mapper(CB->getArgOperand(FnData->FstParam))); 357 if (!Arg) 358 return None; 359 360 APInt Size = Arg->getValue(); 361 if (!CheckedZextOrTrunc(Size, IntTyBits)) 362 return None; 363 364 // Size is determined by just 1 parameter. 365 if (FnData->SndParam < 0) 366 return Size; 367 368 Arg = dyn_cast<ConstantInt>(Mapper(CB->getArgOperand(FnData->SndParam))); 369 if (!Arg) 370 return None; 371 372 APInt NumElems = Arg->getValue(); 373 if (!CheckedZextOrTrunc(NumElems, IntTyBits)) 374 return None; 375 376 bool Overflow; 377 Size = Size.umul_ov(NumElems, Overflow); 378 if (Overflow) 379 return None; 380 return Size; 381 } 382 383 Constant *llvm::getInitialValueOfAllocation(const CallBase *Alloc, 384 const TargetLibraryInfo *TLI, 385 Type *Ty) { 386 assert(isAllocationFn(Alloc, TLI)); 387 388 // malloc and aligned_alloc are uninitialized (undef) 389 if (isMallocLikeFn(Alloc, TLI) || isAlignedAllocLikeFn(Alloc, TLI)) 390 return UndefValue::get(Ty); 391 392 // calloc zero initializes 393 if (isCallocLikeFn(Alloc, TLI)) 394 return Constant::getNullValue(Ty); 395 396 return nullptr; 397 } 398 399 struct FreeFnsTy { 400 unsigned NumParams; 401 }; 402 403 // clang-format off 404 static const std::pair<LibFunc, FreeFnsTy> FreeFnData[] = { 405 {LibFunc_free, {1}}, 406 {LibFunc_ZdlPv, {1}}, // operator delete(void*) 407 {LibFunc_ZdaPv, {1}}, // operator delete[](void*) 408 {LibFunc_msvc_delete_ptr32, {1}}, // operator delete(void*) 409 
{LibFunc_msvc_delete_ptr64, {1}}, // operator delete(void*) 410 {LibFunc_msvc_delete_array_ptr32, {1}}, // operator delete[](void*) 411 {LibFunc_msvc_delete_array_ptr64, {1}}, // operator delete[](void*) 412 {LibFunc_ZdlPvj, {2}}, // delete(void*, uint) 413 {LibFunc_ZdlPvm, {2}}, // delete(void*, ulong) 414 {LibFunc_ZdlPvRKSt9nothrow_t, {2}}, // delete(void*, nothrow) 415 {LibFunc_ZdlPvSt11align_val_t, {2}}, // delete(void*, align_val_t) 416 {LibFunc_ZdaPvj, {2}}, // delete[](void*, uint) 417 {LibFunc_ZdaPvm, {2}}, // delete[](void*, ulong) 418 {LibFunc_ZdaPvRKSt9nothrow_t, {2}}, // delete[](void*, nothrow) 419 {LibFunc_ZdaPvSt11align_val_t, {2}}, // delete[](void*, align_val_t) 420 {LibFunc_msvc_delete_ptr32_int, {2}}, // delete(void*, uint) 421 {LibFunc_msvc_delete_ptr64_longlong, {2}}, // delete(void*, ulonglong) 422 {LibFunc_msvc_delete_ptr32_nothrow, {2}}, // delete(void*, nothrow) 423 {LibFunc_msvc_delete_ptr64_nothrow, {2}}, // delete(void*, nothrow) 424 {LibFunc_msvc_delete_array_ptr32_int, {2}}, // delete[](void*, uint) 425 {LibFunc_msvc_delete_array_ptr64_longlong, {2}}, // delete[](void*, ulonglong) 426 {LibFunc_msvc_delete_array_ptr32_nothrow, {2}}, // delete[](void*, nothrow) 427 {LibFunc_msvc_delete_array_ptr64_nothrow, {2}}, // delete[](void*, nothrow) 428 {LibFunc___kmpc_free_shared, {2}}, // OpenMP Offloading RTL free 429 {LibFunc_ZdlPvSt11align_val_tRKSt9nothrow_t, {3}}, // delete(void*, align_val_t, nothrow) 430 {LibFunc_ZdaPvSt11align_val_tRKSt9nothrow_t, {3}}, // delete[](void*, align_val_t, nothrow) 431 {LibFunc_ZdlPvjSt11align_val_t, {3}}, // delete(void*, unsigned int, align_val_t) 432 {LibFunc_ZdlPvmSt11align_val_t, {3}}, // delete(void*, unsigned long, align_val_t) 433 {LibFunc_ZdaPvjSt11align_val_t, {3}}, // delete[](void*, unsigned int, align_val_t) 434 {LibFunc_ZdaPvmSt11align_val_t, {3}}, // delete[](void*, unsigned long, align_val_t) 435 }; 436 // clang-format on 437 438 Optional<FreeFnsTy> getFreeFunctionDataForFunction(const 
Function *Callee, 439 const LibFunc TLIFn) { 440 const auto *Iter = 441 find_if(FreeFnData, [TLIFn](const std::pair<LibFunc, FreeFnsTy> &P) { 442 return P.first == TLIFn; 443 }); 444 if (Iter == std::end(FreeFnData)) 445 return None; 446 return Iter->second; 447 } 448 449 /// isLibFreeFunction - Returns true if the function is a builtin free() 450 bool llvm::isLibFreeFunction(const Function *F, const LibFunc TLIFn) { 451 Optional<FreeFnsTy> FnData = getFreeFunctionDataForFunction(F, TLIFn); 452 if (!FnData.hasValue()) 453 return false; 454 455 // Check free prototype. 456 // FIXME: workaround for PR5130, this will be obsolete when a nobuiltin 457 // attribute will exist. 458 FunctionType *FTy = F->getFunctionType(); 459 if (!FTy->getReturnType()->isVoidTy()) 460 return false; 461 if (FTy->getNumParams() != FnData->NumParams) 462 return false; 463 if (FTy->getParamType(0) != Type::getInt8PtrTy(F->getContext())) 464 return false; 465 466 return true; 467 } 468 469 /// isFreeCall - Returns non-null if the value is a call to the builtin free() 470 const CallInst *llvm::isFreeCall(const Value *I, const TargetLibraryInfo *TLI) { 471 bool IsNoBuiltinCall; 472 const Function *Callee = getCalledFunction(I, IsNoBuiltinCall); 473 if (Callee == nullptr || IsNoBuiltinCall) 474 return nullptr; 475 476 LibFunc TLIFn; 477 if (!TLI || !TLI->getLibFunc(*Callee, TLIFn) || !TLI->has(TLIFn)) 478 return nullptr; 479 480 return isLibFreeFunction(Callee, TLIFn) ? dyn_cast<CallInst>(I) : nullptr; 481 } 482 483 484 //===----------------------------------------------------------------------===// 485 // Utility functions to compute size of objects. 486 // 487 static APInt getSizeWithOverflow(const SizeOffsetType &Data) { 488 if (Data.second.isNegative() || Data.first.ult(Data.second)) 489 return APInt(Data.first.getBitWidth(), 0); 490 return Data.first - Data.second; 491 } 492 493 /// Compute the size of the object pointed by Ptr. 
/// Compute the size of the object pointed by Ptr. Returns true and the
/// object size in Size if successful, and false otherwise.
/// If RoundToAlign is true, then Size is rounded up to the alignment of
/// allocas, byval arguments, and global variables.
bool llvm::getObjectSize(const Value *Ptr, uint64_t &Size, const DataLayout &DL,
                         const TargetLibraryInfo *TLI, ObjectSizeOpts Opts) {
  ObjectSizeOffsetVisitor Visitor(DL, TLI, Ptr->getContext(), Opts);
  SizeOffsetType Data = Visitor.compute(const_cast<Value*>(Ptr));
  if (!Visitor.bothKnown(Data))
    return false;

  Size = getSizeWithOverflow(Data).getZExtValue();
  return true;
}

/// Folds a call to llvm.objectsize either to a constant (when the size is
/// statically known, or when arg 3 requests static-only evaluation) or to IR
/// that computes the size at run time.  Returns nullptr only when
/// \p MustSucceed is false and the size cannot be determined.
Value *llvm::lowerObjectSizeCall(IntrinsicInst *ObjectSize,
                                 const DataLayout &DL,
                                 const TargetLibraryInfo *TLI,
                                 bool MustSucceed) {
  assert(ObjectSize->getIntrinsicID() == Intrinsic::objectsize &&
         "ObjectSize must be a call to llvm.objectsize!");

  // Arg 1 selects the fallback constant: 0 -> return max (-1), 1 -> return 0.
  bool MaxVal = cast<ConstantInt>(ObjectSize->getArgOperand(1))->isZero();
  ObjectSizeOpts EvalOptions;
  // Unless we have to fold this to something, try to be as accurate as
  // possible.
  if (MustSucceed)
    EvalOptions.EvalMode =
        MaxVal ? ObjectSizeOpts::Mode::Max : ObjectSizeOpts::Mode::Min;
  else
    EvalOptions.EvalMode = ObjectSizeOpts::Mode::Exact;

  // Arg 2: whether a null pointer has unknown size (true) or size 0 (false).
  EvalOptions.NullIsUnknownSize =
      cast<ConstantInt>(ObjectSize->getArgOperand(2))->isOne();

  auto *ResultType = cast<IntegerType>(ObjectSize->getType());
  // Arg 3: when zero, only a compile-time-constant answer is acceptable.
  bool StaticOnly = cast<ConstantInt>(ObjectSize->getArgOperand(3))->isZero();
  if (StaticOnly) {
    // FIXME: Does it make sense to just return a failure value if the size won't
    // fit in the output and `!MustSucceed`?
    uint64_t Size;
    if (getObjectSize(ObjectSize->getArgOperand(0), Size, DL, TLI, EvalOptions) &&
        isUIntN(ResultType->getBitWidth(), Size))
      return ConstantInt::get(ResultType, Size);
  } else {
    LLVMContext &Ctx = ObjectSize->getFunction()->getContext();
    ObjectSizeOffsetEvaluator Eval(DL, TLI, Ctx, EvalOptions);
    SizeOffsetEvalType SizeOffsetPair =
        Eval.compute(ObjectSize->getArgOperand(0));

    if (SizeOffsetPair != ObjectSizeOffsetEvaluator::unknown()) {
      IRBuilder<TargetFolder> Builder(Ctx, TargetFolder(DL));
      Builder.SetInsertPoint(ObjectSize);

      // If we've outside the end of the object, then we can always access
      // exactly 0 bytes.
      Value *ResultSize =
          Builder.CreateSub(SizeOffsetPair.first, SizeOffsetPair.second);
      Value *UseZero =
          Builder.CreateICmpULT(SizeOffsetPair.first, SizeOffsetPair.second);
      ResultSize = Builder.CreateZExtOrTrunc(ResultSize, ResultType);
      Value *Ret = Builder.CreateSelect(
          UseZero, ConstantInt::get(ResultType, 0), ResultSize);

      // The non-constant size expression cannot evaluate to -1.
      if (!isa<Constant>(SizeOffsetPair.first) ||
          !isa<Constant>(SizeOffsetPair.second))
        Builder.CreateAssumption(
            Builder.CreateICmpNE(Ret, ConstantInt::get(ResultType, -1)));

      return Ret;
    }
  }

  if (!MustSucceed)
    return nullptr;

  // Must fold: use the requested fallback constant.
  return ConstantInt::get(ResultType, MaxVal ? -1ULL : 0);
}

STATISTIC(ObjectVisitorArgument,
          "Number of arguments with unsolved size and offset");
STATISTIC(ObjectVisitorLoad,
          "Number of load instructions with unsolved size and offset");

// Round Size up to Alignment, but only when the RoundToAlign option is set
// and an alignment is actually known.
APInt ObjectSizeOffsetVisitor::align(APInt Size, MaybeAlign Alignment) {
  if (Options.RoundToAlign && Alignment)
    return APInt(IntTyBits, alignTo(Size.getZExtValue(), Alignment));
  return Size;
}

ObjectSizeOffsetVisitor::ObjectSizeOffsetVisitor(const DataLayout &DL,
                                                 const TargetLibraryInfo *TLI,
                                                 LLVMContext &Context,
                                                 ObjectSizeOpts Options)
    : DL(DL), TLI(TLI), Options(Options) {
  // Pointer size must be rechecked for each object visited since it could have
  // a different address space.
}

// Entry point: strip pointer casts, then dispatch on the kind of value to the
// matching visit* method.  Results are (size, offset) pairs in the index type
// width of V's address space.
SizeOffsetType ObjectSizeOffsetVisitor::compute(Value *V) {
  IntTyBits = DL.getIndexTypeSizeInBits(V->getType());
  Zero = APInt::getZero(IntTyBits);

  V = V->stripPointerCasts();
  if (Instruction *I = dyn_cast<Instruction>(V)) {
    // If we have already seen this instruction, bail out. Cycles can happen in
    // unreachable code after constant propagation.
    if (!SeenInsts.insert(I).second)
      return unknown();

    if (GEPOperator *GEP = dyn_cast<GEPOperator>(V))
      return visitGEPOperator(*GEP);
    return visit(*I);
  }
  if (Argument *A = dyn_cast<Argument>(V))
    return visitArgument(*A);
  if (ConstantPointerNull *P = dyn_cast<ConstantPointerNull>(V))
    return visitConstantPointerNull(*P);
  if (GlobalAlias *GA = dyn_cast<GlobalAlias>(V))
    return visitGlobalAlias(*GA);
  if (GlobalVariable *GV = dyn_cast<GlobalVariable>(V))
    return visitGlobalVariable(*GV);
  if (UndefValue *UV = dyn_cast<UndefValue>(V))
    return visitUndefValue(*UV);
  if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V)) {
    if (CE->getOpcode() == Instruction::IntToPtr)
      return unknown(); // clueless
    if (CE->getOpcode() == Instruction::GetElementPtr)
      return visitGEPOperator(cast<GEPOperator>(*CE));
  }

  LLVM_DEBUG(dbgs() << "ObjectSizeOffsetVisitor::compute() unhandled value: "
                    << *V << '\n');
  return unknown();
}

// Member wrapper around the file-local helper, bound to this visitor's width.
bool ObjectSizeOffsetVisitor::CheckedZextOrTrunc(APInt &I) {
  return ::CheckedZextOrTrunc(I, IntTyBits);
}

SizeOffsetType ObjectSizeOffsetVisitor::visitAllocaInst(AllocaInst &I) {
  if (!I.getAllocatedType()->isSized())
    return unknown();

  // Scalable vectors have no fixed compile-time size.
  if (isa<ScalableVectorType>(I.getAllocatedType()))
    return unknown();

  APInt Size(IntTyBits, DL.getTypeAllocSize(I.getAllocatedType()));
  if (!I.isArrayAllocation())
    return std::make_pair(align(Size, I.getAlign()), Zero);

  Value *ArraySize = I.getArraySize();
  if (const ConstantInt *C = dyn_cast<ConstantInt>(ArraySize)) {
    APInt NumElems = C->getValue();
    if (!CheckedZextOrTrunc(NumElems))
      return unknown();

    // Array alloca: total = element size * count, rejecting overflow.
    bool Overflow;
    Size = Size.umul_ov(NumElems, Overflow);
    return Overflow ? unknown()
                    : std::make_pair(align(Size, I.getAlign()), Zero);
  }
  return unknown();
}

SizeOffsetType ObjectSizeOffsetVisitor::visitArgument(Argument &A) {
  Type *MemoryTy = A.getPointeeInMemoryValueType();
  // No interprocedural analysis is done at the moment.
  if (!MemoryTy|| !MemoryTy->isSized()) {
    ++ObjectVisitorArgument;
    return unknown();
  }

  APInt Size(IntTyBits, DL.getTypeAllocSize(MemoryTy));
  return std::make_pair(align(Size, A.getParamAlign()), Zero);
}

// Calls: known only when the callee is an allocation function with constant
// size arguments (identity Mapper — no value remapping here).
SizeOffsetType ObjectSizeOffsetVisitor::visitCallBase(CallBase &CB) {
  auto Mapper = [](const Value *V) { return V; };
  if (Optional<APInt> Size = getAllocSize(&CB, TLI, Mapper))
    return std::make_pair(*Size, Zero);
  return unknown();
}

SizeOffsetType
ObjectSizeOffsetVisitor::visitConstantPointerNull(ConstantPointerNull& CPN) {
  // If null is unknown, there's nothing we can do. Additionally, non-zero
  // address spaces can make use of null, so we don't presume to know anything
  // about that.
  //
  // TODO: How should this work with address space casts? We currently just drop
  // them on the floor, but it's unclear what we should do when a NULL from
  // addrspace(1) gets casted to addrspace(0) (or vice-versa).
  if (Options.NullIsUnknownSize || CPN.getType()->getAddressSpace())
    return unknown();
  return std::make_pair(Zero, Zero);
}

SizeOffsetType
ObjectSizeOffsetVisitor::visitExtractElementInst(ExtractElementInst&) {
  return unknown();
}

SizeOffsetType
ObjectSizeOffsetVisitor::visitExtractValueInst(ExtractValueInst&) {
  // Easy cases were already folded by previous passes.
  return unknown();
}

// GEP: same underlying object, offset shifted by the GEP's constant offset.
SizeOffsetType ObjectSizeOffsetVisitor::visitGEPOperator(GEPOperator &GEP) {
  SizeOffsetType PtrData = compute(GEP.getPointerOperand());
  APInt Offset(DL.getIndexTypeSizeInBits(GEP.getPointerOperand()->getType()), 0);
  if (!bothKnown(PtrData) || !GEP.accumulateConstantOffset(DL, Offset))
    return unknown();

  return std::make_pair(PtrData.first, PtrData.second + Offset);
}

SizeOffsetType ObjectSizeOffsetVisitor::visitGlobalAlias(GlobalAlias &GA) {
  // An interposable alias may resolve to a different object at link time.
  if (GA.isInterposable())
    return unknown();
  return compute(GA.getAliasee());
}

SizeOffsetType ObjectSizeOffsetVisitor::visitGlobalVariable(GlobalVariable &GV){
  if (!GV.hasDefinitiveInitializer())
    return unknown();

  APInt Size(IntTyBits, DL.getTypeAllocSize(GV.getValueType()));
  return std::make_pair(align(Size, GV.getAlign()), Zero);
}

SizeOffsetType ObjectSizeOffsetVisitor::visitIntToPtrInst(IntToPtrInst&) {
  // clueless
  return unknown();
}

SizeOffsetType ObjectSizeOffsetVisitor::visitLoadInst(LoadInst&) {
  ++ObjectVisitorLoad;
  return unknown();
}

SizeOffsetType ObjectSizeOffsetVisitor::visitPHINode(PHINode&) {
  // too complex to analyze statically.
  return unknown();
}

// Select: usable when both arms are known and agree, or when EvalMode allows
// picking the smaller/larger remaining size.  Comparisons below are signed
// because getSizeWithOverflow clamps to non-negative values.
SizeOffsetType ObjectSizeOffsetVisitor::visitSelectInst(SelectInst &I) {
  SizeOffsetType TrueSide = compute(I.getTrueValue());
  SizeOffsetType FalseSide = compute(I.getFalseValue());
  if (bothKnown(TrueSide) && bothKnown(FalseSide)) {
    if (TrueSide == FalseSide) {
      return TrueSide;
    }

    APInt TrueResult = getSizeWithOverflow(TrueSide);
    APInt FalseResult = getSizeWithOverflow(FalseSide);

    if (TrueResult == FalseResult) {
      return TrueSide;
    }
    if (Options.EvalMode == ObjectSizeOpts::Mode::Min) {
      if (TrueResult.slt(FalseResult))
        return TrueSide;
      return FalseSide;
    }
    if (Options.EvalMode == ObjectSizeOpts::Mode::Max) {
      if (TrueResult.sgt(FalseResult))
        return TrueSide;
      return FalseSide;
    }
  }
  return unknown();
}

SizeOffsetType ObjectSizeOffsetVisitor::visitUndefValue(UndefValue&) {
  return std::make_pair(Zero, Zero);
}

SizeOffsetType ObjectSizeOffsetVisitor::visitInstruction(Instruction &I) {
  LLVM_DEBUG(dbgs() << "ObjectSizeOffsetVisitor unknown instruction:" << I
                    << '\n');
  return unknown();
}

ObjectSizeOffsetEvaluator::ObjectSizeOffsetEvaluator(
    const DataLayout &DL, const TargetLibraryInfo *TLI, LLVMContext &Context,
    ObjectSizeOpts EvalOpts)
    : DL(DL), TLI(TLI), Context(Context),
      Builder(Context, TargetFolder(DL),
              IRBuilderCallbackInserter(
                  // Track every instruction the builder emits so compute()
                  // can roll them back on failure.
                  [&](Instruction *I) { InsertedInstructions.insert(I); })),
      EvalOpts(EvalOpts) {
  // IntTy and Zero must be set for each compute() since the address space may
  // be different for later objects.
}

// Entry point: runs compute_ and, on failure, undoes all side effects of the
// attempt (cache entries for this run and any IR that was emitted).
SizeOffsetEvalType ObjectSizeOffsetEvaluator::compute(Value *V) {
  // XXX - Are vectors of pointers possible here?
  IntTy = cast<IntegerType>(DL.getIndexType(V->getType()));
  Zero = ConstantInt::get(IntTy, 0);

  SizeOffsetEvalType Result = compute_(V);

  if (!bothKnown(Result)) {
    // Erase everything that was computed in this iteration from the cache, so
    // that no dangling references are left behind. We could be a bit smarter if
    // we kept a dependency graph. It's probably not worth the complexity.
    for (const Value *SeenVal : SeenVals) {
      CacheMapTy::iterator CacheIt = CacheMap.find(SeenVal);
      // non-computable results can be safely cached
      if (CacheIt != CacheMap.end() && anyKnown(CacheIt->second))
        CacheMap.erase(CacheIt);
    }

    // Erase any instructions we inserted as part of the traversal.
    for (Instruction *I : InsertedInstructions) {
      I->replaceAllUsesWith(UndefValue::get(I->getType()));
      I->eraseFromParent();
    }
  }

  SeenVals.clear();
  InsertedInstructions.clear();
  return Result;
}

SizeOffsetEvalType ObjectSizeOffsetEvaluator::compute_(Value *V) {
  // First try the purely static visitor; a constant answer needs no IR.
  ObjectSizeOffsetVisitor Visitor(DL, TLI, Context, EvalOpts);
  SizeOffsetType Const = Visitor.compute(V);
  if (Visitor.bothKnown(Const))
    return std::make_pair(ConstantInt::get(Context, Const.first),
                          ConstantInt::get(Context, Const.second));

  V = V->stripPointerCasts();

  // Check cache.
  CacheMapTy::iterator CacheIt = CacheMap.find(V);
  if (CacheIt != CacheMap.end())
    return CacheIt->second;

  // Always generate code immediately before the instruction being
  // processed, so that the generated code dominates the same BBs.
  BuilderTy::InsertPointGuard Guard(Builder);
  if (Instruction *I = dyn_cast<Instruction>(V))
    Builder.SetInsertPoint(I);

  // Now compute the size and offset.
  SizeOffsetEvalType Result;

  // Record the pointers that were handled in this run, so that they can be
  // cleaned later if something fails. We also use this set to break cycles that
  // can occur in dead code.
  if (!SeenVals.insert(V).second) {
    Result = unknown();
  } else if (GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
    Result = visitGEPOperator(*GEP);
  } else if (Instruction *I = dyn_cast<Instruction>(V)) {
    Result = visit(*I);
  } else if (isa<Argument>(V) ||
             (isa<ConstantExpr>(V) &&
              cast<ConstantExpr>(V)->getOpcode() == Instruction::IntToPtr) ||
             isa<GlobalAlias>(V) ||
             isa<GlobalVariable>(V)) {
    // Ignore values where we cannot do more than ObjectSizeVisitor.
    Result = unknown();
  } else {
    LLVM_DEBUG(
        dbgs() << "ObjectSizeOffsetEvaluator::compute() unhandled value: " << *V
               << '\n');
    Result = unknown();
  }

  // Don't reuse CacheIt since it may be invalid at this point.
  CacheMap[V] = Result;
  return Result;
}

SizeOffsetEvalType ObjectSizeOffsetEvaluator::visitAllocaInst(AllocaInst &I) {
  if (!I.getAllocatedType()->isSized())
    return unknown();

  // must be a VLA; fixed-size allocas were handled by the static visitor.
  assert(I.isArrayAllocation());

  // If needed, adjust the alloca's operand size to match the pointer size.
  // Subsequent math operations expect the types to match.
  Value *ArraySize = Builder.CreateZExtOrTrunc(
      I.getArraySize(), DL.getIntPtrType(I.getContext()));
  assert(ArraySize->getType() == Zero->getType() &&
         "Expected zero constant to have pointer type");

  Value *Size = ConstantInt::get(ArraySize->getType(),
                                 DL.getTypeAllocSize(I.getAllocatedType()));
  Size = Builder.CreateMul(Size, ArraySize);
  return std::make_pair(Size, Zero);
}

// Calls: emit IR computing the (possibly dynamic) product of the allocation
// size arguments.
SizeOffsetEvalType ObjectSizeOffsetEvaluator::visitCallBase(CallBase &CB) {
  Optional<AllocFnsTy> FnData = getAllocationSize(&CB, TLI);
  if (!FnData)
    return unknown();

  // Handle strdup-like functions separately.
  if (FnData->AllocTy == StrDupLike) {
    // TODO: implement evaluation of strdup/strndup
    return unknown();
  }

  Value *FirstArg = CB.getArgOperand(FnData->FstParam);
  FirstArg = Builder.CreateZExtOrTrunc(FirstArg, IntTy);
  if (FnData->SndParam < 0)
    return std::make_pair(FirstArg, Zero);

  Value *SecondArg = CB.getArgOperand(FnData->SndParam);
  SecondArg = Builder.CreateZExtOrTrunc(SecondArg, IntTy);
  Value *Size = Builder.CreateMul(FirstArg, SecondArg);
  return std::make_pair(Size, Zero);
}

SizeOffsetEvalType
ObjectSizeOffsetEvaluator::visitExtractElementInst(ExtractElementInst&) {
  return unknown();
}

SizeOffsetEvalType
ObjectSizeOffsetEvaluator::visitExtractValueInst(ExtractValueInst&) {
  return unknown();
}

SizeOffsetEvalType
ObjectSizeOffsetEvaluator::visitGEPOperator(GEPOperator &GEP) {
  SizeOffsetEvalType PtrData = compute_(GEP.getPointerOperand());
  if (!bothKnown(PtrData))
    return unknown();

  Value *Offset = EmitGEPOffset(&Builder, DL, &GEP, /*NoAssumptions=*/true);
  Offset = Builder.CreateAdd(PtrData.second, Offset);
  return std::make_pair(PtrData.first, Offset);
}

SizeOffsetEvalType ObjectSizeOffsetEvaluator::visitIntToPtrInst(IntToPtrInst&) {
  // clueless
  return unknown();
}

SizeOffsetEvalType ObjectSizeOffsetEvaluator::visitLoadInst(LoadInst&) {
  return unknown();
}

SizeOffsetEvalType ObjectSizeOffsetEvaluator::visitPHINode(PHINode &PHI) {
  // Create 2 PHIs: one for size and another for offset.
  PHINode *SizePHI = Builder.CreatePHI(IntTy, PHI.getNumIncomingValues());
  PHINode *OffsetPHI = Builder.CreatePHI(IntTy, PHI.getNumIncomingValues());

  // Insert right away in the cache to handle recursive PHIs.
  CacheMap[&PHI] = std::make_pair(SizePHI, OffsetPHI);

  // Compute offset/size for each PHI incoming pointer.
953 for (unsigned i = 0, e = PHI.getNumIncomingValues(); i != e; ++i) { 954 Builder.SetInsertPoint(&*PHI.getIncomingBlock(i)->getFirstInsertionPt()); 955 SizeOffsetEvalType EdgeData = compute_(PHI.getIncomingValue(i)); 956 957 if (!bothKnown(EdgeData)) { 958 OffsetPHI->replaceAllUsesWith(UndefValue::get(IntTy)); 959 OffsetPHI->eraseFromParent(); 960 InsertedInstructions.erase(OffsetPHI); 961 SizePHI->replaceAllUsesWith(UndefValue::get(IntTy)); 962 SizePHI->eraseFromParent(); 963 InsertedInstructions.erase(SizePHI); 964 return unknown(); 965 } 966 SizePHI->addIncoming(EdgeData.first, PHI.getIncomingBlock(i)); 967 OffsetPHI->addIncoming(EdgeData.second, PHI.getIncomingBlock(i)); 968 } 969 970 Value *Size = SizePHI, *Offset = OffsetPHI; 971 if (Value *Tmp = SizePHI->hasConstantValue()) { 972 Size = Tmp; 973 SizePHI->replaceAllUsesWith(Size); 974 SizePHI->eraseFromParent(); 975 InsertedInstructions.erase(SizePHI); 976 } 977 if (Value *Tmp = OffsetPHI->hasConstantValue()) { 978 Offset = Tmp; 979 OffsetPHI->replaceAllUsesWith(Offset); 980 OffsetPHI->eraseFromParent(); 981 InsertedInstructions.erase(OffsetPHI); 982 } 983 return std::make_pair(Size, Offset); 984 } 985 986 SizeOffsetEvalType ObjectSizeOffsetEvaluator::visitSelectInst(SelectInst &I) { 987 SizeOffsetEvalType TrueSide = compute_(I.getTrueValue()); 988 SizeOffsetEvalType FalseSide = compute_(I.getFalseValue()); 989 990 if (!bothKnown(TrueSide) || !bothKnown(FalseSide)) 991 return unknown(); 992 if (TrueSide == FalseSide) 993 return TrueSide; 994 995 Value *Size = Builder.CreateSelect(I.getCondition(), TrueSide.first, 996 FalseSide.first); 997 Value *Offset = Builder.CreateSelect(I.getCondition(), TrueSide.second, 998 FalseSide.second); 999 return std::make_pair(Size, Offset); 1000 } 1001 1002 SizeOffsetEvalType ObjectSizeOffsetEvaluator::visitInstruction(Instruction &I) { 1003 LLVM_DEBUG(dbgs() << "ObjectSizeOffsetEvaluator unknown instruction:" << I 1004 << '\n'); 1005 return unknown(); 1006 } 1007