//===- MemoryBuiltins.cpp - Identify calls to memory builtins -------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This family of functions identifies calls to builtin functions that allocate
// or free memory.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Analysis/TargetFolder.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/Utils/Local.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include <cassert>
#include <cstdint>
#include <iterator>
#include <type_traits>
#include <utility>

using namespace llvm;

#define DEBUG_TYPE "memory-builtins"

// Bitmask classifying allocation-like library functions. The composite
// values at the bottom are the masks queried by the is*LikeFn predicates
// below.
enum AllocType : uint8_t {
  OpNewLike          = 1<<0, // allocates; never returns null
  MallocLike         = 1<<1, // allocates; may return null
  AlignedAllocLike   = 1<<2, // allocates with alignment; may return null
  CallocLike         = 1<<3, // allocates + bzero
  ReallocLike        = 1<<4, // reallocates
  StrDupLike         = 1<<5, // allocates and copies a string (strdup/strndup)
  MallocOrOpNewLike  = MallocLike | OpNewLike,
  MallocOrCallocLike = MallocLike | OpNewLike | CallocLike | AlignedAllocLike,
  AllocLike          = MallocOrCallocLike | StrDupLike,
  AnyAlloc           = AllocLike | ReallocLike
};

// Per-function description of an allocation routine: how it is classified,
// how many parameters its prototype takes, and which parameters carry size
// and alignment information.
struct AllocFnsTy {
  AllocType AllocTy;
  unsigned NumParams;
  // First and Second size parameters (or -1 if unused)
  int FstParam, SndParam;
  // Alignment parameter for aligned_alloc and aligned new
  int AlignParam;
};

// FIXME: certain users need more information. E.g., SimplifyLibCalls needs to
// know which functions are nounwind, noalias, nocapture parameters, etc.
static const std::pair<LibFunc, AllocFnsTy> AllocationFnData[] = {
    {LibFunc_malloc,                            {MallocLike,       1,  0, -1, -1}},
    {LibFunc_vec_malloc,                        {MallocLike,       1,  0, -1, -1}},
    {LibFunc_valloc,                            {MallocLike,       1,  0, -1, -1}},
    {LibFunc_Znwj,                              {OpNewLike,        1,  0, -1, -1}}, // new(unsigned int)
    {LibFunc_ZnwjRKSt9nothrow_t,                {MallocLike,       2,  0, -1, -1}}, // new(unsigned int, nothrow)
    {LibFunc_ZnwjSt11align_val_t,               {OpNewLike,        2,  0, -1,  1}}, // new(unsigned int, align_val_t)
    {LibFunc_ZnwjSt11align_val_tRKSt9nothrow_t, {MallocLike,       3,  0, -1,  1}}, // new(unsigned int, align_val_t, nothrow)
    {LibFunc_Znwm,                              {OpNewLike,        1,  0, -1, -1}}, // new(unsigned long)
    {LibFunc_ZnwmRKSt9nothrow_t,                {MallocLike,       2,  0, -1, -1}}, // new(unsigned long, nothrow)
    {LibFunc_ZnwmSt11align_val_t,               {OpNewLike,        2,  0, -1,  1}}, // new(unsigned long, align_val_t)
    {LibFunc_ZnwmSt11align_val_tRKSt9nothrow_t, {MallocLike,       3,  0, -1,  1}}, // new(unsigned long, align_val_t, nothrow)
    {LibFunc_Znaj,                              {OpNewLike,        1,  0, -1, -1}}, // new[](unsigned int)
    {LibFunc_ZnajRKSt9nothrow_t,                {MallocLike,       2,  0, -1, -1}}, // new[](unsigned int, nothrow)
    {LibFunc_ZnajSt11align_val_t,               {OpNewLike,        2,  0, -1,  1}}, // new[](unsigned int, align_val_t)
    {LibFunc_ZnajSt11align_val_tRKSt9nothrow_t, {MallocLike,       3,  0, -1,  1}}, // new[](unsigned int, align_val_t, nothrow)
    {LibFunc_Znam,                              {OpNewLike,        1,  0, -1, -1}}, // new[](unsigned long)
    {LibFunc_ZnamRKSt9nothrow_t,                {MallocLike,       2,  0, -1, -1}}, // new[](unsigned long, nothrow)
    {LibFunc_ZnamSt11align_val_t,               {OpNewLike,        2,  0, -1,  1}}, // new[](unsigned long, align_val_t)
    {LibFunc_ZnamSt11align_val_tRKSt9nothrow_t, {MallocLike,       3,  0, -1,  1}}, // new[](unsigned long, align_val_t, nothrow)
    {LibFunc_msvc_new_int,                      {OpNewLike,        1,  0, -1, -1}}, // new(unsigned int)
    {LibFunc_msvc_new_int_nothrow,              {MallocLike,       2,  0, -1, -1}}, // new(unsigned int, nothrow)
    {LibFunc_msvc_new_longlong,                 {OpNewLike,        1,  0, -1, -1}}, // new(unsigned long long)
    {LibFunc_msvc_new_longlong_nothrow,         {MallocLike,       2,  0, -1, -1}}, // new(unsigned long long, nothrow)
    {LibFunc_msvc_new_array_int,                {OpNewLike,        1,  0, -1, -1}}, // new[](unsigned int)
    {LibFunc_msvc_new_array_int_nothrow,        {MallocLike,       2,  0, -1, -1}}, // new[](unsigned int, nothrow)
    {LibFunc_msvc_new_array_longlong,           {OpNewLike,        1,  0, -1, -1}}, // new[](unsigned long long)
    {LibFunc_msvc_new_array_longlong_nothrow,   {MallocLike,       2,  0, -1, -1}}, // new[](unsigned long long, nothrow)
    {LibFunc_aligned_alloc,                     {AlignedAllocLike, 2,  1, -1,  0}},
    {LibFunc_memalign,                          {AlignedAllocLike, 2,  1, -1,  0}},
    {LibFunc_calloc,                            {CallocLike,       2,  0,  1, -1}},
    {LibFunc_vec_calloc,                        {CallocLike,       2,  0,  1, -1}},
    {LibFunc_realloc,                           {ReallocLike,      2,  1, -1, -1}},
    {LibFunc_vec_realloc,                       {ReallocLike,      2,  1, -1, -1}},
    {LibFunc_reallocf,                          {ReallocLike,      2,  1, -1, -1}},
    {LibFunc_strdup,                            {StrDupLike,       1, -1, -1, -1}},
    {LibFunc_strndup,                           {StrDupLike,       2,  1, -1, -1}},
    {LibFunc___kmpc_alloc_shared,               {MallocLike,       1,  0, -1, -1}},
    // TODO: Handle "int posix_memalign(void **, size_t, size_t)"
};

/// If \p V is a direct, non-intrinsic call or invoke with a known callee,
/// return that callee and set \p IsNoBuiltin from the call site's nobuiltin
/// flag; otherwise return nullptr (in which case \p IsNoBuiltin is left
/// unset — callers must check the return value first).
static const Function *getCalledFunction(const Value *V,
                                         bool &IsNoBuiltin) {
  // Don't care about intrinsics in this case.
  if (isa<IntrinsicInst>(V))
    return nullptr;

  const auto *CB = dyn_cast<CallBase>(V);
  if (!CB)
    return nullptr;

  IsNoBuiltin = CB->isNoBuiltin();

  if (const Function *Callee = CB->getCalledFunction())
    return Callee;
  return nullptr;
}

/// Returns the allocation data for the given value if it's a call to a known
/// allocation function.
static Optional<AllocFnsTy>
getAllocationDataForFunction(const Function *Callee, AllocType AllocTy,
                             const TargetLibraryInfo *TLI) {
  // Make sure that the function is available.
  LibFunc TLIFn;
  if (!TLI || !TLI->getLibFunc(*Callee, TLIFn) || !TLI->has(TLIFn))
    return None;

  const auto *Iter = find_if(
      AllocationFnData, [TLIFn](const std::pair<LibFunc, AllocFnsTy> &P) {
        return P.first == TLIFn;
      });

  if (Iter == std::end(AllocationFnData))
    return None;

  const AllocFnsTy *FnData = &Iter->second;
  // Reject entries whose classification is not a subset of the requested
  // AllocType mask.
  if ((FnData->AllocTy & AllocTy) != FnData->AllocTy)
    return None;

  // Check function prototype: must return i8* and any size parameters must be
  // 32- or 64-bit integers.
  int FstParam = FnData->FstParam;
  int SndParam = FnData->SndParam;
  FunctionType *FTy = Callee->getFunctionType();

  if (FTy->getReturnType() == Type::getInt8PtrTy(FTy->getContext()) &&
      FTy->getNumParams() == FnData->NumParams &&
      (FstParam < 0 ||
       (FTy->getParamType(FstParam)->isIntegerTy(32) ||
        FTy->getParamType(FstParam)->isIntegerTy(64))) &&
      (SndParam < 0 ||
       FTy->getParamType(SndParam)->isIntegerTy(32) ||
       FTy->getParamType(SndParam)->isIntegerTy(64)))
    return *FnData;
  return None;
}

/// As getAllocationDataForFunction, but starting from an arbitrary value;
/// returns None for indirect calls, intrinsics, and nobuiltin call sites.
static Optional<AllocFnsTy> getAllocationData(const Value *V, AllocType AllocTy,
                                              const TargetLibraryInfo *TLI) {
  bool IsNoBuiltinCall;
  if (const Function *Callee = getCalledFunction(V, IsNoBuiltinCall))
    if (!IsNoBuiltinCall)
      return getAllocationDataForFunction(Callee, AllocTy, TLI);
  return None;
}

/// Overload that obtains the TargetLibraryInfo for the callee on demand via
/// the \p GetTLI callback.
static Optional<AllocFnsTy>
getAllocationData(const Value *V, AllocType AllocTy,
                  function_ref<const TargetLibraryInfo &(Function &)> GetTLI) {
  bool IsNoBuiltinCall;
  if (const Function *Callee = getCalledFunction(V, IsNoBuiltinCall))
    if (!IsNoBuiltinCall)
      return getAllocationDataForFunction(
          Callee, AllocTy, &GetTLI(const_cast<Function &>(*Callee)));
  return None;
}

/// Returns size-parameter information for \p V, either from the builtin
/// allocation-function table or, failing that, synthesized from the callee's
/// allocsize attribute.
static Optional<AllocFnsTy> getAllocationSize(const Value *V,
                                              const TargetLibraryInfo *TLI) {
  bool IsNoBuiltinCall;
  const Function *Callee =
      getCalledFunction(V, IsNoBuiltinCall);
  if (!Callee)
    return None;

  // Prefer to use existing information over allocsize. This will give us an
  // accurate AllocTy.
  if (!IsNoBuiltinCall)
    if (Optional<AllocFnsTy> Data =
            getAllocationDataForFunction(Callee, AnyAlloc, TLI))
      return Data;

  Attribute Attr = Callee->getFnAttribute(Attribute::AllocSize);
  if (Attr == Attribute())
    return None;

  std::pair<unsigned, Optional<unsigned>> Args = Attr.getAllocSizeArgs();

  AllocFnsTy Result;
  // Because allocsize only tells us how many bytes are allocated, we're not
  // really allowed to assume anything, so we use MallocLike.
  Result.AllocTy = MallocLike;
  Result.NumParams = Callee->getNumOperands();
  Result.FstParam = Args.first;
  Result.SndParam = Args.second.getValueOr(-1);
  // Allocsize has no way to specify an alignment argument
  Result.AlignParam = -1;
  return Result;
}

/// Tests if a value is a call or invoke to a library function that
/// allocates or reallocates memory (either malloc, calloc, realloc, or strdup
/// like).
bool llvm::isAllocationFn(const Value *V, const TargetLibraryInfo *TLI) {
  return getAllocationData(V, AnyAlloc, TLI).hasValue();
}
bool llvm::isAllocationFn(
    const Value *V, function_ref<const TargetLibraryInfo &(Function &)> GetTLI) {
  return getAllocationData(V, AnyAlloc, GetTLI).hasValue();
}

/// Tests if a value is a call or invoke to a library function that
/// allocates uninitialized memory (such as malloc).
static bool isMallocLikeFn(const Value *V, const TargetLibraryInfo *TLI) {
  return getAllocationData(V, MallocOrOpNewLike, TLI).hasValue();
}

/// Tests if a value is a call or invoke to a library function that
/// allocates uninitialized memory with alignment (such as aligned_alloc).
static bool isAlignedAllocLikeFn(const Value *V, const TargetLibraryInfo *TLI) {
  return getAllocationData(V, AlignedAllocLike, TLI)
      .hasValue();
}

/// Tests if a value is a call or invoke to a library function that
/// allocates zero-filled memory (such as calloc).
static bool isCallocLikeFn(const Value *V, const TargetLibraryInfo *TLI) {
  return getAllocationData(V, CallocLike, TLI).hasValue();
}

/// Tests if a value is a call or invoke to a library function that
/// allocates memory similar to malloc or calloc.
bool llvm::isMallocOrCallocLikeFn(const Value *V, const TargetLibraryInfo *TLI) {
  return getAllocationData(V, MallocOrCallocLike, TLI).hasValue();
}

/// Tests if a value is a call or invoke to a library function that
/// allocates memory (either malloc, calloc, or strdup like).
bool llvm::isAllocLikeFn(const Value *V, const TargetLibraryInfo *TLI) {
  return getAllocationData(V, AllocLike, TLI).hasValue();
}

/// Tests if a value is a call or invoke to a library function that
/// reallocates memory (e.g., realloc).
bool llvm::isReallocLikeFn(const Value *V, const TargetLibraryInfo *TLI) {
  return getAllocationData(V, ReallocLike, TLI).hasValue();
}

/// Tests if a functions is a call or invoke to a library function that
/// reallocates memory (e.g., realloc).
bool llvm::isReallocLikeFn(const Function *F, const TargetLibraryInfo *TLI) {
  return getAllocationDataForFunction(F, ReallocLike, TLI).hasValue();
}

/// Returns true if \p CB is an allocation call whose removal (when the result
/// is unused) is permitted. \p CB must already satisfy isAllocationFn.
bool llvm::isAllocRemovable(const CallBase *CB, const TargetLibraryInfo *TLI) {
  assert(isAllocationFn(CB, TLI));

  // Note: Removability is highly dependent on the source language. For
  // example, recent C++ requires direct calls to the global allocation
  // [basic.stc.dynamic.allocation] to be observable unless part of a new
  // expression [expr.new paragraph 13].

  // Historically we've treated the C family allocation routines as removable
  return isAllocLikeFn(CB, TLI);
}

/// Returns the alignment argument of the allocation call \p V, or nullptr if
/// the callee has no alignment parameter. \p V must satisfy isAllocationFn.
Value *llvm::getAllocAlignment(const CallBase *V,
                               const TargetLibraryInfo *TLI) {
  assert(isAllocationFn(V, TLI));

  const Optional<AllocFnsTy> FnData = getAllocationData(V, AnyAlloc, TLI);
  if (!FnData.hasValue() || FnData->AlignParam < 0) {
    return nullptr;
  }
  return V->getOperand(FnData->AlignParam);
}

/// When we're compiling N-bit code, and the user uses parameters that are
/// greater than N bits (e.g. uint64_t on a 32-bit build), we can run into
/// trouble with APInt size issues. This function handles resizing + overflow
/// checks for us. Check and zext or trunc \p I depending on IntTyBits and
/// I's value.
static bool CheckedZextOrTrunc(APInt &I, unsigned IntTyBits) {
  // More bits than we can handle. Checking the bit width isn't necessary, but
  // it's faster than checking active bits, and should give `false` in the
  // vast majority of cases.
  if (I.getBitWidth() > IntTyBits && I.getActiveBits() > IntTyBits)
    return false;
  if (I.getBitWidth() != IntTyBits)
    I = I.zextOrTrunc(IntTyBits);
  return true;
}

/// Computes the constant number of bytes allocated by call \p CB, if it can
/// be determined. \p Mapper lets callers substitute argument values (e.g.
/// from a value-tracking analysis) before they are inspected.
Optional<APInt>
llvm::getAllocSize(const CallBase *CB,
                   const TargetLibraryInfo *TLI,
                   std::function<const Value*(const Value*)> Mapper) {
  // Note: This handles both explicitly listed allocation functions and
  // allocsize. The code structure could stand to be cleaned up a bit.
  Optional<AllocFnsTy> FnData = getAllocationSize(CB, TLI);
  if (!FnData)
    return None;

  // Get the index type for this address space, results and intermediate
  // computations are performed at that width.
  auto &DL = CB->getModule()->getDataLayout();
  const unsigned IntTyBits = DL.getIndexTypeSizeInBits(CB->getType());

  // Handle strdup-like functions separately.
  if (FnData->AllocTy == StrDupLike) {
    // GetStringLength yields 0 when the length is unknown, which the !Size
    // check below treats as failure.
    APInt Size(IntTyBits, GetStringLength(Mapper(CB->getArgOperand(0))));
    if (!Size)
      return None;

    // Strndup limits strlen.
    if (FnData->FstParam > 0) {
      const ConstantInt *Arg =
          dyn_cast<ConstantInt>(Mapper(CB->getArgOperand(FnData->FstParam)));
      if (!Arg)
        return None;

      APInt MaxSize = Arg->getValue().zextOrSelf(IntTyBits);
      // +1 accounts for the NUL terminator strndup always appends.
      if (Size.ugt(MaxSize))
        Size = MaxSize + 1;
    }
    return Size;
  }

  const ConstantInt *Arg =
      dyn_cast<ConstantInt>(Mapper(CB->getArgOperand(FnData->FstParam)));
  if (!Arg)
    return None;

  APInt Size = Arg->getValue();
  if (!CheckedZextOrTrunc(Size, IntTyBits))
    return None;

  // Size is determined by just 1 parameter.
  if (FnData->SndParam < 0)
    return Size;

  Arg = dyn_cast<ConstantInt>(Mapper(CB->getArgOperand(FnData->SndParam)));
  if (!Arg)
    return None;

  APInt NumElems = Arg->getValue();
  if (!CheckedZextOrTrunc(NumElems, IntTyBits))
    return None;

  // calloc-style: total size = element size * element count, rejecting
  // overflow.
  bool Overflow;
  Size = Size.umul_ov(NumElems, Overflow);
  if (Overflow)
    return None;
  return Size;
}

/// Returns the value newly-allocated memory holds after the allocation call
/// \p Alloc: undef for malloc-like, zero for calloc-like, nullptr when the
/// initial contents are not known.
Constant *llvm::getInitialValueOfAllocation(const CallBase *Alloc,
                                            const TargetLibraryInfo *TLI,
                                            Type *Ty) {
  assert(isAllocationFn(Alloc, TLI));

  // malloc and aligned_alloc are uninitialized (undef)
  if (isMallocLikeFn(Alloc, TLI) || isAlignedAllocLikeFn(Alloc, TLI))
    return UndefValue::get(Ty);

  // calloc zero initializes
  if (isCallocLikeFn(Alloc, TLI))
    return Constant::getNullValue(Ty);

  return nullptr;
}

// Per-function description of a deallocation routine; currently only the
// expected parameter count is needed for prototype checking.
struct FreeFnsTy {
  unsigned NumParams;
};

// clang-format off
static const std::pair<LibFunc, FreeFnsTy> FreeFnData[] = {
    {LibFunc_free,                               {1}},
    {LibFunc_ZdlPv,                              {1}}, // operator delete(void*)
    {LibFunc_ZdaPv,                              {1}}, // operator delete[](void*)
    {LibFunc_msvc_delete_ptr32,                  {1}}, // operator delete(void*)
    {LibFunc_msvc_delete_ptr64,                  {1}}, // operator delete(void*)
    {LibFunc_msvc_delete_array_ptr32,            {1}}, // operator delete[](void*)
    {LibFunc_msvc_delete_array_ptr64,            {1}}, // operator delete[](void*)
    {LibFunc_ZdlPvj,                             {2}}, // delete(void*, uint)
    {LibFunc_ZdlPvm,                             {2}}, // delete(void*, ulong)
    {LibFunc_ZdlPvRKSt9nothrow_t,                {2}}, // delete(void*, nothrow)
    {LibFunc_ZdlPvSt11align_val_t,               {2}}, // delete(void*, align_val_t)
    {LibFunc_ZdaPvj,                             {2}}, // delete[](void*, uint)
    {LibFunc_ZdaPvm,                             {2}}, // delete[](void*, ulong)
    {LibFunc_ZdaPvRKSt9nothrow_t,                {2}}, // delete[](void*, nothrow)
    {LibFunc_ZdaPvSt11align_val_t,               {2}}, // delete[](void*, align_val_t)
    {LibFunc_msvc_delete_ptr32_int,              {2}}, // delete(void*, uint)
    {LibFunc_msvc_delete_ptr64_longlong,         {2}}, // delete(void*, ulonglong)
    {LibFunc_msvc_delete_ptr32_nothrow,          {2}}, // delete(void*, nothrow)
    {LibFunc_msvc_delete_ptr64_nothrow,          {2}}, // delete(void*, nothrow)
    {LibFunc_msvc_delete_array_ptr32_int,        {2}}, // delete[](void*, uint)
    {LibFunc_msvc_delete_array_ptr64_longlong,   {2}}, // delete[](void*, ulonglong)
    {LibFunc_msvc_delete_array_ptr32_nothrow,    {2}}, // delete[](void*, nothrow)
    {LibFunc_msvc_delete_array_ptr64_nothrow,    {2}}, // delete[](void*, nothrow)
    {LibFunc___kmpc_free_shared,                 {2}}, // OpenMP Offloading RTL free
    {LibFunc_ZdlPvSt11align_val_tRKSt9nothrow_t, {3}}, // delete(void*, align_val_t, nothrow)
    {LibFunc_ZdaPvSt11align_val_tRKSt9nothrow_t, {3}}, // delete[](void*, align_val_t, nothrow)
    {LibFunc_ZdlPvjSt11align_val_t,              {3}}, // delete(void*, unsigned int, align_val_t)
    {LibFunc_ZdlPvmSt11align_val_t,              {3}}, // delete(void*, unsigned long, align_val_t)
    {LibFunc_ZdaPvjSt11align_val_t,              {3}}, // delete[](void*, unsigned int, align_val_t)
    {LibFunc_ZdaPvmSt11align_val_t,              {3}}, // delete[](void*, unsigned long, align_val_t)
};
// clang-format on

/// Looks up \p TLIFn in the free-function table above.
/// Note: \p Callee is currently unused; the lookup is keyed on TLIFn only.
Optional<FreeFnsTy> getFreeFunctionDataForFunction(const Function *Callee,
                                                   const LibFunc TLIFn) {
  const auto *Iter =
      find_if(FreeFnData, [TLIFn](const std::pair<LibFunc, FreeFnsTy> &P) {
        return P.first == TLIFn;
      });
  if (Iter == std::end(FreeFnData))
    return None;
  return Iter->second;
}

/// isLibFreeFunction - Returns true if the function is a builtin free()
bool llvm::isLibFreeFunction(const Function *F, const LibFunc TLIFn) {
  Optional<FreeFnsTy> FnData = getFreeFunctionDataForFunction(F, TLIFn);
  if (!FnData.hasValue())
    return false;

  // Check free prototype.
  // FIXME: workaround for PR5130, this will be obsolete when a nobuiltin
  // attribute will exist.
  FunctionType *FTy = F->getFunctionType();
  if (!FTy->getReturnType()->isVoidTy())
    return false;
  if (FTy->getNumParams() != FnData->NumParams)
    return false;
  if (FTy->getParamType(0) != Type::getInt8PtrTy(F->getContext()))
    return false;

  return true;
}

/// isFreeCall - Returns non-null if the value is a call to the builtin free()
const CallInst *llvm::isFreeCall(const Value *I, const TargetLibraryInfo *TLI) {
  bool IsNoBuiltinCall;
  const Function *Callee = getCalledFunction(I, IsNoBuiltinCall);
  if (Callee == nullptr || IsNoBuiltinCall)
    return nullptr;

  LibFunc TLIFn;
  if (!TLI || !TLI->getLibFunc(*Callee, TLIFn) || !TLI->has(TLIFn))
    return nullptr;

  return isLibFreeFunction(Callee, TLIFn) ? dyn_cast<CallInst>(I) : nullptr;
}


//===----------------------------------------------------------------------===//
// Utility functions to compute size of objects.
//

// Returns the accessible size (object size minus offset), clamped to zero
// when the offset is negative or past the end of the object.
static APInt getSizeWithOverflow(const SizeOffsetType &Data) {
  if (Data.second.isNegative() || Data.first.ult(Data.second))
    return APInt(Data.first.getBitWidth(), 0);
  return Data.first - Data.second;
}

/// Compute the size of the object pointed by Ptr. Returns true and the
/// object size in Size if successful, and false otherwise.
/// If RoundToAlign is true, then Size is rounded up to the alignment of
/// allocas, byval arguments, and global variables.
bool llvm::getObjectSize(const Value *Ptr, uint64_t &Size, const DataLayout &DL,
                         const TargetLibraryInfo *TLI, ObjectSizeOpts Opts) {
  ObjectSizeOffsetVisitor Visitor(DL, TLI, Ptr->getContext(), Opts);
  SizeOffsetType Data = Visitor.compute(const_cast<Value*>(Ptr));
  if (!Visitor.bothKnown(Data))
    return false;

  Size = getSizeWithOverflow(Data).getZExtValue();
  return true;
}

/// Folds (or, failing that, lowers to runtime code) a call to
/// llvm.objectsize. Returns the replacement value, or nullptr when the size
/// is unknown and \p MustSucceed is false.
Value *llvm::lowerObjectSizeCall(IntrinsicInst *ObjectSize,
                                 const DataLayout &DL,
                                 const TargetLibraryInfo *TLI,
                                 bool MustSucceed) {
  assert(ObjectSize->getIntrinsicID() == Intrinsic::objectsize &&
         "ObjectSize must be a call to llvm.objectsize!");

  // Argument 1 of llvm.objectsize selects min (true) vs max (false) behavior.
  bool MaxVal = cast<ConstantInt>(ObjectSize->getArgOperand(1))->isZero();
  ObjectSizeOpts EvalOptions;
  // Unless we have to fold this to something, try to be as accurate as
  // possible.
  if (MustSucceed)
    EvalOptions.EvalMode =
        MaxVal ? ObjectSizeOpts::Mode::Max : ObjectSizeOpts::Mode::Min;
  else
    EvalOptions.EvalMode = ObjectSizeOpts::Mode::Exact;

  EvalOptions.NullIsUnknownSize =
      cast<ConstantInt>(ObjectSize->getArgOperand(2))->isOne();

  auto *ResultType = cast<IntegerType>(ObjectSize->getType());
  // Argument 3 selects static-only evaluation vs emitting runtime code.
  bool StaticOnly = cast<ConstantInt>(ObjectSize->getArgOperand(3))->isZero();
  if (StaticOnly) {
    // FIXME: Does it make sense to just return a failure value if the size won't
    // fit in the output and `!MustSucceed`?
    uint64_t Size;
    if (getObjectSize(ObjectSize->getArgOperand(0), Size, DL, TLI, EvalOptions) &&
        isUIntN(ResultType->getBitWidth(), Size))
      return ConstantInt::get(ResultType, Size);
  } else {
    LLVMContext &Ctx = ObjectSize->getFunction()->getContext();
    ObjectSizeOffsetEvaluator Eval(DL, TLI, Ctx, EvalOptions);
    SizeOffsetEvalType SizeOffsetPair =
        Eval.compute(ObjectSize->getArgOperand(0));

    if (SizeOffsetPair != ObjectSizeOffsetEvaluator::unknown()) {
      IRBuilder<TargetFolder> Builder(Ctx, TargetFolder(DL));
      Builder.SetInsertPoint(ObjectSize);

      // If we've outside the end of the object, then we can always access
      // exactly 0 bytes.
      Value *ResultSize =
          Builder.CreateSub(SizeOffsetPair.first, SizeOffsetPair.second);
      Value *UseZero =
          Builder.CreateICmpULT(SizeOffsetPair.first, SizeOffsetPair.second);
      ResultSize = Builder.CreateZExtOrTrunc(ResultSize, ResultType);
      Value *Ret = Builder.CreateSelect(
          UseZero, ConstantInt::get(ResultType, 0), ResultSize);

      // The non-constant size expression cannot evaluate to -1.
      if (!isa<Constant>(SizeOffsetPair.first) ||
          !isa<Constant>(SizeOffsetPair.second))
        Builder.CreateAssumption(
            Builder.CreateICmpNE(Ret, ConstantInt::get(ResultType, -1)));

      return Ret;
    }
  }

  if (!MustSucceed)
    return nullptr;

  // Forced fold with no known size: -1 for max mode, 0 for min mode.
  return ConstantInt::get(ResultType, MaxVal ? -1ULL : 0);
}

STATISTIC(ObjectVisitorArgument,
          "Number of arguments with unsolved size and offset");
STATISTIC(ObjectVisitorLoad,
          "Number of load instructions with unsolved size and offset");

/// Rounds \p Size up to \p Alignment when the RoundToAlign option is set and
/// an alignment is known; otherwise returns Size unchanged.
APInt ObjectSizeOffsetVisitor::align(APInt Size, MaybeAlign Alignment) {
  if (Options.RoundToAlign && Alignment)
    return APInt(IntTyBits, alignTo(Size.getZExtValue(), Alignment));
  return Size;
}

ObjectSizeOffsetVisitor::ObjectSizeOffsetVisitor(const DataLayout &DL,
                                                 const TargetLibraryInfo *TLI,
                                                 LLVMContext &Context,
                                                 ObjectSizeOpts Options)
    : DL(DL), TLI(TLI), Options(Options) {
  // Pointer size must be rechecked for each object visited since it could have
  // a different address space.
}

SizeOffsetType ObjectSizeOffsetVisitor::compute(Value *V) {
  unsigned InitialIntTyBits = DL.getIndexTypeSizeInBits(V->getType());

  // Stripping pointer casts can strip address space casts which can change the
  // index type size. The invariant is that we use the value type to determine
  // the index type size and if we stripped address space casts we have to
  // readjust the APInt as we pass it upwards in order for the APInt to match
  // the type the caller passed in.
  APInt Offset(InitialIntTyBits, 0);
  V = V->stripAndAccumulateConstantOffsets(
      DL, Offset, /* AllowNonInbounds */ true, /* AllowInvariantGroup */ true);

  // Later we use the index type size and zero but it will match the type of the
  // value that is passed to computeImpl.
  IntTyBits = DL.getIndexTypeSizeInBits(V->getType());
  Zero = APInt::getZero(IntTyBits);

  bool IndexTypeSizeChanged = InitialIntTyBits != IntTyBits;
  if (!IndexTypeSizeChanged && Offset.isZero())
    return computeImpl(V);

  // We stripped an address space cast that changed the index type size or we
  // accumulated some constant offset (or both). Readjust the bit width to match
  // the argument index type size and apply the offset, as required.
  SizeOffsetType SOT = computeImpl(V);
  if (IndexTypeSizeChanged) {
    // A zero-width APInt is the "unknown" marker; force it when the result no
    // longer fits the caller's index width.
    if (knownSize(SOT) && !::CheckedZextOrTrunc(SOT.first, InitialIntTyBits))
      SOT.first = APInt();
    if (knownOffset(SOT) && !::CheckedZextOrTrunc(SOT.second, InitialIntTyBits))
      SOT.second = APInt();
  }
  // If the computed offset is "unknown" we cannot add the stripped offset.
  return {SOT.first,
          SOT.second.getBitWidth() > 1 ? SOT.second + Offset : SOT.second};
}

SizeOffsetType ObjectSizeOffsetVisitor::computeImpl(Value *V) {
  if (Instruction *I = dyn_cast<Instruction>(V)) {
    // If we have already seen this instruction, bail out. Cycles can happen in
    // unreachable code after constant propagation.
    if (!SeenInsts.insert(I).second)
      return unknown();

    return visit(*I);
  }
  if (Argument *A = dyn_cast<Argument>(V))
    return visitArgument(*A);
  if (ConstantPointerNull *P = dyn_cast<ConstantPointerNull>(V))
    return visitConstantPointerNull(*P);
  if (GlobalAlias *GA = dyn_cast<GlobalAlias>(V))
    return visitGlobalAlias(*GA);
  if (GlobalVariable *GV = dyn_cast<GlobalVariable>(V))
    return visitGlobalVariable(*GV);
  if (UndefValue *UV = dyn_cast<UndefValue>(V))
    return visitUndefValue(*UV);

  LLVM_DEBUG(dbgs() << "ObjectSizeOffsetVisitor::compute() unhandled value: "
                    << *V << '\n');
  return unknown();
}

// Member wrapper around the file-static helper, using this visitor's current
// index-type width.
bool ObjectSizeOffsetVisitor::CheckedZextOrTrunc(APInt &I) {
  return ::CheckedZextOrTrunc(I, IntTyBits);
}

SizeOffsetType ObjectSizeOffsetVisitor::visitAllocaInst(AllocaInst &I) {
  if (!I.getAllocatedType()->isSized())
    return unknown();

  // Scalable vectors have no compile-time-known allocation size.
  if (isa<ScalableVectorType>(I.getAllocatedType()))
    return unknown();

  APInt Size(IntTyBits, DL.getTypeAllocSize(I.getAllocatedType()));
  if (!I.isArrayAllocation())
    return std::make_pair(align(Size, I.getAlign()), Zero);

  Value *ArraySize = I.getArraySize();
  if (const ConstantInt *C = dyn_cast<ConstantInt>(ArraySize)) {
    APInt NumElems = C->getValue();
    if (!CheckedZextOrTrunc(NumElems))
      return unknown();

    bool Overflow;
    Size = Size.umul_ov(NumElems, Overflow);
    return Overflow ? unknown()
                    : std::make_pair(align(Size, I.getAlign()), Zero);
  }
  return unknown();
}

SizeOffsetType ObjectSizeOffsetVisitor::visitArgument(Argument &A) {
  Type *MemoryTy = A.getPointeeInMemoryValueType();
  // No interprocedural analysis is done at the moment.
  if (!MemoryTy|| !MemoryTy->isSized()) {
    ++ObjectVisitorArgument;
    return unknown();
  }

  APInt Size(IntTyBits, DL.getTypeAllocSize(MemoryTy));
  return std::make_pair(align(Size, A.getParamAlign()), Zero);
}

SizeOffsetType ObjectSizeOffsetVisitor::visitCallBase(CallBase &CB) {
  // Identity mapper: inspect the call's arguments directly.
  auto Mapper = [](const Value *V) { return V; };
  if (Optional<APInt> Size = getAllocSize(&CB, TLI, Mapper))
    return std::make_pair(*Size, Zero);
  return unknown();
}

SizeOffsetType
ObjectSizeOffsetVisitor::visitConstantPointerNull(ConstantPointerNull& CPN) {
  // If null is unknown, there's nothing we can do. Additionally, non-zero
  // address spaces can make use of null, so we don't presume to know anything
  // about that.
  //
  // TODO: How should this work with address space casts? We currently just drop
  // them on the floor, but it's unclear what we should do when a NULL from
  // addrspace(1) gets casted to addrspace(0) (or vice-versa).
  if (Options.NullIsUnknownSize || CPN.getType()->getAddressSpace())
    return unknown();
  return std::make_pair(Zero, Zero);
}

SizeOffsetType
ObjectSizeOffsetVisitor::visitExtractElementInst(ExtractElementInst&) {
  return unknown();
}

SizeOffsetType
ObjectSizeOffsetVisitor::visitExtractValueInst(ExtractValueInst&) {
  // Easy cases were already folded by previous passes.
  return unknown();
}

SizeOffsetType ObjectSizeOffsetVisitor::visitGlobalAlias(GlobalAlias &GA) {
  if (GA.isInterposable())
    return unknown();
  return compute(GA.getAliasee());
}

SizeOffsetType ObjectSizeOffsetVisitor::visitGlobalVariable(GlobalVariable &GV){
  if (!GV.hasDefinitiveInitializer())
    return unknown();

  APInt Size(IntTyBits, DL.getTypeAllocSize(GV.getValueType()));
  return std::make_pair(align(Size, GV.getAlign()), Zero);
}

SizeOffsetType ObjectSizeOffsetVisitor::visitIntToPtrInst(IntToPtrInst&) {
  // clueless
  return unknown();
}

SizeOffsetType ObjectSizeOffsetVisitor::visitLoadInst(LoadInst&) {
  ++ObjectVisitorLoad;
  return unknown();
}

SizeOffsetType ObjectSizeOffsetVisitor::visitPHINode(PHINode&) {
  // too complex to analyze statically.
  return unknown();
}

SizeOffsetType ObjectSizeOffsetVisitor::visitSelectInst(SelectInst &I) {
  SizeOffsetType TrueSide = compute(I.getTrueValue());
  SizeOffsetType FalseSide = compute(I.getFalseValue());
  if (bothKnown(TrueSide) && bothKnown(FalseSide)) {
    if (TrueSide == FalseSide) {
      return TrueSide;
    }

    // The arms differ as (size, offset) pairs; compare their accessible
    // sizes instead, and pick an arm according to the evaluation mode.
    APInt TrueResult = getSizeWithOverflow(TrueSide);
    APInt FalseResult = getSizeWithOverflow(FalseSide);

    if (TrueResult == FalseResult) {
      return TrueSide;
    }
    if (Options.EvalMode == ObjectSizeOpts::Mode::Min) {
      if (TrueResult.slt(FalseResult))
        return TrueSide;
      return FalseSide;
    }
    if (Options.EvalMode == ObjectSizeOpts::Mode::Max) {
      if (TrueResult.sgt(FalseResult))
        return TrueSide;
      return FalseSide;
    }
  }
  return unknown();
}

SizeOffsetType ObjectSizeOffsetVisitor::visitUndefValue(UndefValue&) {
  return std::make_pair(Zero, Zero);
}

SizeOffsetType ObjectSizeOffsetVisitor::visitInstruction(Instruction &I) {
  LLVM_DEBUG(dbgs() << "ObjectSizeOffsetVisitor unknown instruction:" << I
                    << '\n');
  return unknown();
}

ObjectSizeOffsetEvaluator::ObjectSizeOffsetEvaluator(
    const DataLayout &DL, const TargetLibraryInfo *TLI, LLVMContext &Context,
    ObjectSizeOpts EvalOpts)
    : DL(DL), TLI(TLI), Context(Context),
      Builder(Context, TargetFolder(DL),
              IRBuilderCallbackInserter(
                  // Track every instruction the builder emits so failed
                  // evaluations can be rolled back in compute().
                  [&](Instruction *I) { InsertedInstructions.insert(I); })),
      EvalOpts(EvalOpts) {
  // IntTy and Zero must be set for each compute() since the address space may
  // be different for later objects.
}

SizeOffsetEvalType ObjectSizeOffsetEvaluator::compute(Value *V) {
  // XXX - Are vectors of pointers possible here?
  IntTy = cast<IntegerType>(DL.getIndexType(V->getType()));
  Zero = ConstantInt::get(IntTy, 0);

  SizeOffsetEvalType Result = compute_(V);

  if (!bothKnown(Result)) {
    // Erase everything that was computed in this iteration from the cache, so
    // that no dangling references are left behind. We could be a bit smarter if
    // we kept a dependency graph. It's probably not worth the complexity.
    for (const Value *SeenVal : SeenVals) {
      CacheMapTy::iterator CacheIt = CacheMap.find(SeenVal);
      // non-computable results can be safely cached
      if (CacheIt != CacheMap.end() && anyKnown(CacheIt->second))
        CacheMap.erase(CacheIt);
    }

    // Erase any instructions we inserted as part of the traversal.
    for (Instruction *I : InsertedInstructions) {
      I->replaceAllUsesWith(UndefValue::get(I->getType()));
      I->eraseFromParent();
    }
  }

  SeenVals.clear();
  InsertedInstructions.clear();
  return Result;
}

SizeOffsetEvalType ObjectSizeOffsetEvaluator::compute_(Value *V) {
  // First try the purely static visitor; a fully-known constant result needs
  // no runtime code.
  ObjectSizeOffsetVisitor Visitor(DL, TLI, Context, EvalOpts);
  SizeOffsetType Const = Visitor.compute(V);
  if (Visitor.bothKnown(Const))
    return std::make_pair(ConstantInt::get(Context, Const.first),
                          ConstantInt::get(Context, Const.second));

  V = V->stripPointerCasts();

  // Check cache.
  CacheMapTy::iterator CacheIt = CacheMap.find(V);
  if (CacheIt != CacheMap.end())
    return CacheIt->second;

  // Always generate code immediately before the instruction being
  // processed, so that the generated code dominates the same BBs.
  BuilderTy::InsertPointGuard Guard(Builder);
  if (Instruction *I = dyn_cast<Instruction>(V))
    Builder.SetInsertPoint(I);

  // Now compute the size and offset.
  SizeOffsetEvalType Result;

  // Record the pointers that were handled in this run, so that they can be
  // cleaned later if something fails. We also use this set to break cycles that
  // can occur in dead code.
  if (!SeenVals.insert(V).second) {
    Result = unknown();
  } else if (GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
    Result = visitGEPOperator(*GEP);
  } else if (Instruction *I = dyn_cast<Instruction>(V)) {
    Result = visit(*I);
  } else if (isa<Argument>(V) ||
             (isa<ConstantExpr>(V) &&
              cast<ConstantExpr>(V)->getOpcode() == Instruction::IntToPtr) ||
             isa<GlobalAlias>(V) ||
             isa<GlobalVariable>(V)) {
    // Ignore values where we cannot do more than ObjectSizeVisitor.
    Result = unknown();
  } else {
    LLVM_DEBUG(
        dbgs() << "ObjectSizeOffsetEvaluator::compute() unhandled value: " << *V
               << '\n');
    Result = unknown();
  }

  // Don't reuse CacheIt since it may be invalid at this point.
  CacheMap[V] = Result;
  return Result;
}

SizeOffsetEvalType ObjectSizeOffsetEvaluator::visitAllocaInst(AllocaInst &I) {
  if (!I.getAllocatedType()->isSized())
    return unknown();

  // must be a VLA
  assert(I.isArrayAllocation());

  // If needed, adjust the alloca's operand size to match the pointer size.
  // Subsequent math operations expect the types to match.
  Value *ArraySize = Builder.CreateZExtOrTrunc(
      I.getArraySize(), DL.getIntPtrType(I.getContext()));
  assert(ArraySize->getType() == Zero->getType() &&
         "Expected zero constant to have pointer type");

  Value *Size = ConstantInt::get(ArraySize->getType(),
                                 DL.getTypeAllocSize(I.getAllocatedType()));
  Size = Builder.CreateMul(Size, ArraySize);
  return std::make_pair(Size, Zero);
}

SizeOffsetEvalType ObjectSizeOffsetEvaluator::visitCallBase(CallBase &CB) {
  Optional<AllocFnsTy> FnData = getAllocationSize(&CB, TLI);
  if (!FnData)
    return unknown();

  // Handle strdup-like functions separately.
913 if (FnData->AllocTy == StrDupLike) { 914 // TODO: implement evaluation of strdup/strndup 915 return unknown(); 916 } 917 918 Value *FirstArg = CB.getArgOperand(FnData->FstParam); 919 FirstArg = Builder.CreateZExtOrTrunc(FirstArg, IntTy); 920 if (FnData->SndParam < 0) 921 return std::make_pair(FirstArg, Zero); 922 923 Value *SecondArg = CB.getArgOperand(FnData->SndParam); 924 SecondArg = Builder.CreateZExtOrTrunc(SecondArg, IntTy); 925 Value *Size = Builder.CreateMul(FirstArg, SecondArg); 926 return std::make_pair(Size, Zero); 927 } 928 929 SizeOffsetEvalType 930 ObjectSizeOffsetEvaluator::visitExtractElementInst(ExtractElementInst&) { 931 return unknown(); 932 } 933 934 SizeOffsetEvalType 935 ObjectSizeOffsetEvaluator::visitExtractValueInst(ExtractValueInst&) { 936 return unknown(); 937 } 938 939 SizeOffsetEvalType 940 ObjectSizeOffsetEvaluator::visitGEPOperator(GEPOperator &GEP) { 941 SizeOffsetEvalType PtrData = compute_(GEP.getPointerOperand()); 942 if (!bothKnown(PtrData)) 943 return unknown(); 944 945 Value *Offset = EmitGEPOffset(&Builder, DL, &GEP, /*NoAssumptions=*/true); 946 Offset = Builder.CreateAdd(PtrData.second, Offset); 947 return std::make_pair(PtrData.first, Offset); 948 } 949 950 SizeOffsetEvalType ObjectSizeOffsetEvaluator::visitIntToPtrInst(IntToPtrInst&) { 951 // clueless 952 return unknown(); 953 } 954 955 SizeOffsetEvalType ObjectSizeOffsetEvaluator::visitLoadInst(LoadInst&) { 956 return unknown(); 957 } 958 959 SizeOffsetEvalType ObjectSizeOffsetEvaluator::visitPHINode(PHINode &PHI) { 960 // Create 2 PHIs: one for size and another for offset. 961 PHINode *SizePHI = Builder.CreatePHI(IntTy, PHI.getNumIncomingValues()); 962 PHINode *OffsetPHI = Builder.CreatePHI(IntTy, PHI.getNumIncomingValues()); 963 964 // Insert right away in the cache to handle recursive PHIs. 965 CacheMap[&PHI] = std::make_pair(SizePHI, OffsetPHI); 966 967 // Compute offset/size for each PHI incoming pointer. 
968 for (unsigned i = 0, e = PHI.getNumIncomingValues(); i != e; ++i) { 969 Builder.SetInsertPoint(&*PHI.getIncomingBlock(i)->getFirstInsertionPt()); 970 SizeOffsetEvalType EdgeData = compute_(PHI.getIncomingValue(i)); 971 972 if (!bothKnown(EdgeData)) { 973 OffsetPHI->replaceAllUsesWith(UndefValue::get(IntTy)); 974 OffsetPHI->eraseFromParent(); 975 InsertedInstructions.erase(OffsetPHI); 976 SizePHI->replaceAllUsesWith(UndefValue::get(IntTy)); 977 SizePHI->eraseFromParent(); 978 InsertedInstructions.erase(SizePHI); 979 return unknown(); 980 } 981 SizePHI->addIncoming(EdgeData.first, PHI.getIncomingBlock(i)); 982 OffsetPHI->addIncoming(EdgeData.second, PHI.getIncomingBlock(i)); 983 } 984 985 Value *Size = SizePHI, *Offset = OffsetPHI; 986 if (Value *Tmp = SizePHI->hasConstantValue()) { 987 Size = Tmp; 988 SizePHI->replaceAllUsesWith(Size); 989 SizePHI->eraseFromParent(); 990 InsertedInstructions.erase(SizePHI); 991 } 992 if (Value *Tmp = OffsetPHI->hasConstantValue()) { 993 Offset = Tmp; 994 OffsetPHI->replaceAllUsesWith(Offset); 995 OffsetPHI->eraseFromParent(); 996 InsertedInstructions.erase(OffsetPHI); 997 } 998 return std::make_pair(Size, Offset); 999 } 1000 1001 SizeOffsetEvalType ObjectSizeOffsetEvaluator::visitSelectInst(SelectInst &I) { 1002 SizeOffsetEvalType TrueSide = compute_(I.getTrueValue()); 1003 SizeOffsetEvalType FalseSide = compute_(I.getFalseValue()); 1004 1005 if (!bothKnown(TrueSide) || !bothKnown(FalseSide)) 1006 return unknown(); 1007 if (TrueSide == FalseSide) 1008 return TrueSide; 1009 1010 Value *Size = Builder.CreateSelect(I.getCondition(), TrueSide.first, 1011 FalseSide.first); 1012 Value *Offset = Builder.CreateSelect(I.getCondition(), TrueSide.second, 1013 FalseSide.second); 1014 return std::make_pair(Size, Offset); 1015 } 1016 1017 SizeOffsetEvalType ObjectSizeOffsetEvaluator::visitInstruction(Instruction &I) { 1018 LLVM_DEBUG(dbgs() << "ObjectSizeOffsetEvaluator unknown instruction:" << I 1019 << '\n'); 1020 return unknown(); 1021 } 
1022