//===- MemoryBuiltins.cpp - Identify calls to memory builtins -------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This family of functions identifies calls to builtin functions that allocate
// or free memory.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/TargetFolder.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/Utils/Local.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include <cassert>
#include <cstdint>
#include <iterator>
#include <type_traits>
#include <utility>

using namespace llvm;

#define DEBUG_TYPE "memory-builtins"

enum AllocType : uint8_t {
  OpNewLike = 1<<0,        // allocates; never returns null
  MallocLike = 1<<1,       // allocates; may return null
  AlignedAllocLike = 1<<2, // allocates with alignment; may return null
  CallocLike = 1<<3,       // allocates + bzero
  ReallocLike = 1<<4,      // reallocates
  StrDupLike = 1<<5,
  MallocOrOpNewLike = MallocLike | OpNewLike,
  MallocOrCallocLike = MallocLike | OpNewLike | CallocLike | AlignedAllocLike,
  AllocLike = MallocOrCallocLike | StrDupLike,
  AnyAlloc = AllocLike | ReallocLike
};

enum class MallocFamily {
  Malloc,
  CPPNew,             // new(unsigned int)
  CPPNewAligned,      // new(unsigned int, align_val_t)
  CPPNewArray,        // new[](unsigned int)
  CPPNewArrayAligned, // new[](unsigned long, align_val_t)
  MSVCNew,            // new(unsigned int)
  MSVCArrayNew,       // new[](unsigned int)
  VecMalloc,
  KmpcAllocShared,
};

StringRef mangledNameForMallocFamily(const MallocFamily &Family) {
  switch (Family) {
  case MallocFamily::Malloc:
    return "malloc";
  case MallocFamily::CPPNew:
    return "_Znwm";
  case MallocFamily::CPPNewAligned:
    return "_ZnwmSt11align_val_t";
  case MallocFamily::CPPNewArray:
    return "_Znam";
  case MallocFamily::CPPNewArrayAligned:
    return "_ZnamSt11align_val_t";
  case MallocFamily::MSVCNew:
    return "??2@YAPAXI@Z";
  case MallocFamily::MSVCArrayNew:
    return "??_U@YAPAXI@Z";
  case MallocFamily::VecMalloc:
    return "vec_malloc";
  case MallocFamily::KmpcAllocShared:
    return "__kmpc_alloc_shared";
  }
  llvm_unreachable("missing an alloc family");
}

struct AllocFnsTy {
  AllocType AllocTy;
  unsigned NumParams;
  // First and second size parameters (or -1 if unused).
  int FstParam, SndParam;
  // Alignment parameter for aligned_alloc and aligned new.
  int AlignParam;
  // Name of default allocator function to group malloc/free calls by family.
  MallocFamily Family;
};

// clang-format off
// FIXME: certain users need more information. E.g., SimplifyLibCalls needs to
// know which functions are nounwind or noalias and which parameters are
// nocapture, etc.
static const std::pair<LibFunc, AllocFnsTy> AllocationFnData[] = {
    {LibFunc_malloc, {MallocLike, 1, 0, -1, -1, MallocFamily::Malloc}},
    {LibFunc_vec_malloc, {MallocLike, 1, 0, -1, -1, MallocFamily::VecMalloc}},
    {LibFunc_valloc, {MallocLike, 1, 0, -1, -1, MallocFamily::Malloc}},
    {LibFunc_Znwj, {OpNewLike, 1, 0, -1, -1, MallocFamily::CPPNew}},                                   // new(unsigned int)
    {LibFunc_ZnwjRKSt9nothrow_t, {MallocLike, 2, 0, -1, -1, MallocFamily::CPPNew}},                    // new(unsigned int, nothrow)
    {LibFunc_ZnwjSt11align_val_t, {OpNewLike, 2, 0, -1, 1, MallocFamily::CPPNewAligned}},              // new(unsigned int, align_val_t)
    {LibFunc_ZnwjSt11align_val_tRKSt9nothrow_t, {MallocLike, 3, 0, -1, 1, MallocFamily::CPPNewAligned}}, // new(unsigned int, align_val_t, nothrow)
    {LibFunc_Znwm, {OpNewLike, 1, 0, -1, -1, MallocFamily::CPPNew}},                                   // new(unsigned long)
    {LibFunc_ZnwmRKSt9nothrow_t, {MallocLike, 2, 0, -1, -1, MallocFamily::CPPNew}},                    // new(unsigned long, nothrow)
    {LibFunc_ZnwmSt11align_val_t, {OpNewLike, 2, 0, -1, 1, MallocFamily::CPPNewAligned}},              // new(unsigned long, align_val_t)
    {LibFunc_ZnwmSt11align_val_tRKSt9nothrow_t, {MallocLike, 3, 0, -1, 1, MallocFamily::CPPNewAligned}}, // new(unsigned long, align_val_t, nothrow)
    {LibFunc_Znaj, {OpNewLike, 1, 0, -1, -1, MallocFamily::CPPNewArray}},                              // new[](unsigned int)
    {LibFunc_ZnajRKSt9nothrow_t, {MallocLike, 2, 0, -1, -1, MallocFamily::CPPNewArray}},               // new[](unsigned int, nothrow)
    {LibFunc_ZnajSt11align_val_t, {OpNewLike, 2, 0, -1, 1, MallocFamily::CPPNewArrayAligned}},         // new[](unsigned int, align_val_t)
    {LibFunc_ZnajSt11align_val_tRKSt9nothrow_t, {MallocLike, 3, 0, -1, 1, MallocFamily::CPPNewArrayAligned}}, // new[](unsigned int, align_val_t, nothrow)
    {LibFunc_Znam, {OpNewLike, 1, 0, -1, -1, MallocFamily::CPPNewArray}},                              // new[](unsigned long)
    {LibFunc_ZnamRKSt9nothrow_t, {MallocLike, 2, 0, -1, -1, MallocFamily::CPPNewArray}},               // new[](unsigned long, nothrow)
    {LibFunc_ZnamSt11align_val_t, {OpNewLike, 2, 0, -1, 1, MallocFamily::CPPNewArrayAligned}},         // new[](unsigned long, align_val_t)
    {LibFunc_ZnamSt11align_val_tRKSt9nothrow_t, {MallocLike, 3, 0, -1, 1, MallocFamily::CPPNewArrayAligned}}, // new[](unsigned long, align_val_t, nothrow)
    {LibFunc_msvc_new_int, {OpNewLike, 1, 0, -1, -1, MallocFamily::MSVCNew}},                          // new(unsigned int)
    {LibFunc_msvc_new_int_nothrow, {MallocLike, 2, 0, -1, -1, MallocFamily::MSVCNew}},                 // new(unsigned int, nothrow)
    {LibFunc_msvc_new_longlong, {OpNewLike, 1, 0, -1, -1, MallocFamily::MSVCNew}},                     // new(unsigned long long)
    {LibFunc_msvc_new_longlong_nothrow, {MallocLike, 2, 0, -1, -1, MallocFamily::MSVCNew}},            // new(unsigned long long, nothrow)
    {LibFunc_msvc_new_array_int, {OpNewLike, 1, 0, -1, -1, MallocFamily::MSVCArrayNew}},               // new[](unsigned int)
    {LibFunc_msvc_new_array_int_nothrow, {MallocLike, 2, 0, -1, -1, MallocFamily::MSVCArrayNew}},      // new[](unsigned int, nothrow)
    {LibFunc_msvc_new_array_longlong, {OpNewLike, 1, 0, -1, -1, MallocFamily::MSVCArrayNew}},          // new[](unsigned long long)
    {LibFunc_msvc_new_array_longlong_nothrow, {MallocLike, 2, 0, -1, -1, MallocFamily::MSVCArrayNew}}, // new[](unsigned long long, nothrow)
    {LibFunc_aligned_alloc, {AlignedAllocLike, 2, 1, -1, 0, MallocFamily::Malloc}},
    {LibFunc_memalign, {AlignedAllocLike, 2, 1, -1, 0, MallocFamily::Malloc}},
    {LibFunc_calloc, {CallocLike, 2, 0, 1, -1, MallocFamily::Malloc}},
    {LibFunc_vec_calloc, {CallocLike, 2, 0, 1, -1, MallocFamily::VecMalloc}},
    {LibFunc_realloc, {ReallocLike, 2, 1, -1, -1, MallocFamily::Malloc}},
    {LibFunc_vec_realloc, {ReallocLike, 2, 1, -1, -1, MallocFamily::VecMalloc}},
    {LibFunc_reallocf, {ReallocLike, 2, 1, -1, -1, MallocFamily::Malloc}},
    {LibFunc_strdup, {StrDupLike, 1, -1, -1, -1, MallocFamily::Malloc}},
    {LibFunc_strndup, {StrDupLike, 2, 1, -1, -1, MallocFamily::Malloc}},
    {LibFunc___kmpc_alloc_shared, {MallocLike, 1, 0, -1, -1, MallocFamily::KmpcAllocShared}},
    // TODO: Handle "int posix_memalign(void **, size_t, size_t)"
};
// clang-format on
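
// A brief reading of one row above, for orientation: the calloc entry
// {CallocLike, 2, 0, 1, -1, MallocFamily::Malloc} says the function takes two
// parameters, both are size operands (indices 0 and 1, whose product is the
// allocation size), there is no alignment parameter, and calls are grouped
// with the malloc family.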

static const Function *getCalledFunction(const Value *V, bool &IsNoBuiltin) {
  // Don't care about intrinsics in this case.
  if (isa<IntrinsicInst>(V))
    return nullptr;

  const auto *CB = dyn_cast<CallBase>(V);
  if (!CB)
    return nullptr;

  IsNoBuiltin = CB->isNoBuiltin();

  if (const Function *Callee = CB->getCalledFunction())
    return Callee;
  return nullptr;
}

/// Returns the allocation data for the given value if it's a call to a known
/// allocation function.
static Optional<AllocFnsTy>
getAllocationDataForFunction(const Function *Callee, AllocType AllocTy,
                             const TargetLibraryInfo *TLI) {
  // Make sure that the function is available.
  LibFunc TLIFn;
  if (!TLI || !TLI->getLibFunc(*Callee, TLIFn) || !TLI->has(TLIFn))
    return None;

  const auto *Iter = find_if(
      AllocationFnData, [TLIFn](const std::pair<LibFunc, AllocFnsTy> &P) {
        return P.first == TLIFn;
      });

  if (Iter == std::end(AllocationFnData))
    return None;

  const AllocFnsTy *FnData = &Iter->second;
  if ((FnData->AllocTy & AllocTy) != FnData->AllocTy)
    return None;

  // Check function prototype.
  int FstParam = FnData->FstParam;
  int SndParam = FnData->SndParam;
  FunctionType *FTy = Callee->getFunctionType();

  if (FTy->getReturnType() == Type::getInt8PtrTy(FTy->getContext()) &&
      FTy->getNumParams() == FnData->NumParams &&
      (FstParam < 0 ||
       (FTy->getParamType(FstParam)->isIntegerTy(32) ||
        FTy->getParamType(FstParam)->isIntegerTy(64))) &&
      (SndParam < 0 ||
       FTy->getParamType(SndParam)->isIntegerTy(32) ||
       FTy->getParamType(SndParam)->isIntegerTy(64)))
    return *FnData;
  return None;
}

static Optional<AllocFnsTy>
getAllocationData(const Value *V, AllocType AllocTy,
                  const TargetLibraryInfo *TLI) {
  bool IsNoBuiltinCall;
  if (const Function *Callee = getCalledFunction(V, IsNoBuiltinCall))
    if (!IsNoBuiltinCall)
      return getAllocationDataForFunction(Callee, AllocTy, TLI);
  return None;
}

static Optional<AllocFnsTy>
getAllocationData(const Value *V, AllocType AllocTy,
                  function_ref<const TargetLibraryInfo &(Function &)> GetTLI) {
  bool IsNoBuiltinCall;
  if (const Function *Callee = getCalledFunction(V, IsNoBuiltinCall))
    if (!IsNoBuiltinCall)
      return getAllocationDataForFunction(
          Callee, AllocTy, &GetTLI(const_cast<Function &>(*Callee)));
  return None;
}

static Optional<AllocFnsTy> getAllocationSize(const Value *V,
                                              const TargetLibraryInfo *TLI) {
  bool IsNoBuiltinCall;
  const Function *Callee = getCalledFunction(V, IsNoBuiltinCall);
  if (!Callee)
    return None;

  // Prefer to use existing information over allocsize. This will give us an
  // accurate AllocTy.
  if (!IsNoBuiltinCall)
    if (Optional<AllocFnsTy> Data =
            getAllocationDataForFunction(Callee, AnyAlloc, TLI))
      return Data;

  Attribute Attr = Callee->getFnAttribute(Attribute::AllocSize);
  if (Attr == Attribute())
    return None;

  std::pair<unsigned, Optional<unsigned>> Args = Attr.getAllocSizeArgs();

  AllocFnsTy Result;
  // Because allocsize only tells us how many bytes are allocated, we're not
  // really allowed to assume anything, so we use MallocLike.
  Result.AllocTy = MallocLike;
  Result.NumParams = Callee->getNumOperands();
  Result.FstParam = Args.first;
  Result.SndParam = Args.second.getValueOr(-1);
  // Allocsize has no way to specify an alignment argument.
  Result.AlignParam = -1;
  return Result;
}
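
// For reference, the allocsize attribute encodes size argument indices
// directly in IR. For a hypothetical declaration such as
//   declare i8* @my_alloc(i64, i64) allocsize(0, 1)
// the code above reports FstParam = 0 and SndParam = 1, and the allocation
// size is the product of those two arguments.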

/// Tests if a value is a call or invoke to a library function that
/// allocates or reallocates memory (either malloc, calloc, realloc, or strdup
/// like).
bool llvm::isAllocationFn(const Value *V, const TargetLibraryInfo *TLI) {
  return getAllocationData(V, AnyAlloc, TLI).hasValue();
}
bool llvm::isAllocationFn(
    const Value *V,
    function_ref<const TargetLibraryInfo &(Function &)> GetTLI) {
  return getAllocationData(V, AnyAlloc, GetTLI).hasValue();
}

/// Tests if a value is a call or invoke to a library function that
/// allocates uninitialized memory (such as malloc).
static bool isMallocLikeFn(const Value *V, const TargetLibraryInfo *TLI) {
  return getAllocationData(V, MallocOrOpNewLike, TLI).hasValue();
}

/// Tests if a value is a call or invoke to a library function that
/// allocates uninitialized memory with alignment (such as aligned_alloc).
static bool isAlignedAllocLikeFn(const Value *V, const TargetLibraryInfo *TLI) {
  return getAllocationData(V, AlignedAllocLike, TLI).hasValue();
}

/// Tests if a value is a call or invoke to a library function that
/// allocates zero-filled memory (such as calloc).
static bool isCallocLikeFn(const Value *V, const TargetLibraryInfo *TLI) {
  return getAllocationData(V, CallocLike, TLI).hasValue();
}

/// Tests if a value is a call or invoke to a library function that
/// allocates memory similar to malloc or calloc.
bool llvm::isMallocOrCallocLikeFn(const Value *V,
                                  const TargetLibraryInfo *TLI) {
  return getAllocationData(V, MallocOrCallocLike, TLI).hasValue();
}

/// Tests if a value is a call or invoke to a library function that
/// allocates memory (either malloc, calloc, or strdup like).
bool llvm::isAllocLikeFn(const Value *V, const TargetLibraryInfo *TLI) {
  return getAllocationData(V, AllocLike, TLI).hasValue();
}

/// Tests if a value is a call or invoke to a library function that
/// reallocates memory (e.g., realloc).
bool llvm::isReallocLikeFn(const Value *V, const TargetLibraryInfo *TLI) {
  return getAllocationData(V, ReallocLike, TLI).hasValue();
}

/// Tests if a function is a library function that reallocates memory (e.g.,
/// realloc).
bool llvm::isReallocLikeFn(const Function *F, const TargetLibraryInfo *TLI) {
  return getAllocationDataForFunction(F, ReallocLike, TLI).hasValue();
}

bool llvm::isAllocRemovable(const CallBase *CB, const TargetLibraryInfo *TLI) {
  assert(isAllocationFn(CB, TLI));

  // Note: Removability is highly dependent on the source language. For
  // example, recent C++ requires direct calls to the global allocation
  // [basic.stc.dynamic.allocation] to be observable unless part of a new
  // expression [expr.new paragraph 13].

  // Historically we've treated the C family allocation routines as removable.
  return isAllocLikeFn(CB, TLI);
}

Value *llvm::getAllocAlignment(const CallBase *V,
                               const TargetLibraryInfo *TLI) {
  assert(isAllocationFn(V, TLI));

  const Optional<AllocFnsTy> FnData = getAllocationData(V, AnyAlloc, TLI);
  if (!FnData.hasValue() || FnData->AlignParam < 0) {
    return nullptr;
  }
  return V->getOperand(FnData->AlignParam);
}

/// When we're compiling N-bit code, and the user uses parameters that are
/// greater than N bits (e.g. uint64_t on a 32-bit build), we can run into
/// trouble with APInt size issues. This function handles resizing + overflow
/// checks for us. Check and zext or trunc \p I depending on IntTyBits and
/// I's value.
static bool CheckedZextOrTrunc(APInt &I, unsigned IntTyBits) {
  // More bits than we can handle. Checking the bit width isn't necessary, but
  // it's faster than checking active bits, and should give `false` in the
  // vast majority of cases.
  if (I.getBitWidth() > IntTyBits && I.getActiveBits() > IntTyBits)
    return false;
  if (I.getBitWidth() != IntTyBits)
    I = I.zextOrTrunc(IntTyBits);
  return true;
}

Optional<APInt>
llvm::getAllocSize(const CallBase *CB, const TargetLibraryInfo *TLI,
                   std::function<const Value *(const Value *)> Mapper) {
  // Note: This handles both explicitly listed allocation functions and
  // allocsize. The code structure could stand to be cleaned up a bit.
  Optional<AllocFnsTy> FnData = getAllocationSize(CB, TLI);
  if (!FnData)
    return None;

  // Get the index type for this address space, results and intermediate
  // computations are performed at that width.
  auto &DL = CB->getModule()->getDataLayout();
  const unsigned IntTyBits = DL.getIndexTypeSizeInBits(CB->getType());

  // Handle strdup-like functions separately.
  if (FnData->AllocTy == StrDupLike) {
    APInt Size(IntTyBits, GetStringLength(Mapper(CB->getArgOperand(0))));
    if (!Size)
      return None;

    // Strndup limits strlen.
    if (FnData->FstParam > 0) {
      const ConstantInt *Arg =
          dyn_cast<ConstantInt>(Mapper(CB->getArgOperand(FnData->FstParam)));
      if (!Arg)
        return None;

      APInt MaxSize = Arg->getValue().zextOrSelf(IntTyBits);
      if (Size.ugt(MaxSize))
        Size = MaxSize + 1;
    }
    return Size;
  }

  const ConstantInt *Arg =
      dyn_cast<ConstantInt>(Mapper(CB->getArgOperand(FnData->FstParam)));
  if (!Arg)
    return None;

  APInt Size = Arg->getValue();
  if (!CheckedZextOrTrunc(Size, IntTyBits))
    return None;

  // Size is determined by just 1 parameter.
  if (FnData->SndParam < 0)
    return Size;

  Arg = dyn_cast<ConstantInt>(Mapper(CB->getArgOperand(FnData->SndParam)));
  if (!Arg)
    return None;

  APInt NumElems = Arg->getValue();
  if (!CheckedZextOrTrunc(NumElems, IntTyBits))
    return None;

  bool Overflow;
  Size = Size.umul_ov(NumElems, Overflow);
  if (Overflow)
    return None;
  return Size;
}
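
// Worked example (assuming calloc is recognized through the table above): for
//   %p = call i8* @calloc(i64 10, i64 4)
// both size operands are constant, so getAllocSize() folds the product and
// returns 40; if either operand is not a ConstantInt after Mapper, it returns
// None.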

Constant *llvm::getInitialValueOfAllocation(const CallBase *Alloc,
                                            const TargetLibraryInfo *TLI,
                                            Type *Ty) {
  assert(isAllocationFn(Alloc, TLI));

  // malloc and aligned_alloc are uninitialized (undef)
  if (isMallocLikeFn(Alloc, TLI) || isAlignedAllocLikeFn(Alloc, TLI))
    return UndefValue::get(Ty);

  // calloc zero initializes
  if (isCallocLikeFn(Alloc, TLI))
    return Constant::getNullValue(Ty);

  return nullptr;
}

struct FreeFnsTy {
  unsigned NumParams;
  // Name of default allocator function to group malloc/free calls by family.
  MallocFamily Family;
};

// clang-format off
static const std::pair<LibFunc, FreeFnsTy> FreeFnData[] = {
    {LibFunc_free, {1, MallocFamily::Malloc}},
    {LibFunc_ZdlPv, {1, MallocFamily::CPPNew}},                               // operator delete(void*)
    {LibFunc_ZdaPv, {1, MallocFamily::CPPNewArray}},                          // operator delete[](void*)
    {LibFunc_msvc_delete_ptr32, {1, MallocFamily::MSVCNew}},                  // operator delete(void*)
    {LibFunc_msvc_delete_ptr64, {1, MallocFamily::MSVCNew}},                  // operator delete(void*)
    {LibFunc_msvc_delete_array_ptr32, {1, MallocFamily::MSVCArrayNew}},       // operator delete[](void*)
    {LibFunc_msvc_delete_array_ptr64, {1, MallocFamily::MSVCArrayNew}},       // operator delete[](void*)
    {LibFunc_ZdlPvj, {2, MallocFamily::CPPNew}},                              // delete(void*, uint)
    {LibFunc_ZdlPvm, {2, MallocFamily::CPPNew}},                              // delete(void*, ulong)
    {LibFunc_ZdlPvRKSt9nothrow_t, {2, MallocFamily::CPPNew}},                 // delete(void*, nothrow)
    {LibFunc_ZdlPvSt11align_val_t, {2, MallocFamily::CPPNewAligned}},         // delete(void*, align_val_t)
    {LibFunc_ZdaPvj, {2, MallocFamily::CPPNewArray}},                         // delete[](void*, uint)
    {LibFunc_ZdaPvm, {2, MallocFamily::CPPNewArray}},                         // delete[](void*, ulong)
    {LibFunc_ZdaPvRKSt9nothrow_t, {2, MallocFamily::CPPNewArray}},            // delete[](void*, nothrow)
    {LibFunc_ZdaPvSt11align_val_t, {2, MallocFamily::CPPNewArrayAligned}},    // delete[](void*, align_val_t)
    {LibFunc_msvc_delete_ptr32_int, {2, MallocFamily::MSVCNew}},              // delete(void*, uint)
    {LibFunc_msvc_delete_ptr64_longlong, {2, MallocFamily::MSVCNew}},         // delete(void*, ulonglong)
    {LibFunc_msvc_delete_ptr32_nothrow, {2, MallocFamily::MSVCNew}},          // delete(void*, nothrow)
    {LibFunc_msvc_delete_ptr64_nothrow, {2, MallocFamily::MSVCNew}},          // delete(void*, nothrow)
    {LibFunc_msvc_delete_array_ptr32_int, {2, MallocFamily::MSVCArrayNew}},   // delete[](void*, uint)
    {LibFunc_msvc_delete_array_ptr64_longlong, {2, MallocFamily::MSVCArrayNew}}, // delete[](void*, ulonglong)
    {LibFunc_msvc_delete_array_ptr32_nothrow, {2, MallocFamily::MSVCArrayNew}},  // delete[](void*, nothrow)
    {LibFunc_msvc_delete_array_ptr64_nothrow, {2, MallocFamily::MSVCArrayNew}},  // delete[](void*, nothrow)
    {LibFunc___kmpc_free_shared, {2, MallocFamily::KmpcAllocShared}},         // OpenMP Offloading RTL free
    {LibFunc_ZdlPvSt11align_val_tRKSt9nothrow_t, {3, MallocFamily::CPPNewAligned}},      // delete(void*, align_val_t, nothrow)
    {LibFunc_ZdaPvSt11align_val_tRKSt9nothrow_t, {3, MallocFamily::CPPNewArrayAligned}}, // delete[](void*, align_val_t, nothrow)
    {LibFunc_ZdlPvjSt11align_val_t, {3, MallocFamily::CPPNewAligned}},        // delete(void*, unsigned int, align_val_t)
    {LibFunc_ZdlPvmSt11align_val_t, {3, MallocFamily::CPPNewAligned}},        // delete(void*, unsigned long, align_val_t)
    {LibFunc_ZdaPvjSt11align_val_t, {3, MallocFamily::CPPNewArrayAligned}},   // delete[](void*, unsigned int, align_val_t)
    {LibFunc_ZdaPvmSt11align_val_t, {3, MallocFamily::CPPNewArrayAligned}},   // delete[](void*, unsigned long, align_val_t)
};
// clang-format on
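
// The family is what ties the allocation and deallocation sides together: for
// example, LibFunc_Znwm (operator new) and LibFunc_ZdlPv (operator delete)
// both map to MallocFamily::CPPNew, so getAllocationFamily() below returns
// the same "_Znwm" string for either call.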

Optional<FreeFnsTy> getFreeFunctionDataForFunction(const Function *Callee,
                                                   const LibFunc TLIFn) {
  const auto *Iter =
      find_if(FreeFnData, [TLIFn](const std::pair<LibFunc, FreeFnsTy> &P) {
        return P.first == TLIFn;
      });
  if (Iter == std::end(FreeFnData))
    return None;
  return Iter->second;
}

Optional<StringRef> llvm::getAllocationFamily(const Value *I,
                                              const TargetLibraryInfo *TLI) {
  bool IsNoBuiltin;
  const Function *Callee = getCalledFunction(I, IsNoBuiltin);
  if (Callee == nullptr)
    return None;
  LibFunc TLIFn;
  if (!TLI || !TLI->getLibFunc(*Callee, TLIFn) || !TLI->has(TLIFn))
    return None;
  const auto AllocData = getAllocationDataForFunction(Callee, AnyAlloc, TLI);
  if (AllocData.hasValue())
    return mangledNameForMallocFamily(AllocData.getValue().Family);
  const auto FreeData = getFreeFunctionDataForFunction(Callee, TLIFn);
  if (FreeData.hasValue())
    return mangledNameForMallocFamily(FreeData.getValue().Family);
  return None;
}

/// isLibFreeFunction - Returns true if the function is a builtin free().
bool llvm::isLibFreeFunction(const Function *F, const LibFunc TLIFn) {
  Optional<FreeFnsTy> FnData = getFreeFunctionDataForFunction(F, TLIFn);
  if (!FnData.hasValue())
    return false;

  // Check free prototype.
  // FIXME: workaround for PR5130; this will be obsolete once a nobuiltin
  // attribute exists.
  FunctionType *FTy = F->getFunctionType();
  if (!FTy->getReturnType()->isVoidTy())
    return false;
  if (FTy->getNumParams() != FnData->NumParams)
    return false;
  if (FTy->getParamType(0) != Type::getInt8PtrTy(F->getContext()))
    return false;

  return true;
}

/// isFreeCall - Returns non-null if the value is a call to the builtin free().
const CallInst *llvm::isFreeCall(const Value *I, const TargetLibraryInfo *TLI) {
  bool IsNoBuiltinCall;
  const Function *Callee = getCalledFunction(I, IsNoBuiltinCall);
  if (Callee == nullptr || IsNoBuiltinCall)
    return nullptr;

  LibFunc TLIFn;
  if (!TLI || !TLI->getLibFunc(*Callee, TLIFn) || !TLI->has(TLIFn))
    return nullptr;

  return isLibFreeFunction(Callee, TLIFn) ? dyn_cast<CallInst>(I) : nullptr;
}

//===----------------------------------------------------------------------===//
//  Utility functions to compute size of objects.
//
static APInt getSizeWithOverflow(const SizeOffsetType &Data) {
  if (Data.second.isNegative() || Data.first.ult(Data.second))
    return APInt(Data.first.getBitWidth(), 0);
  return Data.first - Data.second;
}

/// Compute the size of the object pointed to by Ptr. Returns true and the
/// object size in Size if successful, and false otherwise.
/// If RoundToAlign is true, then Size is rounded up to the alignment of
/// allocas, byval arguments, and global variables.
bool llvm::getObjectSize(const Value *Ptr, uint64_t &Size, const DataLayout &DL,
                         const TargetLibraryInfo *TLI, ObjectSizeOpts Opts) {
  ObjectSizeOffsetVisitor Visitor(DL, TLI, Ptr->getContext(), Opts);
  SizeOffsetType Data = Visitor.compute(const_cast<Value *>(Ptr));
  if (!Visitor.bothKnown(Data))
    return false;

  Size = getSizeWithOverflow(Data).getZExtValue();
  return true;
}
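
// For reference, the intrinsic lowered below has the form (typed-pointer
// syntax):
//   declare i64 @llvm.objectsize.i64.p0i8(i8* <ptr>, i1 <min>,
//                                         i1 <nullunknown>, i1 <dynamic>)
// Operand 1 selects min vs. max folding on failure, operand 2 says whether a
// null pointer has unknown size, and operand 3 says whether a runtime
// (dynamic) computation is allowed, matching the operand uses below.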

Value *llvm::lowerObjectSizeCall(IntrinsicInst *ObjectSize,
                                 const DataLayout &DL,
                                 const TargetLibraryInfo *TLI,
                                 bool MustSucceed) {
  assert(ObjectSize->getIntrinsicID() == Intrinsic::objectsize &&
         "ObjectSize must be a call to llvm.objectsize!");

  bool MaxVal = cast<ConstantInt>(ObjectSize->getArgOperand(1))->isZero();
  ObjectSizeOpts EvalOptions;
  // Unless we have to fold this to something, try to be as accurate as
  // possible.
  if (MustSucceed)
    EvalOptions.EvalMode =
        MaxVal ? ObjectSizeOpts::Mode::Max : ObjectSizeOpts::Mode::Min;
  else
    EvalOptions.EvalMode = ObjectSizeOpts::Mode::Exact;

  EvalOptions.NullIsUnknownSize =
      cast<ConstantInt>(ObjectSize->getArgOperand(2))->isOne();

  auto *ResultType = cast<IntegerType>(ObjectSize->getType());
  bool StaticOnly = cast<ConstantInt>(ObjectSize->getArgOperand(3))->isZero();
  if (StaticOnly) {
    // FIXME: Does it make sense to just return a failure value if the size
    // won't fit in the output and `!MustSucceed`?
    uint64_t Size;
    if (getObjectSize(ObjectSize->getArgOperand(0), Size, DL, TLI,
                      EvalOptions) &&
        isUIntN(ResultType->getBitWidth(), Size))
      return ConstantInt::get(ResultType, Size);
  } else {
    LLVMContext &Ctx = ObjectSize->getFunction()->getContext();
    ObjectSizeOffsetEvaluator Eval(DL, TLI, Ctx, EvalOptions);
    SizeOffsetEvalType SizeOffsetPair =
        Eval.compute(ObjectSize->getArgOperand(0));

    if (SizeOffsetPair != ObjectSizeOffsetEvaluator::unknown()) {
      IRBuilder<TargetFolder> Builder(Ctx, TargetFolder(DL));
      Builder.SetInsertPoint(ObjectSize);

      // If we're outside the end of the object, then we can always access
      // exactly 0 bytes.
      Value *ResultSize =
          Builder.CreateSub(SizeOffsetPair.first, SizeOffsetPair.second);
      Value *UseZero =
          Builder.CreateICmpULT(SizeOffsetPair.first, SizeOffsetPair.second);
      ResultSize = Builder.CreateZExtOrTrunc(ResultSize, ResultType);
      Value *Ret = Builder.CreateSelect(
          UseZero, ConstantInt::get(ResultType, 0), ResultSize);

      // The non-constant size expression cannot evaluate to -1.
      if (!isa<Constant>(SizeOffsetPair.first) ||
          !isa<Constant>(SizeOffsetPair.second))
        Builder.CreateAssumption(
            Builder.CreateICmpNE(Ret, ConstantInt::get(ResultType, -1)));

      return Ret;
    }
  }

  if (!MustSucceed)
    return nullptr;

  return ConstantInt::get(ResultType, MaxVal ? -1ULL : 0);
}

STATISTIC(ObjectVisitorArgument,
          "Number of arguments with unsolved size and offset");
STATISTIC(ObjectVisitorLoad,
          "Number of load instructions with unsolved size and offset");

APInt ObjectSizeOffsetVisitor::align(APInt Size, MaybeAlign Alignment) {
  if (Options.RoundToAlign && Alignment)
    return APInt(IntTyBits, alignTo(Size.getZExtValue(), Alignment));
  return Size;
}

ObjectSizeOffsetVisitor::ObjectSizeOffsetVisitor(const DataLayout &DL,
                                                 const TargetLibraryInfo *TLI,
                                                 LLVMContext &Context,
                                                 ObjectSizeOpts Options)
    : DL(DL), TLI(TLI), Options(Options) {
  // Pointer size must be rechecked for each object visited since it could have
  // a different address space.
}

SizeOffsetType ObjectSizeOffsetVisitor::compute(Value *V) {
  unsigned InitialIntTyBits = DL.getIndexTypeSizeInBits(V->getType());

  // Stripping pointer casts can strip address space casts which can change the
  // index type size. The invariant is that we use the value type to determine
  // the index type size and if we stripped address space casts we have to
  // readjust the APInt as we pass it upwards in order for the APInt to match
  // the type the caller passed in.
  APInt Offset(InitialIntTyBits, 0);
  V = V->stripAndAccumulateConstantOffsets(
      DL, Offset, /* AllowNonInbounds */ true, /* AllowInvariantGroup */ true);

  // Later we use the index type size and zero but it will match the type of
  // the value that is passed to computeImpl.
  IntTyBits = DL.getIndexTypeSizeInBits(V->getType());
  Zero = APInt::getZero(IntTyBits);

  bool IndexTypeSizeChanged = InitialIntTyBits != IntTyBits;
  if (!IndexTypeSizeChanged && Offset.isZero())
    return computeImpl(V);

  // We stripped an address space cast that changed the index type size or we
  // accumulated some constant offset (or both). Readjust the bit width to
  // match the argument index type size and apply the offset, as required.
  SizeOffsetType SOT = computeImpl(V);
  if (IndexTypeSizeChanged) {
    if (knownSize(SOT) && !::CheckedZextOrTrunc(SOT.first, InitialIntTyBits))
      SOT.first = APInt();
    if (knownOffset(SOT) && !::CheckedZextOrTrunc(SOT.second, InitialIntTyBits))
      SOT.second = APInt();
  }
  // If the computed offset is "unknown" we cannot add the stripped offset.
  return {SOT.first,
          SOT.second.getBitWidth() > 1 ? SOT.second + Offset : SOT.second};
}

SizeOffsetType ObjectSizeOffsetVisitor::computeImpl(Value *V) {
  if (Instruction *I = dyn_cast<Instruction>(V)) {
    // If we have already seen this instruction, bail out. Cycles can happen in
    // unreachable code after constant propagation.
    if (!SeenInsts.insert(I).second)
      return unknown();

    return visit(*I);
  }
  if (Argument *A = dyn_cast<Argument>(V))
    return visitArgument(*A);
  if (ConstantPointerNull *P = dyn_cast<ConstantPointerNull>(V))
    return visitConstantPointerNull(*P);
  if (GlobalAlias *GA = dyn_cast<GlobalAlias>(V))
    return visitGlobalAlias(*GA);
  if (GlobalVariable *GV = dyn_cast<GlobalVariable>(V))
    return visitGlobalVariable(*GV);
  if (UndefValue *UV = dyn_cast<UndefValue>(V))
    return visitUndefValue(*UV);

  LLVM_DEBUG(dbgs() << "ObjectSizeOffsetVisitor::compute() unhandled value: "
                    << *V << '\n');
  return unknown();
}

bool ObjectSizeOffsetVisitor::CheckedZextOrTrunc(APInt &I) {
  return ::CheckedZextOrTrunc(I, IntTyBits);
}

SizeOffsetType ObjectSizeOffsetVisitor::visitAllocaInst(AllocaInst &I) {
  if (!I.getAllocatedType()->isSized())
    return unknown();

  TypeSize ElemSize = DL.getTypeAllocSize(I.getAllocatedType());
  if (ElemSize.isScalable() && Options.EvalMode != ObjectSizeOpts::Mode::Min)
    return unknown();
  APInt Size(IntTyBits, ElemSize.getKnownMinSize());
  if (!I.isArrayAllocation())
    return std::make_pair(align(Size, I.getAlign()), Zero);

  Value *ArraySize = I.getArraySize();
  if (const ConstantInt *C = dyn_cast<ConstantInt>(ArraySize)) {
    APInt NumElems = C->getValue();
    if (!CheckedZextOrTrunc(NumElems))
      return unknown();

    bool Overflow;
    Size = Size.umul_ov(NumElems, Overflow);
    return Overflow ? unknown()
                    : std::make_pair(align(Size, I.getAlign()), Zero);
  }
  return unknown();
}
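
// As a concrete example (assuming a DataLayout where i32 is 4 bytes), for
//   %buf = alloca i32, i32 4
// the visitor above reports a size of 16 bytes and a zero offset; a
// non-constant array size makes the result unknown.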

SizeOffsetType ObjectSizeOffsetVisitor::visitArgument(Argument &A) {
  Type *MemoryTy = A.getPointeeInMemoryValueType();
  // No interprocedural analysis is done at the moment.
  if (!MemoryTy || !MemoryTy->isSized()) {
    ++ObjectVisitorArgument;
    return unknown();
  }

  APInt Size(IntTyBits, DL.getTypeAllocSize(MemoryTy));
  return std::make_pair(align(Size, A.getParamAlign()), Zero);
}

SizeOffsetType ObjectSizeOffsetVisitor::visitCallBase(CallBase &CB) {
  auto Mapper = [](const Value *V) { return V; };
  if (Optional<APInt> Size = getAllocSize(&CB, TLI, Mapper))
    return std::make_pair(*Size, Zero);
  return unknown();
}

SizeOffsetType
ObjectSizeOffsetVisitor::visitConstantPointerNull(ConstantPointerNull &CPN) {
  // If null is unknown, there's nothing we can do. Additionally, non-zero
  // address spaces can make use of null, so we don't presume to know anything
  // about that.
  //
  // TODO: How should this work with address space casts? We currently just
  // drop them on the floor, but it's unclear what we should do when a NULL
  // from addrspace(1) gets casted to addrspace(0) (or vice-versa).
  if (Options.NullIsUnknownSize || CPN.getType()->getAddressSpace())
    return unknown();
  return std::make_pair(Zero, Zero);
}

SizeOffsetType
ObjectSizeOffsetVisitor::visitExtractElementInst(ExtractElementInst &) {
  return unknown();
}

SizeOffsetType
ObjectSizeOffsetVisitor::visitExtractValueInst(ExtractValueInst &) {
  // Easy cases were already folded by previous passes.
  return unknown();
}

SizeOffsetType ObjectSizeOffsetVisitor::visitGlobalAlias(GlobalAlias &GA) {
  if (GA.isInterposable())
    return unknown();
  return compute(GA.getAliasee());
}

SizeOffsetType ObjectSizeOffsetVisitor::visitGlobalVariable(GlobalVariable &GV) {
  if (!GV.hasDefinitiveInitializer())
    return unknown();

  APInt Size(IntTyBits, DL.getTypeAllocSize(GV.getValueType()));
  return std::make_pair(align(Size, GV.getAlign()), Zero);
}

SizeOffsetType ObjectSizeOffsetVisitor::visitIntToPtrInst(IntToPtrInst &) {
  // clueless
  return unknown();
}

SizeOffsetType ObjectSizeOffsetVisitor::visitLoadInst(LoadInst &) {
  ++ObjectVisitorLoad;
  return unknown();
}

SizeOffsetType ObjectSizeOffsetVisitor::visitPHINode(PHINode &) {
  // Too complex to analyze statically.
  return unknown();
}

SizeOffsetType ObjectSizeOffsetVisitor::visitSelectInst(SelectInst &I) {
  SizeOffsetType TrueSide = compute(I.getTrueValue());
  SizeOffsetType FalseSide = compute(I.getFalseValue());
  if (bothKnown(TrueSide) && bothKnown(FalseSide)) {
    if (TrueSide == FalseSide)
      return TrueSide;

    APInt TrueResult = getSizeWithOverflow(TrueSide);
    APInt FalseResult = getSizeWithOverflow(FalseSide);

    if (TrueResult == FalseResult)
      return TrueSide;
    if (Options.EvalMode == ObjectSizeOpts::Mode::Min) {
      if (TrueResult.slt(FalseResult))
        return TrueSide;
      return FalseSide;
    }
    if (Options.EvalMode == ObjectSizeOpts::Mode::Max) {
      if (TrueResult.sgt(FalseResult))
        return TrueSide;
      return FalseSide;
    }
  }
  return unknown();
}

SizeOffsetType ObjectSizeOffsetVisitor::visitUndefValue(UndefValue &) {
  return std::make_pair(Zero, Zero);
}

SizeOffsetType ObjectSizeOffsetVisitor::visitInstruction(Instruction &I) {
  LLVM_DEBUG(dbgs() << "ObjectSizeOffsetVisitor unknown instruction:" << I
                    << '\n');
  return unknown();
}

ObjectSizeOffsetEvaluator::ObjectSizeOffsetEvaluator(
    const DataLayout &DL, const TargetLibraryInfo *TLI, LLVMContext &Context,
    ObjectSizeOpts EvalOpts)
    : DL(DL), TLI(TLI), Context(Context),
      Builder(Context, TargetFolder(DL),
              IRBuilderCallbackInserter(
                  [&](Instruction *I) { InsertedInstructions.insert(I); })),
      EvalOpts(EvalOpts) {
  // IntTy and Zero must be set for each compute() since the address space may
  // be different for later objects.
}

SizeOffsetEvalType ObjectSizeOffsetEvaluator::compute(Value *V) {
  // XXX - Are vectors of pointers possible here?
  IntTy = cast<IntegerType>(DL.getIndexType(V->getType()));
  Zero = ConstantInt::get(IntTy, 0);

  SizeOffsetEvalType Result = compute_(V);

  if (!bothKnown(Result)) {
    // Erase everything that was computed in this iteration from the cache, so
    // that no dangling references are left behind. We could be a bit smarter
    // if we kept a dependency graph. It's probably not worth the complexity.
    for (const Value *SeenVal : SeenVals) {
      CacheMapTy::iterator CacheIt = CacheMap.find(SeenVal);
      // Non-computable results can be safely cached.
      if (CacheIt != CacheMap.end() && anyKnown(CacheIt->second))
        CacheMap.erase(CacheIt);
    }

    // Erase any instructions we inserted as part of the traversal.
    for (Instruction *I : InsertedInstructions) {
      I->replaceAllUsesWith(UndefValue::get(I->getType()));
      I->eraseFromParent();
    }
  }

  SeenVals.clear();
  InsertedInstructions.clear();
  return Result;
}

SizeOffsetEvalType ObjectSizeOffsetEvaluator::compute_(Value *V) {
  ObjectSizeOffsetVisitor Visitor(DL, TLI, Context, EvalOpts);
  SizeOffsetType Const = Visitor.compute(V);
  if (Visitor.bothKnown(Const))
    return std::make_pair(ConstantInt::get(Context, Const.first),
                          ConstantInt::get(Context, Const.second));

  V = V->stripPointerCasts();

  // Check cache.
  CacheMapTy::iterator CacheIt = CacheMap.find(V);
  if (CacheIt != CacheMap.end())
    return CacheIt->second;

  // Always generate code immediately before the instruction being
  // processed, so that the generated code dominates the same BBs.
  BuilderTy::InsertPointGuard Guard(Builder);
  if (Instruction *I = dyn_cast<Instruction>(V))
    Builder.SetInsertPoint(I);

  // Now compute the size and offset.
  SizeOffsetEvalType Result;

  // Record the pointers that were handled in this run, so that they can be
  // cleaned later if something fails. We also use this set to break cycles
  // that can occur in dead code.
  if (!SeenVals.insert(V).second) {
    Result = unknown();
  } else if (GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
    Result = visitGEPOperator(*GEP);
  } else if (Instruction *I = dyn_cast<Instruction>(V)) {
    Result = visit(*I);
  } else if (isa<Argument>(V) ||
             (isa<ConstantExpr>(V) &&
              cast<ConstantExpr>(V)->getOpcode() == Instruction::IntToPtr) ||
             isa<GlobalAlias>(V) || isa<GlobalVariable>(V)) {
    // Ignore values where we cannot do more than ObjectSizeVisitor.
    Result = unknown();
  } else {
    LLVM_DEBUG(
        dbgs() << "ObjectSizeOffsetEvaluator::compute() unhandled value: " << *V
               << '\n');
    Result = unknown();
  }

  // Don't reuse CacheIt since it may be invalid at this point.
  CacheMap[V] = Result;
  return Result;
}

SizeOffsetEvalType ObjectSizeOffsetEvaluator::visitAllocaInst(AllocaInst &I) {
  if (!I.getAllocatedType()->isSized())
    return unknown();

  // Must be a VLA.
  assert(I.isArrayAllocation());

  // If needed, adjust the alloca's operand size to match the pointer size.
  // Subsequent math operations expect the types to match.
  Value *ArraySize = Builder.CreateZExtOrTrunc(
      I.getArraySize(), DL.getIntPtrType(I.getContext()));
  assert(ArraySize->getType() == Zero->getType() &&
         "Expected zero constant to have pointer type");

  Value *Size = ConstantInt::get(ArraySize->getType(),
                                 DL.getTypeAllocSize(I.getAllocatedType()));
  Size = Builder.CreateMul(Size, ArraySize);
  return std::make_pair(Size, Zero);
}

SizeOffsetEvalType ObjectSizeOffsetEvaluator::visitCallBase(CallBase &CB) {
  Optional<AllocFnsTy> FnData = getAllocationSize(&CB, TLI);
  if (!FnData)
    return unknown();

  // Handle strdup-like functions separately.
  if (FnData->AllocTy == StrDupLike) {
    // TODO: implement evaluation of strdup/strndup
    return unknown();
  }

  Value *FirstArg = CB.getArgOperand(FnData->FstParam);
  FirstArg = Builder.CreateZExtOrTrunc(FirstArg, IntTy);
  if (FnData->SndParam < 0)
    return std::make_pair(FirstArg, Zero);

  Value *SecondArg = CB.getArgOperand(FnData->SndParam);
  SecondArg = Builder.CreateZExtOrTrunc(SecondArg, IntTy);
  Value *Size = Builder.CreateMul(FirstArg, SecondArg);
  return std::make_pair(Size, Zero);
}

SizeOffsetEvalType
ObjectSizeOffsetEvaluator::visitExtractElementInst(ExtractElementInst &) {
  return unknown();
}

SizeOffsetEvalType
ObjectSizeOffsetEvaluator::visitExtractValueInst(ExtractValueInst &) {
  return unknown();
}

SizeOffsetEvalType
ObjectSizeOffsetEvaluator::visitGEPOperator(GEPOperator &GEP) {
  SizeOffsetEvalType PtrData = compute_(GEP.getPointerOperand());
  if (!bothKnown(PtrData))
    return unknown();

  Value *Offset = EmitGEPOffset(&Builder, DL, &GEP, /*NoAssumptions=*/true);
  Offset = Builder.CreateAdd(PtrData.second, Offset);
  return std::make_pair(PtrData.first, Offset);
}
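
// Sketch of the effect of visitGEPOperator above: for a GEP such as
//   %q = getelementptr i32, i32* %p, i64 %i
// EmitGEPOffset() materializes the byte offset of the GEP (roughly 4 * %i,
// assuming 4-byte i32), which is then added to the offset already computed
// for %p; the underlying object size is left unchanged.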

SizeOffsetEvalType ObjectSizeOffsetEvaluator::visitIntToPtrInst(IntToPtrInst &) {
  // clueless
  return unknown();
}

SizeOffsetEvalType ObjectSizeOffsetEvaluator::visitLoadInst(LoadInst &) {
  return unknown();
}

SizeOffsetEvalType ObjectSizeOffsetEvaluator::visitPHINode(PHINode &PHI) {
  // Create 2 PHIs: one for size and another for offset.
  PHINode *SizePHI = Builder.CreatePHI(IntTy, PHI.getNumIncomingValues());
  PHINode *OffsetPHI = Builder.CreatePHI(IntTy, PHI.getNumIncomingValues());

  // Insert right away in the cache to handle recursive PHIs.
  CacheMap[&PHI] = std::make_pair(SizePHI, OffsetPHI);

  // Compute offset/size for each PHI incoming pointer.
  for (unsigned i = 0, e = PHI.getNumIncomingValues(); i != e; ++i) {
    Builder.SetInsertPoint(&*PHI.getIncomingBlock(i)->getFirstInsertionPt());
    SizeOffsetEvalType EdgeData = compute_(PHI.getIncomingValue(i));

    if (!bothKnown(EdgeData)) {
      OffsetPHI->replaceAllUsesWith(UndefValue::get(IntTy));
      OffsetPHI->eraseFromParent();
      InsertedInstructions.erase(OffsetPHI);
      SizePHI->replaceAllUsesWith(UndefValue::get(IntTy));
      SizePHI->eraseFromParent();
      InsertedInstructions.erase(SizePHI);
      return unknown();
    }
    SizePHI->addIncoming(EdgeData.first, PHI.getIncomingBlock(i));
    OffsetPHI->addIncoming(EdgeData.second, PHI.getIncomingBlock(i));
  }

  Value *Size = SizePHI, *Offset = OffsetPHI;
  if (Value *Tmp = SizePHI->hasConstantValue()) {
    Size = Tmp;
    SizePHI->replaceAllUsesWith(Size);
    SizePHI->eraseFromParent();
    InsertedInstructions.erase(SizePHI);
  }
  if (Value *Tmp = OffsetPHI->hasConstantValue()) {
    Offset = Tmp;
    OffsetPHI->replaceAllUsesWith(Offset);
    OffsetPHI->eraseFromParent();
    InsertedInstructions.erase(OffsetPHI);
  }
  return std::make_pair(Size, Offset);
}

SizeOffsetEvalType ObjectSizeOffsetEvaluator::visitSelectInst(SelectInst &I) {
  SizeOffsetEvalType TrueSide = compute_(I.getTrueValue());
  SizeOffsetEvalType FalseSide = compute_(I.getFalseValue());

  if (!bothKnown(TrueSide) || !bothKnown(FalseSide))
    return unknown();
  if (TrueSide == FalseSide)
    return TrueSide;

  Value *Size =
      Builder.CreateSelect(I.getCondition(), TrueSide.first, FalseSide.first);
  Value *Offset =
      Builder.CreateSelect(I.getCondition(), TrueSide.second, FalseSide.second);
  return std::make_pair(Size, Offset);
}

SizeOffsetEvalType ObjectSizeOffsetEvaluator::visitInstruction(Instruction &I) {
  LLVM_DEBUG(dbgs() << "ObjectSizeOffsetEvaluator unknown instruction:" << I
                    << '\n');
  return unknown();
}