//===- WholeProgramDevirt.cpp - Whole program virtual call optimization ---===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass implements whole program optimization of virtual calls in cases
// where we know (via bitset information) that the list of callees is fixed.
// This includes the following:
// - Single implementation devirtualization: if a virtual call has a single
//   possible callee, replace all calls with a direct call to that callee.
// - Virtual constant propagation: if the virtual function's return type is an
//   integer <=64 bits and all possible callees are readnone, for each class
//   and each list of constant arguments: evaluate the function, store the
//   return value alongside the virtual table, and rewrite each virtual call
//   as a load from the virtual table.
// - Uniform return value optimization: if the conditions for virtual constant
//   propagation hold and each function returns the same constant value,
//   replace each virtual call with that constant.
// - Unique return value optimization for i1 return values: if the conditions
//   for virtual constant propagation hold and a single vtable's function
//   returns 0, or a single vtable's function returns 1, replace each virtual
//   call with a comparison of the vptr against that vtable's address.
//
//===----------------------------------------------------------------------===//
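
// As an illustrative (hypothetical) example of the first two optimizations,
// consider a program whose only classes deriving from Base are A and B:
//
//   struct Base { virtual int getKind() = 0; };
//   struct A : Base { int getKind() override { return 1; } };
//   struct B : Base { int getKind() override { return 2; } };
//
// If A were the only implementation, every call p->getKind() could be replaced
// with a direct call to A::getKind (single implementation devirtualization).
// With both A and B present, and getKind readnone, the pass can instead
// evaluate getKind once per vtable, store the constant result next to each
// vtable, and rewrite each call as a load at a fixed offset from the vtable
// pointer (virtual constant propagation).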

#include "llvm/Transforms/IPO/WholeProgramDevirt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Module.h"
#include "llvm/Pass.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/IPO.h"
#include "llvm/Transforms/Utils/Evaluator.h"
#include "llvm/Transforms/Utils/Local.h"

#include <set>

using namespace llvm;
using namespace wholeprogramdevirt;

#define DEBUG_TYPE "wholeprogramdevirt"

// Find the minimum offset at which we may store a value of size Size bits. If
// IsAfter is set, look for an offset after the object, otherwise look for an
// offset before the object.
uint64_t
wholeprogramdevirt::findLowestOffset(ArrayRef<VirtualCallTarget> Targets,
                                     bool IsAfter, uint64_t Size) {
  // Find a minimum offset taking into account only vtable sizes.
  uint64_t MinByte = 0;
  for (const VirtualCallTarget &Target : Targets) {
    if (IsAfter)
      MinByte = std::max(MinByte, Target.minAfterBytes());
    else
      MinByte = std::max(MinByte, Target.minBeforeBytes());
  }

  // Build a vector of arrays of bytes covering, for each target, a slice of
  // the used region (see AccumBitVector::BytesUsed in
  // llvm/Transforms/IPO/WholeProgramDevirt.h) starting at MinByte.
  // Effectively, this aligns the used regions to start at MinByte.
  //
  // In this example, A, B and C are vtables, # is a byte already allocated for
  // a virtual function pointer, AAAA... (etc.) are the used regions for the
  // vtables and Offset(X) is the value computed for the Offset variable below
  // for X.
  //
  //                     Offset(A)
  //                     |       |
  //                             |MinByte
  // A: ################AAAAAAAA|AAAAAAAA
  // B: ########BBBBBBBBBBBBBBBB|BBBB
  // C: ########################|CCCCCCCCCCCCCCCC
  //             |   Offset(B)   |
  //
  // This code produces the slices of A, B and C that appear after the divider
  // at MinByte.
  std::vector<ArrayRef<uint8_t>> Used;
  for (const VirtualCallTarget &Target : Targets) {
    ArrayRef<uint8_t> VTUsed = IsAfter ? Target.BS->Bits->After.BytesUsed
                                       : Target.BS->Bits->Before.BytesUsed;
    uint64_t Offset = IsAfter ? MinByte - Target.minAfterBytes()
                              : MinByte - Target.minBeforeBytes();

    // Disregard used regions that are smaller than Offset. These are
    // effectively all-free regions that do not need to be checked.
    if (VTUsed.size() > Offset)
      Used.push_back(VTUsed.slice(Offset));
  }

  if (Size == 1) {
    // Find a free bit in each member of Used.
    for (unsigned I = 0;; ++I) {
      uint8_t BitsUsed = 0;
      for (auto &&B : Used)
        if (I < B.size())
          BitsUsed |= B[I];
      if (BitsUsed != 0xff)
        return (MinByte + I) * 8 +
               countTrailingZeros(uint8_t(~BitsUsed), ZB_Undefined);
    }
  } else {
    // Find a free (Size/8) byte region in each member of Used.
    // FIXME: see if alignment helps.
    for (unsigned I = 0;; ++I) {
      for (auto &&B : Used) {
        unsigned Byte = 0;
        while ((I + Byte) < B.size() && Byte < (Size / 8)) {
          if (B[I + Byte])
            goto NextI;
          ++Byte;
        }
      }
      return (MinByte + I) * 8;
    NextI:;
    }
  }
}

void wholeprogramdevirt::setBeforeReturnValues(
    MutableArrayRef<VirtualCallTarget> Targets, uint64_t AllocBefore,
    unsigned BitWidth, int64_t &OffsetByte, uint64_t &OffsetBit) {
  if (BitWidth == 1)
    OffsetByte = -(AllocBefore / 8 + 1);
  else
    OffsetByte = -((AllocBefore + 7) / 8 + (BitWidth + 7) / 8);
  OffsetBit = AllocBefore % 8;

  for (VirtualCallTarget &Target : Targets) {
    if (BitWidth == 1)
      Target.setBeforeBit(AllocBefore);
    else
      Target.setBeforeBytes(AllocBefore, (BitWidth + 7) / 8);
  }
}

void wholeprogramdevirt::setAfterReturnValues(
    MutableArrayRef<VirtualCallTarget> Targets, uint64_t AllocAfter,
    unsigned BitWidth, int64_t &OffsetByte, uint64_t &OffsetBit) {
  if (BitWidth == 1)
    OffsetByte = AllocAfter / 8;
  else
    OffsetByte = (AllocAfter + 7) / 8;
  OffsetBit = AllocAfter % 8;

  for (VirtualCallTarget &Target : Targets) {
    if (BitWidth == 1)
      Target.setAfterBit(AllocAfter);
    else
      Target.setAfterBytes(AllocAfter, (BitWidth + 7) / 8);
  }
}
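
// A brief worked example of the offset arithmetic above (the numbers are
// chosen purely for illustration): if findLowestOffset returns
// AllocBefore = 10 for an i1 return value, setBeforeReturnValues computes
//
//   OffsetByte = -(10 / 8 + 1) = -2   (the byte at offset -2 from the vtable
//                                      pointer at each rewritten call site)
//   OffsetBit  =  10 % 8       =  2   (the bit tested within that byte)
//
// For a 32-bit return value with AllocAfter = 12, setAfterReturnValues
// computes OffsetByte = (12 + 7) / 8 = 2; OffsetBit is only consulted for
// single-bit (i1) loads in tryVirtualConstProp below.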

VirtualCallTarget::VirtualCallTarget(Function *Fn, const BitSetInfo *BS)
    : Fn(Fn), BS(BS),
      IsBigEndian(Fn->getParent()->getDataLayout().isBigEndian()) {}

namespace {

// A slot in a set of virtual tables. The BitSetID identifies the set of
// virtual tables, and the ByteOffset is the offset in bytes from the address
// point to the virtual function pointer.
struct VTableSlot {
  Metadata *BitSetID;
  uint64_t ByteOffset;
};

} // anonymous namespace

namespace llvm {

template <> struct DenseMapInfo<VTableSlot> {
  static VTableSlot getEmptyKey() {
    return {DenseMapInfo<Metadata *>::getEmptyKey(),
            DenseMapInfo<uint64_t>::getEmptyKey()};
  }
  static VTableSlot getTombstoneKey() {
    return {DenseMapInfo<Metadata *>::getTombstoneKey(),
            DenseMapInfo<uint64_t>::getTombstoneKey()};
  }
  static unsigned getHashValue(const VTableSlot &I) {
    return DenseMapInfo<Metadata *>::getHashValue(I.BitSetID) ^
           DenseMapInfo<uint64_t>::getHashValue(I.ByteOffset);
  }
  static bool isEqual(const VTableSlot &LHS,
                      const VTableSlot &RHS) {
    return LHS.BitSetID == RHS.BitSetID && LHS.ByteOffset == RHS.ByteOffset;
  }
};

} // namespace llvm

namespace {

// A virtual call site. VTable is the loaded virtual table pointer, and CS is
// the indirect virtual call.
struct VirtualCallSite {
  Value *VTable;
  CallSite CS;

  void replaceAndErase(Value *New) {
    CS->replaceAllUsesWith(New);
    if (auto II = dyn_cast<InvokeInst>(CS.getInstruction())) {
      BranchInst::Create(II->getNormalDest(), CS.getInstruction());
      II->getUnwindDest()->removePredecessor(II->getParent());
    }
    CS->eraseFromParent();
  }
};

struct DevirtModule {
  Module &M;
  IntegerType *Int8Ty;
  PointerType *Int8PtrTy;
  IntegerType *Int32Ty;

  MapVector<VTableSlot, std::vector<VirtualCallSite>> CallSlots;

  DevirtModule(Module &M)
      : M(M), Int8Ty(Type::getInt8Ty(M.getContext())),
        Int8PtrTy(Type::getInt8PtrTy(M.getContext())),
        Int32Ty(Type::getInt32Ty(M.getContext())) {}

  void findLoadCallsAtConstantOffset(Metadata *BitSet, Value *Ptr,
                                     uint64_t Offset, Value *VTable);
  void findCallsAtConstantOffset(Metadata *BitSet, Value *Ptr, uint64_t Offset,
                                 Value *VTable);

  void buildBitSets(std::vector<VTableBits> &Bits,
                    DenseMap<Metadata *, std::set<BitSetInfo>> &BitSets);
  bool tryFindVirtualCallTargets(std::vector<VirtualCallTarget> &TargetsForSlot,
                                 const std::set<BitSetInfo> &BitSetInfos,
                                 uint64_t ByteOffset);
  bool trySingleImplDevirt(ArrayRef<VirtualCallTarget> TargetsForSlot,
                           MutableArrayRef<VirtualCallSite> CallSites);
  bool tryEvaluateFunctionsWithArgs(
      MutableArrayRef<VirtualCallTarget> TargetsForSlot,
      ArrayRef<ConstantInt *> Args);
  bool tryUniformRetValOpt(IntegerType *RetType,
                           ArrayRef<VirtualCallTarget> TargetsForSlot,
                           MutableArrayRef<VirtualCallSite> CallSites);
  bool tryUniqueRetValOpt(unsigned BitWidth,
                          ArrayRef<VirtualCallTarget> TargetsForSlot,
                          MutableArrayRef<VirtualCallSite> CallSites);
  bool tryVirtualConstProp(MutableArrayRef<VirtualCallTarget> TargetsForSlot,
                           ArrayRef<VirtualCallSite> CallSites);

  void rebuildGlobal(VTableBits &B);

  bool run();
};

struct WholeProgramDevirt : public ModulePass {
  static char ID;
  WholeProgramDevirt() : ModulePass(ID) {
    initializeWholeProgramDevirtPass(*PassRegistry::getPassRegistry());
  }
  bool runOnModule(Module &M) { return DevirtModule(M).run(); }
};

} // anonymous namespace

INITIALIZE_PASS(WholeProgramDevirt, "wholeprogramdevirt",
                "Whole program devirtualization", false, false)
char WholeProgramDevirt::ID = 0;

ModulePass *llvm::createWholeProgramDevirtPass() {
  return new WholeProgramDevirt;
}
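
// The two helpers below pattern-match the IR for a virtual call. Roughly (a
// sketch with illustrative value names, after any bitcasts have been
// stripped), the code they walk looks like:
//
//   %vtable = load i8*, i8** %vtableptr
//   %ok = call i1 @llvm.bitset.test(i8* %vtable, metadata !"_ZTS1A")
//   call void @llvm.assume(i1 %ok)
//   %fptrptr = getelementptr i8, i8* %vtable, i64 ByteOffset
//   %fptr = load i32 (i8*)*, i32 (i8*)** %fptrptr
//   %result = call i32 %fptr(i8* %obj)
//
// findLoadCallsAtConstantOffset follows bitcasts and constant-index GEPs of
// the vtable pointer to the loads of function pointers, and
// findCallsAtConstantOffset then follows each loaded function pointer to the
// call or invoke instructions that use it, recording a VirtualCallSite keyed
// by (bitset, byte offset).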

// Search for virtual calls that call FPtr and add them to CallSlots.
void DevirtModule::findCallsAtConstantOffset(Metadata *BitSet, Value *FPtr,
                                             uint64_t Offset, Value *VTable) {
  for (const Use &U : FPtr->uses()) {
    Value *User = U.getUser();
    if (isa<BitCastInst>(User)) {
      findCallsAtConstantOffset(BitSet, User, Offset, VTable);
    } else if (auto CI = dyn_cast<CallInst>(User)) {
      CallSlots[{BitSet, Offset}].push_back({VTable, CI});
    } else if (auto II = dyn_cast<InvokeInst>(User)) {
      CallSlots[{BitSet, Offset}].push_back({VTable, II});
    }
  }
}

// Search for virtual calls that load from VPtr and add them to CallSlots.
void DevirtModule::findLoadCallsAtConstantOffset(Metadata *BitSet, Value *VPtr,
                                                 uint64_t Offset,
                                                 Value *VTable) {
  for (const Use &U : VPtr->uses()) {
    Value *User = U.getUser();
    if (isa<BitCastInst>(User)) {
      findLoadCallsAtConstantOffset(BitSet, User, Offset, VTable);
    } else if (isa<LoadInst>(User)) {
      findCallsAtConstantOffset(BitSet, User, Offset, VTable);
    } else if (auto GEP = dyn_cast<GetElementPtrInst>(User)) {
      // Take into account the GEP offset.
      if (VPtr == GEP->getPointerOperand() && GEP->hasAllConstantIndices()) {
        SmallVector<Value *, 8> Indices(GEP->op_begin() + 1, GEP->op_end());
        uint64_t GEPOffset = M.getDataLayout().getIndexedOffsetInType(
            GEP->getSourceElementType(), Indices);
        findLoadCallsAtConstantOffset(BitSet, User, Offset + GEPOffset, VTable);
      }
    }
  }
}
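
// buildBitSets (below) consumes the module-level !llvm.bitsets metadata
// emitted by the frontend. As a sketch (identifiers and offsets are
// illustrative), each entry names a bitset, a vtable global (or an alias to
// one), and the byte offset of the address point within that global:
//
//   !llvm.bitsets = !{!0, !1}
//   !0 = !{!"_ZTS1A", [3 x i8*]* @_ZTV1A, i64 16}
//   !1 = !{!"_ZTS1B", [3 x i8*]* @_ZTV1B, i64 16}
//
// The result is a map from each bitset identifier to the set of
// (vtable, address point offset) pairs that are members of it.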

void DevirtModule::buildBitSets(
    std::vector<VTableBits> &Bits,
    DenseMap<Metadata *, std::set<BitSetInfo>> &BitSets) {
  NamedMDNode *BitSetNM = M.getNamedMetadata("llvm.bitsets");
  if (!BitSetNM)
    return;

  DenseMap<GlobalVariable *, VTableBits *> GVToBits;
  Bits.reserve(BitSetNM->getNumOperands());
  for (auto Op : BitSetNM->operands()) {
    auto OpConstMD = dyn_cast_or_null<ConstantAsMetadata>(Op->getOperand(1));
    if (!OpConstMD)
      continue;
    auto BitSetID = Op->getOperand(0).get();

    Constant *OpConst = OpConstMD->getValue();
    if (auto GA = dyn_cast<GlobalAlias>(OpConst))
      OpConst = GA->getAliasee();
    auto OpGlobal = dyn_cast<GlobalVariable>(OpConst);
    if (!OpGlobal)
      continue;

    uint64_t Offset =
        cast<ConstantInt>(
            cast<ConstantAsMetadata>(Op->getOperand(2))->getValue())
            ->getZExtValue();

    VTableBits *&BitsPtr = GVToBits[OpGlobal];
    if (!BitsPtr) {
      Bits.emplace_back();
      Bits.back().GV = OpGlobal;
      Bits.back().ObjectSize = M.getDataLayout().getTypeAllocSize(
          OpGlobal->getInitializer()->getType());
      BitsPtr = &Bits.back();
    }
    BitSets[BitSetID].insert({BitsPtr, Offset});
  }
}

bool DevirtModule::tryFindVirtualCallTargets(
    std::vector<VirtualCallTarget> &TargetsForSlot,
    const std::set<BitSetInfo> &BitSetInfos, uint64_t ByteOffset) {
  for (const BitSetInfo &BS : BitSetInfos) {
    if (!BS.Bits->GV->isConstant())
      return false;

    auto Init = dyn_cast<ConstantArray>(BS.Bits->GV->getInitializer());
    if (!Init)
      return false;
    ArrayType *VTableTy = Init->getType();

    uint64_t ElemSize =
        M.getDataLayout().getTypeAllocSize(VTableTy->getElementType());
    uint64_t GlobalSlotOffset = BS.Offset + ByteOffset;
    if (GlobalSlotOffset % ElemSize != 0)
      return false;

    unsigned Op = GlobalSlotOffset / ElemSize;
    if (Op >= Init->getNumOperands())
      return false;

    auto Fn = dyn_cast<Function>(Init->getOperand(Op)->stripPointerCasts());
    if (!Fn)
      return false;

    // We can disregard __cxa_pure_virtual as a possible call target, as
    // calls to pure virtuals are UB.
    if (Fn->getName() == "__cxa_pure_virtual")
      continue;

    TargetsForSlot.push_back({Fn, &BS});
  }

  // Give up if we couldn't find any targets.
  return !TargetsForSlot.empty();
}

bool DevirtModule::trySingleImplDevirt(
    ArrayRef<VirtualCallTarget> TargetsForSlot,
    MutableArrayRef<VirtualCallSite> CallSites) {
  // See if the program contains a single implementation of this virtual
  // function.
  Function *TheFn = TargetsForSlot[0].Fn;
  for (auto &&Target : TargetsForSlot)
    if (TheFn != Target.Fn)
      return false;

  // If so, update each call site to call that implementation directly.
  for (auto &&VCallSite : CallSites) {
    VCallSite.CS.setCalledFunction(ConstantExpr::getBitCast(
        TheFn, VCallSite.CS.getCalledValue()->getType()));
  }
  return true;
}

bool DevirtModule::tryEvaluateFunctionsWithArgs(
    MutableArrayRef<VirtualCallTarget> TargetsForSlot,
    ArrayRef<ConstantInt *> Args) {
  // Evaluate each function and store the result in each target's RetVal
  // field.
  for (VirtualCallTarget &Target : TargetsForSlot) {
    if (Target.Fn->arg_size() != Args.size() + 1)
      return false;
    for (unsigned I = 0; I != Args.size(); ++I)
      if (Target.Fn->getFunctionType()->getParamType(I + 1) !=
          Args[I]->getType())
        return false;

    Evaluator Eval(M.getDataLayout(), nullptr);
    SmallVector<Constant *, 2> EvalArgs;
    EvalArgs.push_back(
        Constant::getNullValue(Target.Fn->getFunctionType()->getParamType(0)));
    EvalArgs.insert(EvalArgs.end(), Args.begin(), Args.end());
    Constant *RetVal;
    if (!Eval.EvaluateFunction(Target.Fn, RetVal, EvalArgs) ||
        !isa<ConstantInt>(RetVal))
      return false;
    Target.RetVal = cast<ConstantInt>(RetVal)->getZExtValue();
  }
  return true;
}

bool DevirtModule::tryUniformRetValOpt(
    IntegerType *RetType, ArrayRef<VirtualCallTarget> TargetsForSlot,
    MutableArrayRef<VirtualCallSite> CallSites) {
  // Uniform return value optimization. If all functions return the same
  // constant, replace all calls with that constant.
  uint64_t TheRetVal = TargetsForSlot[0].RetVal;
  for (const VirtualCallTarget &Target : TargetsForSlot)
    if (Target.RetVal != TheRetVal)
      return false;

  auto TheRetValConst = ConstantInt::get(RetType, TheRetVal);
  for (auto Call : CallSites)
    Call.replaceAndErase(TheRetValConst);
  return true;
}
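
// An illustration of the rewrite performed by tryUniqueRetValOpt below (value
// names and the offset are hypothetical): if, among all targets, only the
// implementation in @_ZTV1B's vtable returns true, then
//
//   %result = call i1 %fptr(i8* %obj)
//
// is replaced with a comparison of the vtable pointer against B's address
// point:
//
//   %unique = getelementptr i8, i8* bitcast ([3 x i8*]* @_ZTV1B to i8*), i64 16
//   %result = icmp eq i8* %vtable, %unique
//
// (icmp ne is used instead when the unique implementation is the one that
// returns false).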

bool DevirtModule::tryUniqueRetValOpt(
    unsigned BitWidth, ArrayRef<VirtualCallTarget> TargetsForSlot,
    MutableArrayRef<VirtualCallSite> CallSites) {
  // IsOne controls whether we look for a 0 or a 1.
  auto tryUniqueRetValOptFor = [&](bool IsOne) {
    const BitSetInfo *UniqueBitSet = nullptr;
    for (const VirtualCallTarget &Target : TargetsForSlot) {
      if (Target.RetVal == (IsOne ? 1 : 0)) {
        if (UniqueBitSet)
          return false;
        UniqueBitSet = Target.BS;
      }
    }

    // We should have found a unique bit set or bailed out by now. We already
    // checked for a uniform return value in tryUniformRetValOpt.
    assert(UniqueBitSet);

    // Replace each call with the comparison.
    for (auto &&Call : CallSites) {
      IRBuilder<> B(Call.CS.getInstruction());
      Value *OneAddr = B.CreateBitCast(UniqueBitSet->Bits->GV, Int8PtrTy);
      OneAddr = B.CreateConstGEP1_64(OneAddr, UniqueBitSet->Offset);
      Value *Cmp = B.CreateICmp(IsOne ? ICmpInst::ICMP_EQ : ICmpInst::ICMP_NE,
                                Call.VTable, OneAddr);
      Call.replaceAndErase(Cmp);
    }
    return true;
  };

  if (BitWidth == 1) {
    if (tryUniqueRetValOptFor(true))
      return true;
    if (tryUniqueRetValOptFor(false))
      return true;
  }
  return false;
}
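
// tryVirtualConstProp (below) rewrites each call that passes only constant
// arguments into a load of the return value that was evaluated ahead of time
// and stored alongside the vtable. Sketching the result for a 32-bit return
// value (the offset and names are illustrative):
//
//   %result = call i32 %fptr(i8* %obj, i32 1)
//
// becomes
//
//   %valaddr = getelementptr i8, i8* %vtable, i64 -4
//   %valptr  = bitcast i8* %valaddr to i32*
//   %result  = load i32, i32* %valptr
//
// For i1 return values a single byte is loaded instead, and the relevant bit
// is extracted with an 'and' followed by an 'icmp ne 0'.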

bool DevirtModule::tryVirtualConstProp(
    MutableArrayRef<VirtualCallTarget> TargetsForSlot,
    ArrayRef<VirtualCallSite> CallSites) {
  // This only works if the function returns an integer.
  auto RetType = dyn_cast<IntegerType>(TargetsForSlot[0].Fn->getReturnType());
  if (!RetType)
    return false;
  unsigned BitWidth = RetType->getBitWidth();
  if (BitWidth > 64)
    return false;

  // Make sure that each function does not access memory, takes at least one
  // argument, does not use its first argument (which we assume is 'this'),
  // and has the same return type.
  for (VirtualCallTarget &Target : TargetsForSlot) {
    if (!Target.Fn->doesNotAccessMemory() || Target.Fn->arg_empty() ||
        !Target.Fn->arg_begin()->use_empty() ||
        Target.Fn->getReturnType() != RetType)
      return false;
  }

  // Group call sites by the list of constant arguments they pass.
  // The comparator ensures deterministic ordering.
  struct ByAPIntValue {
    bool operator()(const std::vector<ConstantInt *> &A,
                    const std::vector<ConstantInt *> &B) const {
      return std::lexicographical_compare(
          A.begin(), A.end(), B.begin(), B.end(),
          [](ConstantInt *AI, ConstantInt *BI) {
            return AI->getValue().ult(BI->getValue());
          });
    }
  };
  std::map<std::vector<ConstantInt *>, std::vector<VirtualCallSite>,
           ByAPIntValue>
      VCallSitesByConstantArg;
  for (auto &&VCallSite : CallSites) {
    std::vector<ConstantInt *> Args;
    if (VCallSite.CS.getType() != RetType)
      continue;
    for (auto &&Arg :
         make_range(VCallSite.CS.arg_begin() + 1, VCallSite.CS.arg_end())) {
      if (!isa<ConstantInt>(Arg))
        break;
      Args.push_back(cast<ConstantInt>(&Arg));
    }
    if (Args.size() + 1 != VCallSite.CS.arg_size())
      continue;

    VCallSitesByConstantArg[Args].push_back(VCallSite);
  }

  for (auto &&CSByConstantArg : VCallSitesByConstantArg) {
    if (!tryEvaluateFunctionsWithArgs(TargetsForSlot, CSByConstantArg.first))
      continue;

    if (tryUniformRetValOpt(RetType, TargetsForSlot, CSByConstantArg.second))
      continue;

    if (tryUniqueRetValOpt(BitWidth, TargetsForSlot, CSByConstantArg.second))
      continue;

    // Find an allocation offset in bits in all vtables in the bitset.
    uint64_t AllocBefore =
        findLowestOffset(TargetsForSlot, /*IsAfter=*/false, BitWidth);
    uint64_t AllocAfter =
        findLowestOffset(TargetsForSlot, /*IsAfter=*/true, BitWidth);

    // Calculate the total amount of padding needed to store a value at both
    // ends of the object.
    uint64_t TotalPaddingBefore = 0, TotalPaddingAfter = 0;
    for (auto &&Target : TargetsForSlot) {
      TotalPaddingBefore += std::max<int64_t>(
          (AllocBefore + 7) / 8 - Target.allocatedBeforeBytes() - 1, 0);
      TotalPaddingAfter += std::max<int64_t>(
          (AllocAfter + 7) / 8 - Target.allocatedAfterBytes() - 1, 0);
    }

    // If the amount of padding is too large, give up.
    // FIXME: do something smarter here.
    if (std::min(TotalPaddingBefore, TotalPaddingAfter) > 128)
      continue;

    // Calculate the offset to the value as a (possibly negative) byte offset
    // and (if applicable) a bit offset, and store the values in the targets.
    int64_t OffsetByte;
    uint64_t OffsetBit;
    if (TotalPaddingBefore <= TotalPaddingAfter)
      setBeforeReturnValues(TargetsForSlot, AllocBefore, BitWidth, OffsetByte,
                            OffsetBit);
    else
      setAfterReturnValues(TargetsForSlot, AllocAfter, BitWidth, OffsetByte,
                           OffsetBit);

    // Rewrite each call to a load from OffsetByte/OffsetBit.
    for (auto Call : CSByConstantArg.second) {
      IRBuilder<> B(Call.CS.getInstruction());
      Value *Addr = B.CreateConstGEP1_64(Call.VTable, OffsetByte);
      if (BitWidth == 1) {
        Value *Bits = B.CreateLoad(Addr);
        Value *Bit = ConstantInt::get(Int8Ty, 1ULL << OffsetBit);
        Value *BitsAndBit = B.CreateAnd(Bits, Bit);
        auto IsBitSet = B.CreateICmpNE(BitsAndBit, ConstantInt::get(Int8Ty, 0));
        Call.replaceAndErase(IsBitSet);
      } else {
        Value *ValAddr = B.CreateBitCast(Addr, RetType->getPointerTo());
        Value *Val = B.CreateLoad(RetType, ValAddr);
        Call.replaceAndErase(Val);
      }
    }
  }
  return true;
}
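
// rebuildGlobal (below) wraps each vtable that received new constant data in a
// larger private global and re-points the original name at the old initializer
// through an alias. Schematically (names, types and sizes are illustrative):
//
//   @_ZTV1A = constant [3 x i8*] [ ... ]
//
// becomes
//
//   @0      = private constant { [8 x i8], [3 x i8*], [8 x i8] } { ... }
//   @_ZTV1A = alias getelementptr ({ [8 x i8], [3 x i8*], [8 x i8] },
//                                  { [8 x i8], [3 x i8*], [8 x i8] }* @0,
//                                  i32 0, i32 1)
//
// so existing references to @_ZTV1A still see the original vtable layout while
// the evaluated return values live in the bytes immediately before and after
// it.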

void DevirtModule::rebuildGlobal(VTableBits &B) {
  if (B.Before.Bytes.empty() && B.After.Bytes.empty())
    return;

  // Align each byte array to pointer width.
  unsigned PointerSize = M.getDataLayout().getPointerSize();
  B.Before.Bytes.resize(alignTo(B.Before.Bytes.size(), PointerSize));
  B.After.Bytes.resize(alignTo(B.After.Bytes.size(), PointerSize));

  // Before was stored in reverse order; flip it now.
  for (size_t I = 0, Size = B.Before.Bytes.size(); I != Size / 2; ++I)
    std::swap(B.Before.Bytes[I], B.Before.Bytes[Size - 1 - I]);

  // Build an anonymous global containing the before bytes, followed by the
  // original initializer, followed by the after bytes.
  auto NewInit = ConstantStruct::getAnon(
      {ConstantDataArray::get(M.getContext(), B.Before.Bytes),
       B.GV->getInitializer(),
       ConstantDataArray::get(M.getContext(), B.After.Bytes)});
  auto NewGV =
      new GlobalVariable(M, NewInit->getType(), B.GV->isConstant(),
                         GlobalVariable::PrivateLinkage, NewInit, "", B.GV);
  NewGV->setSection(B.GV->getSection());
  NewGV->setComdat(B.GV->getComdat());

  // Build an alias named after the original global, pointing at the second
  // element (the original initializer).
  auto Alias = GlobalAlias::create(
      B.GV->getInitializer()->getType(), 0, B.GV->getLinkage(), "",
      ConstantExpr::getGetElementPtr(
          NewInit->getType(), NewGV,
          ArrayRef<Constant *>{ConstantInt::get(Int32Ty, 0),
                               ConstantInt::get(Int32Ty, 1)}),
      &M);
  Alias->setVisibility(B.GV->getVisibility());
  Alias->takeName(B.GV);

  B.GV->replaceAllUsesWith(Alias);
  B.GV->eraseFromParent();
}

bool DevirtModule::run() {
  Function *BitSetTestFunc =
      M.getFunction(Intrinsic::getName(Intrinsic::bitset_test));
  if (!BitSetTestFunc || BitSetTestFunc->use_empty())
    return false;

  Function *AssumeFunc = M.getFunction(Intrinsic::getName(Intrinsic::assume));
  if (!AssumeFunc || AssumeFunc->use_empty())
    return false;

  // Find all virtual calls via a virtual table pointer %p under an assumption
  // of the form llvm.assume(llvm.bitset.test(%p, %md)). This indicates that %p
  // points to a vtable in the bitset %md. Group calls by (bitset, offset) pair
  // (effectively the identity of the virtual function) and store to CallSlots.
  DenseSet<Value *> SeenPtrs;
  for (auto I = BitSetTestFunc->use_begin(), E = BitSetTestFunc->use_end();
       I != E;) {
    auto CI = dyn_cast<CallInst>(I->getUser());
    ++I;
    if (!CI)
      continue;

    // Find llvm.assume intrinsics for this llvm.bitset.test call.
    SmallVector<CallInst *, 1> Assumes;
    for (const Use &CIU : CI->uses()) {
      auto AssumeCI = dyn_cast<CallInst>(CIU.getUser());
      if (AssumeCI && AssumeCI->getCalledValue() == AssumeFunc)
        Assumes.push_back(AssumeCI);
    }

    // If we found any, search for virtual calls based on %p and add them to
    // CallSlots.
    if (!Assumes.empty()) {
      Metadata *BitSet =
          cast<MetadataAsValue>(CI->getArgOperand(1))->getMetadata();
      Value *Ptr = CI->getArgOperand(0)->stripPointerCasts();
      if (SeenPtrs.insert(Ptr).second)
        findLoadCallsAtConstantOffset(BitSet, Ptr, 0, CI->getArgOperand(0));
    }

    // We no longer need the assumes or the bitset test.
    for (auto Assume : Assumes)
      Assume->eraseFromParent();
    // We can't use RecursivelyDeleteTriviallyDeadInstructions here because we
    // may use the vtable argument later.
    if (CI->use_empty())
      CI->eraseFromParent();
  }

  // Rebuild llvm.bitsets metadata into a map for easy lookup.
  std::vector<VTableBits> Bits;
  DenseMap<Metadata *, std::set<BitSetInfo>> BitSets;
  buildBitSets(Bits, BitSets);
  if (BitSets.empty())
    return true;

  // For each (bitset, offset) pair:
  bool DidVirtualConstProp = false;
  for (auto &S : CallSlots) {
    // Search each of the vtables in the bitset for the virtual function
    // implementation at offset S.first.ByteOffset, and add to TargetsForSlot.
    std::vector<VirtualCallTarget> TargetsForSlot;
    if (!tryFindVirtualCallTargets(TargetsForSlot, BitSets[S.first.BitSetID],
                                   S.first.ByteOffset))
      continue;

    if (trySingleImplDevirt(TargetsForSlot, S.second))
      continue;

    DidVirtualConstProp |= tryVirtualConstProp(TargetsForSlot, S.second);
  }

  // Rebuild each global we touched as part of virtual constant propagation to
  // include the before and after bytes.
  if (DidVirtualConstProp)
    for (VTableBits &B : Bits)
      rebuildGlobal(B);

  return true;
}