//===-- IPO/OpenMPOpt.cpp - Collection of OpenMP specific optimizations ---===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// OpenMP specific optimizations:
//
// - Deduplication of runtime calls, e.g., omp_get_thread_num.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/IPO/OpenMPOpt.h"

#include "llvm/ADT/EnumeratedArray.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/CallGraph.h"
#include "llvm/Analysis/CallGraphSCCPass.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Frontend/OpenMP/OMPConstants.h"
#include "llvm/Frontend/OpenMP/OMPIRBuilder.h"
#include "llvm/InitializePasses.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Transforms/IPO.h"
#include "llvm/Transforms/IPO/Attributor.h"
#include "llvm/Transforms/Utils/CallGraphUpdater.h"

using namespace llvm;
using namespace omp;
using namespace types;

#define DEBUG_TYPE "openmp-opt"

static cl::opt<bool> DisableOpenMPOptimizations(
    "openmp-opt-disable", cl::ZeroOrMore,
    cl::desc("Disable OpenMP specific optimizations."), cl::Hidden,
    cl::init(false));

static cl::opt<bool> PrintICVValues("openmp-print-icv-values", cl::init(false),
                                    cl::Hidden);

STATISTIC(NumOpenMPRuntimeCallsDeduplicated,
          "Number of OpenMP runtime calls deduplicated");
STATISTIC(NumOpenMPParallelRegionsDeleted,
          "Number of OpenMP parallel regions deleted");
STATISTIC(NumOpenMPRuntimeFunctionsIdentified,
          "Number of OpenMP runtime functions identified");
STATISTIC(NumOpenMPRuntimeFunctionUsesIdentified,
          "Number of OpenMP runtime function uses identified");

#if !defined(NDEBUG)
static constexpr auto TAG = "[" DEBUG_TYPE "]";
#endif

/// Helper struct to store tracked ICV values at specific instructions.
struct ICVValue {
  Instruction *Inst;
  Value *TrackedValue;

  ICVValue(Instruction *I, Value *Val) : Inst(I), TrackedValue(Val) {}
};

namespace llvm {

// Provide DenseMapInfo for ICVValue
template <> struct DenseMapInfo<ICVValue> {
  using InstInfo = DenseMapInfo<Instruction *>;
  using ValueInfo = DenseMapInfo<Value *>;

  static inline ICVValue getEmptyKey() {
    return ICVValue(InstInfo::getEmptyKey(), ValueInfo::getEmptyKey());
  };

  static inline ICVValue getTombstoneKey() {
    return ICVValue(InstInfo::getTombstoneKey(), ValueInfo::getTombstoneKey());
  };

  static unsigned getHashValue(const ICVValue &ICVVal) {
    return detail::combineHashValue(
        InstInfo::getHashValue(ICVVal.Inst),
        ValueInfo::getHashValue(ICVVal.TrackedValue));
  }

  static bool isEqual(const ICVValue &LHS, const ICVValue &RHS) {
    return InstInfo::isEqual(LHS.Inst, RHS.Inst) &&
           ValueInfo::isEqual(LHS.TrackedValue, RHS.TrackedValue);
  }
};

} // end namespace llvm

namespace {

struct AAICVTracker;

/// OpenMP specific information. For now, stores RFIs and ICVs also needed for
/// Attributor runs.
struct OMPInformationCache : public InformationCache {
  OMPInformationCache(Module &M, AnalysisGetter &AG,
                      BumpPtrAllocator &Allocator, SetVector<Function *> *CGSCC,
                      SmallPtrSetImpl<Function *> &ModuleSlice)
      : InformationCache(M, AG, Allocator, CGSCC), ModuleSlice(ModuleSlice),
        OMPBuilder(M) {
    OMPBuilder.initialize();
    initializeRuntimeFunctions();
    initializeInternalControlVars();
  }

  /// Generic information that describes an internal control variable.
  struct InternalControlVarInfo {
    /// The kind, as described by InternalControlVar enum.
    InternalControlVar Kind;

    /// The name of the ICV.
    StringRef Name;

    /// Environment variable associated with this ICV.
    StringRef EnvVarName;

    /// Initial value kind.
    ICVInitValue InitKind;

    /// Initial value.
    ConstantInt *InitValue;

    /// Setter RTL function associated with this ICV.
    RuntimeFunction Setter;

    /// Getter RTL function associated with this ICV.
    RuntimeFunction Getter;

    /// RTL Function corresponding to the override clause of this ICV
    RuntimeFunction Clause;
  };

  /// Generic information that describes a runtime function
  struct RuntimeFunctionInfo {

    /// The kind, as described by the RuntimeFunction enum.
    RuntimeFunction Kind;

    /// The name of the function.
    StringRef Name;

    /// Flag to indicate a variadic function.
    bool IsVarArg;

    /// The return type of the function.
    Type *ReturnType;

    /// The argument types of the function.
    SmallVector<Type *, 8> ArgumentTypes;

    /// The declaration if available.
    Function *Declaration = nullptr;

    /// Uses of this runtime function per function containing the use.
    using UseVector = SmallVector<Use *, 16>;

    /// Return the vector of uses in function \p F.
    UseVector &getOrCreateUseVector(Function *F) {
      std::shared_ptr<UseVector> &UV = UsesMap[F];
      if (!UV)
        UV = std::make_shared<UseVector>();
      return *UV;
    }

    /// Return the vector of uses in function \p F or `nullptr` if there are
    /// none.
    const UseVector *getUseVector(Function &F) const {
      auto I = UsesMap.find(&F);
      if (I != UsesMap.end())
        return I->second.get();
      return nullptr;
    }

    /// Return how many functions contain uses of this runtime function.
    size_t getNumFunctionsWithUses() const { return UsesMap.size(); }

    /// Return the number of arguments (or the minimal number for variadic
    /// functions).
    size_t getNumArgs() const { return ArgumentTypes.size(); }

    /// Run the callback \p CB on each use and forget the use if the result is
    /// true. The callback will be fed the function in which the use was
    /// encountered as second argument.
    void foreachUse(function_ref<bool(Use &, Function &)> CB) {
      for (auto &It : UsesMap)
        foreachUse(CB, It.first, It.second.get());
    }

    /// Run the callback \p CB on each use within the function \p F and forget
    /// the use if the result is true.
    void foreachUse(function_ref<bool(Use &, Function &)> CB, Function *F,
                    UseVector *Uses = nullptr) {
      SmallVector<unsigned, 8> ToBeDeleted;
      ToBeDeleted.clear();

      unsigned Idx = 0;
      UseVector &UV = Uses ?
                          *Uses : getOrCreateUseVector(F);

      for (Use *U : UV) {
        if (CB(*U, *F))
          ToBeDeleted.push_back(Idx);
        ++Idx;
      }

      // Remove the to-be-deleted indices in reverse order as prior
      // modifications will not modify the smaller indices.
      while (!ToBeDeleted.empty()) {
        unsigned Idx = ToBeDeleted.pop_back_val();
        UV[Idx] = UV.back();
        UV.pop_back();
      }
    }

  private:
    /// Map from functions to all uses of this runtime function contained in
    /// them.
    DenseMap<Function *, std::shared_ptr<UseVector>> UsesMap;
  };

  /// The slice of the module we are allowed to look at.
  SmallPtrSetImpl<Function *> &ModuleSlice;

  /// An OpenMP-IR-Builder instance
  OpenMPIRBuilder OMPBuilder;

  /// Map from runtime function kind to the runtime function description.
  EnumeratedArray<RuntimeFunctionInfo, RuntimeFunction,
                  RuntimeFunction::OMPRTL___last>
      RFIs;

  /// Map from ICV kind to the ICV description.
  EnumeratedArray<InternalControlVarInfo, InternalControlVar,
                  InternalControlVar::ICV___last>
      ICVs;

  /// Helper to initialize all internal control variable information for those
  /// defined in OMPKinds.def.
  void initializeInternalControlVars() {
#define ICV_RT_SET(_Name, RTL)                                                 \
  {                                                                            \
    auto &ICV = ICVs[_Name];                                                   \
    ICV.Setter = RTL;                                                          \
  }
#define ICV_RT_GET(Name, RTL)                                                  \
  {                                                                            \
    auto &ICV = ICVs[Name];                                                    \
    ICV.Getter = RTL;                                                          \
  }
#define ICV_DATA_ENV(Enum, _Name, _EnvVarName, Init)                           \
  {                                                                            \
    auto &ICV = ICVs[Enum];                                                    \
    ICV.Name = _Name;                                                          \
    ICV.Kind = Enum;                                                           \
    ICV.InitKind = Init;                                                       \
    ICV.EnvVarName = _EnvVarName;                                              \
    switch (ICV.InitKind) {                                                    \
    case ICV_IMPLEMENTATION_DEFINED:                                           \
      ICV.InitValue = nullptr;                                                 \
      break;                                                                   \
    case ICV_ZERO:                                                             \
      ICV.InitValue =                                                          \
          ConstantInt::get(Type::getInt32Ty(Int32->getContext()), 0);          \
      break;                                                                   \
    case ICV_FALSE:                                                            \
      ICV.InitValue = ConstantInt::getFalse(Int1->getContext());               \
      break;                                                                   \
    case ICV_LAST:                                                             \
      break;                                                                   \
    }                                                                          \
  }
#include "llvm/Frontend/OpenMP/OMPKinds.def"
  }

  /// Returns true if the function declaration \p F matches the runtime
  /// function types, that is, return type \p RTFRetType, and argument types
  /// \p RTFArgTypes.
  static bool declMatchesRTFTypes(Function *F, Type *RTFRetType,
                                  SmallVector<Type *, 8> &RTFArgTypes) {
    // TODO: We should output information to the user (under debug output
    //       and via remarks).

    if (!F)
      return false;
    if (F->getReturnType() != RTFRetType)
      return false;
    if (F->arg_size() != RTFArgTypes.size())
      return false;

    auto RTFTyIt = RTFArgTypes.begin();
    for (Argument &Arg : F->args()) {
      if (Arg.getType() != *RTFTyIt)
        return false;

      ++RTFTyIt;
    }

    return true;
  }

  /// Helper to initialize all runtime function information for those defined
  /// in OpenMPKinds.def.
  void initializeRuntimeFunctions() {
    // Helper to collect all uses of the declaration in the UsesMap.
    auto CollectUses = [&](RuntimeFunctionInfo &RFI) {
      unsigned NumUses = 0;
      if (!RFI.Declaration)
        return NumUses;
      OMPBuilder.addAttributes(RFI.Kind, *RFI.Declaration);

      NumOpenMPRuntimeFunctionsIdentified += 1;
      NumOpenMPRuntimeFunctionUsesIdentified += RFI.Declaration->getNumUses();

      // TODO: We directly convert uses into proper calls and unknown uses.
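      // Descriptive note: the loop below buckets each use of the declaration
      // by the function that contains it. Instruction users outside the
      // module slice are skipped; non-instruction users (e.g., constant
      // expressions) are collected under the nullptr key.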
      for (Use &U : RFI.Declaration->uses()) {
        if (Instruction *UserI = dyn_cast<Instruction>(U.getUser())) {
          if (ModuleSlice.count(UserI->getFunction())) {
            RFI.getOrCreateUseVector(UserI->getFunction()).push_back(&U);
            ++NumUses;
          }
        } else {
          RFI.getOrCreateUseVector(nullptr).push_back(&U);
          ++NumUses;
        }
      }
      return NumUses;
    };

    Module &M = *((*ModuleSlice.begin())->getParent());

#define OMP_RTL(_Enum, _Name, _IsVarArg, _ReturnType, ...)                     \
  {                                                                            \
    SmallVector<Type *, 8> ArgsTypes({__VA_ARGS__});                           \
    Function *F = M.getFunction(_Name);                                        \
    if (declMatchesRTFTypes(F, _ReturnType, ArgsTypes)) {                      \
      auto &RFI = RFIs[_Enum];                                                 \
      RFI.Kind = _Enum;                                                        \
      RFI.Name = _Name;                                                        \
      RFI.IsVarArg = _IsVarArg;                                                \
      RFI.ReturnType = _ReturnType;                                            \
      RFI.ArgumentTypes = std::move(ArgsTypes);                                \
      RFI.Declaration = F;                                                     \
      unsigned NumUses = CollectUses(RFI);                                     \
      (void)NumUses;                                                           \
      LLVM_DEBUG({                                                             \
        dbgs() << TAG << RFI.Name << (RFI.Declaration ? "" : " not")           \
               << " found\n";                                                  \
        if (RFI.Declaration)                                                   \
          dbgs() << TAG << "-> got " << NumUses << " uses in "                 \
                 << RFI.getNumFunctionsWithUses()                              \
                 << " different functions.\n";                                 \
      });                                                                      \
    }                                                                          \
  }
#include "llvm/Frontend/OpenMP/OMPKinds.def"

    // TODO: We should attach the attributes defined in OMPKinds.def.
  }
};

struct OpenMPOpt {

  using OptimizationRemarkGetter =
      function_ref<OptimizationRemarkEmitter &(Function *)>;

  OpenMPOpt(SmallVectorImpl<Function *> &SCC, CallGraphUpdater &CGUpdater,
            OptimizationRemarkGetter OREGetter,
            OMPInformationCache &OMPInfoCache, Attributor &A)
      : M(*(*SCC.begin())->getParent()), SCC(SCC), CGUpdater(CGUpdater),
        OREGetter(OREGetter), OMPInfoCache(OMPInfoCache), A(A) {}

  /// Run all OpenMP optimizations on the underlying SCC/ModuleSlice.
  bool run() {
    bool Changed = false;

    LLVM_DEBUG(dbgs() << TAG << "Run on SCC with " << SCC.size()
                      << " functions in a slice with "
                      << OMPInfoCache.ModuleSlice.size() << " functions\n");

    /// Print initial ICV values for testing.
    /// FIXME: This should be done from the Attributor once it is added.
    if (PrintICVValues) {
      InternalControlVar ICVs[] = {ICV_nthreads, ICV_active_levels, ICV_cancel};

      for (Function *F : OMPInfoCache.ModuleSlice) {
        for (auto ICV : ICVs) {
          auto ICVInfo = OMPInfoCache.ICVs[ICV];
          auto Remark = [&](OptimizationRemark OR) {
            return OR << "OpenMP ICV " << ore::NV("OpenMPICV", ICVInfo.Name)
                      << " Value: "
                      << (ICVInfo.InitValue
                              ? ICVInfo.InitValue->getValue().toString(10, true)
                              : "IMPLEMENTATION_DEFINED");
          };

          emitRemarkOnFunction(F, "OpenMPICVTracker", Remark);
        }
      }
    }

    Changed |= runAttributor();
    Changed |= deduplicateRuntimeCalls();
    Changed |= deleteParallelRegions();

    return Changed;
  }

  /// Return the call if \p U is a callee use in a regular call. If \p RFI is
  /// given it has to be the callee or a nullptr is returned.
  static CallInst *getCallIfRegularCall(
      Use &U, OMPInformationCache::RuntimeFunctionInfo *RFI = nullptr) {
    CallInst *CI = dyn_cast<CallInst>(U.getUser());
    if (CI && CI->isCallee(&U) && !CI->hasOperandBundles() &&
        (!RFI || CI->getCalledFunction() == RFI->Declaration))
      return CI;
    return nullptr;
  }

  /// Return the call if \p V is a regular call. If \p RFI is given it has to
  /// be the callee or a nullptr is returned.
  static CallInst *getCallIfRegularCall(
      Value &V, OMPInformationCache::RuntimeFunctionInfo *RFI = nullptr) {
    CallInst *CI = dyn_cast<CallInst>(&V);
    if (CI && !CI->hasOperandBundles() &&
        (!RFI || CI->getCalledFunction() == RFI->Declaration))
      return CI;
    return nullptr;
  }

private:
  /// Try to delete parallel regions if possible.
  bool deleteParallelRegions() {
    const unsigned CallbackCalleeOperand = 2;

    OMPInformationCache::RuntimeFunctionInfo &RFI =
        OMPInfoCache.RFIs[OMPRTL___kmpc_fork_call];

    if (!RFI.Declaration)
      return false;

    bool Changed = false;
    auto DeleteCallCB = [&](Use &U, Function &) {
      CallInst *CI = getCallIfRegularCall(U);
      if (!CI)
        return false;
      auto *Fn = dyn_cast<Function>(
          CI->getArgOperand(CallbackCalleeOperand)->stripPointerCasts());
      if (!Fn)
        return false;
      if (!Fn->onlyReadsMemory())
        return false;
      if (!Fn->hasFnAttribute(Attribute::WillReturn))
        return false;

      LLVM_DEBUG(dbgs() << TAG << "Delete read-only parallel region in "
                        << CI->getCaller()->getName() << "\n");

      auto Remark = [&](OptimizationRemark OR) {
        return OR << "Parallel region in "
                  << ore::NV("OpenMPParallelDelete", CI->getCaller()->getName())
                  << " deleted";
      };
      emitRemark<OptimizationRemark>(CI, "OpenMPParallelRegionDeletion",
                                     Remark);

      CGUpdater.removeCallSite(*CI);
      CI->eraseFromParent();
      Changed = true;
      ++NumOpenMPParallelRegionsDeleted;
      return true;
    };

    RFI.foreachUse(DeleteCallCB);

    return Changed;
  }

  /// Try to eliminate runtime calls by reusing existing ones.
  bool deduplicateRuntimeCalls() {
    bool Changed = false;

    RuntimeFunction DeduplicableRuntimeCallIDs[] = {
        OMPRTL_omp_get_num_threads,
        OMPRTL_omp_in_parallel,
        OMPRTL_omp_get_cancellation,
        OMPRTL_omp_get_thread_limit,
        OMPRTL_omp_get_supported_active_levels,
        OMPRTL_omp_get_level,
        OMPRTL_omp_get_ancestor_thread_num,
        OMPRTL_omp_get_team_size,
        OMPRTL_omp_get_active_level,
        OMPRTL_omp_in_final,
        OMPRTL_omp_get_proc_bind,
        OMPRTL_omp_get_num_places,
        OMPRTL_omp_get_num_procs,
        OMPRTL_omp_get_place_num,
        OMPRTL_omp_get_partition_num_places,
        OMPRTL_omp_get_partition_place_nums};

    // Global-tid is handled separately.
    SmallSetVector<Value *, 16> GTIdArgs;
    collectGlobalThreadIdArguments(GTIdArgs);
    LLVM_DEBUG(dbgs() << TAG << "Found " << GTIdArgs.size()
                      << " global thread ID arguments\n");

    for (Function *F : SCC) {
      for (auto DeduplicableRuntimeCallID : DeduplicableRuntimeCallIDs)
        deduplicateRuntimeCalls(*F,
                                OMPInfoCache.RFIs[DeduplicableRuntimeCallID]);

      // __kmpc_global_thread_num is special as we can replace it with an
      // argument in enough cases to make it worth trying.
      Value *GTIdArg = nullptr;
      for (Argument &Arg : F->args())
        if (GTIdArgs.count(&Arg)) {
          GTIdArg = &Arg;
          break;
        }
      Changed |= deduplicateRuntimeCalls(
          *F, OMPInfoCache.RFIs[OMPRTL___kmpc_global_thread_num], GTIdArg);
    }

    return Changed;
  }

  static Value *combinedIdentStruct(Value *CurrentIdent, Value *NextIdent,
                                    bool GlobalOnly, bool &SingleChoice) {
    if (CurrentIdent == NextIdent)
      return CurrentIdent;

    // TODO: Figure out how to actually combine multiple debug locations. For
    //       now we just keep an existing one if there is a single choice.
    if (!GlobalOnly || isa<GlobalValue>(NextIdent)) {
      SingleChoice = !CurrentIdent;
      return NextIdent;
    }
    return nullptr;
  }

  /// Return a `struct ident_t*` value that represents the ones used in the
  /// calls of \p RFI inside of \p F. If \p GlobalOnly is true, we will not
  /// return a local `struct ident_t*`. For now, if we cannot find a suitable
  /// return value we create one from scratch. We also do not yet combine
  /// information, e.g., the source locations, see combinedIdentStruct.
  Value *
  getCombinedIdentFromCallUsesIn(OMPInformationCache::RuntimeFunctionInfo &RFI,
                                 Function &F, bool GlobalOnly) {
    bool SingleChoice = true;
    Value *Ident = nullptr;
    auto CombineIdentStruct = [&](Use &U, Function &Caller) {
      CallInst *CI = getCallIfRegularCall(U, &RFI);
      if (!CI || &F != &Caller)
        return false;
      Ident = combinedIdentStruct(Ident, CI->getArgOperand(0),
                                  /* GlobalOnly */ true, SingleChoice);
      return false;
    };
    RFI.foreachUse(CombineIdentStruct);

    if (!Ident || !SingleChoice) {
      // The IRBuilder uses the insertion block to get to the module, this is
      // unfortunate but we work around it for now.
      if (!OMPInfoCache.OMPBuilder.getInsertionPoint().getBlock())
        OMPInfoCache.OMPBuilder.updateToLocation(OpenMPIRBuilder::InsertPointTy(
            &F.getEntryBlock(), F.getEntryBlock().begin()));
      // Create a fallback location if none was found.
      // TODO: Use the debug locations of the calls instead.
      Constant *Loc = OMPInfoCache.OMPBuilder.getOrCreateDefaultSrcLocStr();
      Ident = OMPInfoCache.OMPBuilder.getOrCreateIdent(Loc);
    }
    return Ident;
  }

  /// Try to eliminate calls of \p RFI in \p F by reusing an existing one or
  /// \p ReplVal if given.
  bool deduplicateRuntimeCalls(Function &F,
                               OMPInformationCache::RuntimeFunctionInfo &RFI,
                               Value *ReplVal = nullptr) {
    auto *UV = RFI.getUseVector(F);
    if (!UV || UV->size() + (ReplVal != nullptr) < 2)
      return false;

    LLVM_DEBUG(
        dbgs() << TAG << "Deduplicate " << UV->size() << " uses of " << RFI.Name
               << (ReplVal ? " with an existing value\n" : "\n") << "\n");

    assert((!ReplVal || (isa<Argument>(ReplVal) &&
                         cast<Argument>(ReplVal)->getParent() == &F)) &&
           "Unexpected replacement value!");

    // TODO: Use dominance to find a good position instead.
    auto CanBeMoved = [](CallBase &CB) {
      unsigned NumArgs = CB.getNumArgOperands();
      if (NumArgs == 0)
        return true;
      if (CB.getArgOperand(0)->getType() != IdentPtr)
        return false;
      for (unsigned u = 1; u < NumArgs; ++u)
        if (isa<Instruction>(CB.getArgOperand(u)))
          return false;
      return true;
    };

    if (!ReplVal) {
      for (Use *U : *UV)
        if (CallInst *CI = getCallIfRegularCall(*U, &RFI)) {
          if (!CanBeMoved(*CI))
            continue;

          auto Remark = [&](OptimizationRemark OR) {
            auto newLoc = &*F.getEntryBlock().getFirstInsertionPt();
            return OR << "OpenMP runtime call "
                      << ore::NV("OpenMPOptRuntime", RFI.Name) << " moved to "
                      << ore::NV("OpenMPRuntimeMoves", newLoc->getDebugLoc());
          };
          emitRemark<OptimizationRemark>(CI, "OpenMPRuntimeCodeMotion", Remark);

          CI->moveBefore(&*F.getEntryBlock().getFirstInsertionPt());
          ReplVal = CI;
          break;
        }
      if (!ReplVal)
        return false;
    }

    // If we use a call as a replacement value we need to make sure the ident
    // is valid at the new location.
    // For now we just pick a global one, either
    // existing and used by one of the calls, or created from scratch.
    if (CallBase *CI = dyn_cast<CallBase>(ReplVal)) {
      if (CI->getNumArgOperands() > 0 &&
          CI->getArgOperand(0)->getType() == IdentPtr) {
        Value *Ident = getCombinedIdentFromCallUsesIn(RFI, F,
                                                      /* GlobalOnly */ true);
        CI->setArgOperand(0, Ident);
      }
    }

    bool Changed = false;
    auto ReplaceAndDeleteCB = [&](Use &U, Function &Caller) {
      CallInst *CI = getCallIfRegularCall(U, &RFI);
      if (!CI || CI == ReplVal || &F != &Caller)
        return false;
      assert(CI->getCaller() == &F && "Unexpected call!");

      auto Remark = [&](OptimizationRemark OR) {
        return OR << "OpenMP runtime call "
                  << ore::NV("OpenMPOptRuntime", RFI.Name) << " deduplicated";
      };
      emitRemark<OptimizationRemark>(CI, "OpenMPRuntimeDeduplicated", Remark);

      CGUpdater.removeCallSite(*CI);
      CI->replaceAllUsesWith(ReplVal);
      CI->eraseFromParent();
      ++NumOpenMPRuntimeCallsDeduplicated;
      Changed = true;
      return true;
    };
    RFI.foreachUse(ReplaceAndDeleteCB);

    return Changed;
  }

  /// Collect arguments that represent the global thread id in \p GTIdArgs.
  void collectGlobalThreadIdArguments(SmallSetVector<Value *, 16> &GTIdArgs) {
    // TODO: Below we basically perform a fixpoint iteration with a pessimistic
    //       initialization. We could define an AbstractAttribute instead and
    //       run the Attributor here once it can be run as an SCC pass.

    // Helper to check the argument \p ArgNo at all call sites of \p F for
    // a GTId.
    auto CallArgOpIsGTId = [&](Function &F, unsigned ArgNo, CallInst &RefCI) {
      if (!F.hasLocalLinkage())
        return false;
      for (Use &U : F.uses()) {
        if (CallInst *CI = getCallIfRegularCall(U)) {
          Value *ArgOp = CI->getArgOperand(ArgNo);
          if (CI == &RefCI || GTIdArgs.count(ArgOp) ||
              getCallIfRegularCall(
                  *ArgOp, &OMPInfoCache.RFIs[OMPRTL___kmpc_global_thread_num]))
            continue;
        }
        return false;
      }
      return true;
    };

    // Helper to identify uses of a GTId as GTId arguments.
    auto AddUserArgs = [&](Value &GTId) {
      for (Use &U : GTId.uses())
        if (CallInst *CI = dyn_cast<CallInst>(U.getUser()))
          if (CI->isArgOperand(&U))
            if (Function *Callee = CI->getCalledFunction())
              if (CallArgOpIsGTId(*Callee, U.getOperandNo(), *CI))
                GTIdArgs.insert(Callee->getArg(U.getOperandNo()));
    };

    // The argument users of __kmpc_global_thread_num calls are GTIds.
    OMPInformationCache::RuntimeFunctionInfo &GlobThreadNumRFI =
        OMPInfoCache.RFIs[OMPRTL___kmpc_global_thread_num];

    GlobThreadNumRFI.foreachUse([&](Use &U, Function &F) {
      if (CallInst *CI = getCallIfRegularCall(U, &GlobThreadNumRFI))
        AddUserArgs(*CI);
      return false;
    });

    // Transitively search for more arguments by looking at the users of the
    // ones we know already. During the search the GTIdArgs vector is extended
    // so we cannot cache the size nor can we use a range based for.
    for (unsigned u = 0; u < GTIdArgs.size(); ++u)
      AddUserArgs(*GTIdArgs[u]);
  }

  /// Emit a remark generically
  ///
  /// This template function can be used to generically emit a remark.
  /// The RemarkKind should be one of the following:
  ///   - OptimizationRemark to indicate a successful optimization attempt
  ///   - OptimizationRemarkMissed to report a failed optimization attempt
  ///   - OptimizationRemarkAnalysis to provide additional information about an
  ///     optimization attempt
  ///
  /// The remark is built using a callback function provided by the caller that
  /// takes a RemarkKind as input and returns a RemarkKind.
  template <typename RemarkKind,
            typename RemarkCallBack = function_ref<RemarkKind(RemarkKind &&)>>
  void emitRemark(Instruction *Inst, StringRef RemarkName,
                  RemarkCallBack &&RemarkCB) {
    Function *F = Inst->getParent()->getParent();
    auto &ORE = OREGetter(F);

    ORE.emit(
        [&]() { return RemarkCB(RemarkKind(DEBUG_TYPE, RemarkName, Inst)); });
  }

  /// Emit a remark on a function. Since only OptimizationRemark supports this,
  /// it can't be made generic.
  void emitRemarkOnFunction(
      Function *F, StringRef RemarkName,
      function_ref<OptimizationRemark(OptimizationRemark &&)> &&RemarkCB) {
    auto &ORE = OREGetter(F);

    ORE.emit([&]() {
      return RemarkCB(OptimizationRemark(DEBUG_TYPE, RemarkName, F));
    });
  }

  /// The underlying module.
  Module &M;

  /// The SCC we are operating on.
  SmallVectorImpl<Function *> &SCC;

  /// Callback to update the call graph, the first argument is a removed call,
  /// the second an optional replacement call.
  CallGraphUpdater &CGUpdater;

  /// Callback to get an OptimizationRemarkEmitter from a Function *.
  OptimizationRemarkGetter OREGetter;

  /// OpenMP-specific information cache. Also used for Attributor runs.
  OMPInformationCache &OMPInfoCache;

  /// Attributor instance.
  Attributor &A;

  /// Helper function to run Attributor on SCC.
  bool runAttributor() {
    if (SCC.empty())
      return false;

    registerAAs();

    ChangeStatus Changed = A.run();

    LLVM_DEBUG(dbgs() << "[Attributor] Done with " << SCC.size()
                      << " functions, result: " << Changed << ".\n");

    return Changed == ChangeStatus::CHANGED;
  }

  /// Populate the Attributor with abstract attribute opportunities in the
  /// function.
  void registerAAs() {
    for (Function *F : SCC) {
      if (F->isDeclaration())
        continue;

      A.getOrCreateAAFor<AAICVTracker>(IRPosition::function(*F));
    }
  }
};

/// Abstract Attribute for tracking ICV values.
struct AAICVTracker : public StateWrapper<BooleanState, AbstractAttribute> {
  using Base = StateWrapper<BooleanState, AbstractAttribute>;
  AAICVTracker(const IRPosition &IRP, Attributor &A) : Base(IRP) {}

  /// Returns true if value is assumed to be tracked.
  bool isAssumedTracked() const { return getAssumed(); }

  /// Returns true if value is known to be tracked.
  bool isKnownTracked() const { return getAssumed(); }

  /// Create an abstract attribute view for the position \p IRP.
  static AAICVTracker &createForPosition(const IRPosition &IRP, Attributor &A);

  /// Return the value with which \p I can be replaced for specific \p ICV.
  virtual Value *getReplacementValue(InternalControlVar ICV,
                                     const Instruction *I, Attributor &A) = 0;

  /// See AbstractAttribute::getName()
  const std::string getName() const override { return "AAICVTracker"; }

  static const char ID;
};

struct AAICVTrackerFunction : public AAICVTracker {
  AAICVTrackerFunction(const IRPosition &IRP, Attributor &A)
      : AAICVTracker(IRP, A) {}

  // FIXME: come up with better string.
  const std::string getAsStr() const override { return "ICVTracker"; }

  // FIXME: come up with some stats.
  void trackStatistics() const override {}

  /// TODO: decide whether to deduplicate here, or use current
  /// deduplicateRuntimeCalls function.
  ChangeStatus manifest(Attributor &A) override {
    ChangeStatus Changed = ChangeStatus::UNCHANGED;

    for (InternalControlVar &ICV : TrackableICVs)
      if (deduplicateICVGetters(ICV, A))
        Changed = ChangeStatus::CHANGED;

    return Changed;
  }

  bool deduplicateICVGetters(InternalControlVar &ICV, Attributor &A) {
    auto &OMPInfoCache = static_cast<OMPInformationCache &>(A.getInfoCache());
    auto &ICVInfo = OMPInfoCache.ICVs[ICV];
    auto &GetterRFI = OMPInfoCache.RFIs[ICVInfo.Getter];

    bool Changed = false;

    auto ReplaceAndDeleteCB = [&](Use &U, Function &Caller) {
      CallInst *CI = OpenMPOpt::getCallIfRegularCall(U, &GetterRFI);
      Instruction *UserI = cast<Instruction>(U.getUser());
      Value *ReplVal = getReplacementValue(ICV, UserI, A);

      if (!ReplVal || !CI)
        return false;

      A.removeCallSite(CI);
      CI->replaceAllUsesWith(ReplVal);
      CI->eraseFromParent();
      Changed = true;
      return true;
    };

    GetterRFI.foreachUse(ReplaceAndDeleteCB);
    return Changed;
  }

  // Map of ICVs to their values at specific program points.
  EnumeratedArray<SmallSetVector<ICVValue, 4>, InternalControlVar,
                  InternalControlVar::ICV___last>
      ICVValuesMap;

  // Currently only nthreads is being tracked.
  // This array will only grow with time.
  InternalControlVar TrackableICVs[1] = {ICV_nthreads};

  ChangeStatus updateImpl(Attributor &A) override {
    ChangeStatus HasChanged = ChangeStatus::UNCHANGED;

    Function *F = getAnchorScope();

    auto &OMPInfoCache = static_cast<OMPInformationCache &>(A.getInfoCache());

    for (InternalControlVar ICV : TrackableICVs) {
      auto &SetterRFI = OMPInfoCache.RFIs[OMPInfoCache.ICVs[ICV].Setter];

      auto TrackValues = [&](Use &U, Function &) {
        CallInst *CI = OpenMPOpt::getCallIfRegularCall(U);
        if (!CI)
          return false;

        // FIXME: handle setters with more than one argument.
        /// Track new value.
        if (ICVValuesMap[ICV].insert(ICVValue(CI, CI->getArgOperand(0))))
          HasChanged = ChangeStatus::CHANGED;

        return false;
      };

      SetterRFI.foreachUse(TrackValues, F);
    }

    return HasChanged;
  }

  /// Return the value with which \p I can be replaced for specific \p ICV.
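  ///
  /// Illustrative sketch (assuming the nthreads ICV's setter/getter pair is
  /// omp_set_num_threads/omp_get_max_threads): given
  ///   call void @omp_set_num_threads(i32 4)   ; tracked setter value
  ///   %n = call i32 @omp_get_max_threads()    ; \p I
  /// in one basic block with no other call in between, the getter at \p I can
  /// be replaced by the tracked value `i32 4`.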
  Value *getReplacementValue(InternalControlVar ICV, const Instruction *I,
                             Attributor &A) override {
    const BasicBlock *CurrBB = I->getParent();

    auto &ValuesSet = ICVValuesMap[ICV];
    auto &OMPInfoCache = static_cast<OMPInformationCache &>(A.getInfoCache());
    auto &GetterRFI = OMPInfoCache.RFIs[OMPInfoCache.ICVs[ICV].Getter];

    for (const auto &ICVVal : ValuesSet) {
      if (CurrBB == ICVVal.Inst->getParent()) {
        if (!ICVVal.Inst->comesBefore(I))
          continue;

        // Both instructions are in the same BB and at \p I we know the ICV
        // value.
        while (I != ICVVal.Inst) {
          // We don't yet know if a call might update an ICV.
          // TODO: check callsite AA for value.
          if (const auto *CB = dyn_cast<CallBase>(I))
            if (CB->getCalledFunction() != GetterRFI.Declaration)
              return nullptr;

          I = I->getPrevNode();
        }

        // No call in between, return the value.
        return ICVVal.TrackedValue;
      }
    }

    // No value was tracked.
    return nullptr;
  }
};
} // namespace

const char AAICVTracker::ID = 0;

AAICVTracker &AAICVTracker::createForPosition(const IRPosition &IRP,
                                              Attributor &A) {
  AAICVTracker *AA = nullptr;
  switch (IRP.getPositionKind()) {
  case IRPosition::IRP_INVALID:
  case IRPosition::IRP_FLOAT:
  case IRPosition::IRP_ARGUMENT:
  case IRPosition::IRP_RETURNED:
  case IRPosition::IRP_CALL_SITE_RETURNED:
  case IRPosition::IRP_CALL_SITE_ARGUMENT:
  case IRPosition::IRP_CALL_SITE:
    llvm_unreachable("ICVTracker can only be created for function position!");
  case IRPosition::IRP_FUNCTION:
    AA = new (A.Allocator) AAICVTrackerFunction(IRP, A);
    break;
  }

  return *AA;
}

PreservedAnalyses OpenMPOptPass::run(LazyCallGraph::SCC &C,
                                     CGSCCAnalysisManager &AM,
                                     LazyCallGraph &CG, CGSCCUpdateResult &UR) {
  if (!containsOpenMP(*C.begin()->getFunction().getParent(), OMPInModule))
    return PreservedAnalyses::all();

  if (DisableOpenMPOptimizations)
    return PreservedAnalyses::all();

  SmallPtrSet<Function *, 16> ModuleSlice;
  SmallVector<Function *, 16> SCC;
  for (LazyCallGraph::Node &N : C) {
    SCC.push_back(&N.getFunction());
    ModuleSlice.insert(SCC.back());
  }

  if (SCC.empty())
    return PreservedAnalyses::all();

  FunctionAnalysisManager &FAM =
      AM.getResult<FunctionAnalysisManagerCGSCCProxy>(C, CG).getManager();

  AnalysisGetter AG(FAM);

  auto OREGetter = [&FAM](Function *F) -> OptimizationRemarkEmitter & {
    return FAM.getResult<OptimizationRemarkEmitterAnalysis>(*F);
  };

  CallGraphUpdater CGUpdater;
  CGUpdater.initialize(CG, C, AM, UR);

  SetVector<Function *> Functions(SCC.begin(), SCC.end());
  BumpPtrAllocator Allocator;
  OMPInformationCache InfoCache(*(Functions.back()->getParent()), AG, Allocator,
                                /*CGSCC*/ &Functions, ModuleSlice);

  Attributor A(Functions, InfoCache, CGUpdater);

  // TODO: Compute the module slice we are allowed to look at.
  OpenMPOpt OMPOpt(SCC, CGUpdater, OREGetter, InfoCache, A);
  bool Changed = OMPOpt.run();
  (void)Changed;
  return PreservedAnalyses::all();
}

namespace {

struct OpenMPOptLegacyPass : public CallGraphSCCPass {
  CallGraphUpdater CGUpdater;
  OpenMPInModule OMPInModule;
  static char ID;

  OpenMPOptLegacyPass() : CallGraphSCCPass(ID) {
    initializeOpenMPOptLegacyPassPass(*PassRegistry::getPassRegistry());
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    CallGraphSCCPass::getAnalysisUsage(AU);
  }

  bool doInitialization(CallGraph &CG) override {
    // Disable the pass if there is no OpenMP (runtime call) in the module.
    containsOpenMP(CG.getModule(), OMPInModule);
    return false;
  }

  bool runOnSCC(CallGraphSCC &CGSCC) override {
    if (!containsOpenMP(CGSCC.getCallGraph().getModule(), OMPInModule))
      return false;
    if (DisableOpenMPOptimizations || skipSCC(CGSCC))
      return false;

    SmallPtrSet<Function *, 16> ModuleSlice;
    SmallVector<Function *, 16> SCC;
    for (CallGraphNode *CGN : CGSCC)
      if (Function *Fn = CGN->getFunction())
        if (!Fn->isDeclaration()) {
          SCC.push_back(Fn);
          ModuleSlice.insert(Fn);
        }

    if (SCC.empty())
      return false;

    CallGraph &CG = getAnalysis<CallGraphWrapperPass>().getCallGraph();
    CGUpdater.initialize(CG, CGSCC);

    // Maintain a map of functions to avoid rebuilding the ORE
    DenseMap<Function *, std::unique_ptr<OptimizationRemarkEmitter>> OREMap;
    auto OREGetter = [&OREMap](Function *F) -> OptimizationRemarkEmitter & {
      std::unique_ptr<OptimizationRemarkEmitter> &ORE = OREMap[F];
      if (!ORE)
        ORE = std::make_unique<OptimizationRemarkEmitter>(F);
      return *ORE;
    };

    AnalysisGetter AG;
    SetVector<Function *> Functions(SCC.begin(), SCC.end());
    BumpPtrAllocator Allocator;
    OMPInformationCache InfoCache(*(Functions.back()->getParent()), AG,
                                  Allocator,
                                  /*CGSCC*/ &Functions, ModuleSlice);

    Attributor A(Functions, InfoCache, CGUpdater);

    // TODO: Compute the module slice we are allowed to look at.
    OpenMPOpt OMPOpt(SCC, CGUpdater, OREGetter, InfoCache, A);
    return OMPOpt.run();
  }

  bool doFinalization(CallGraph &CG) override { return CGUpdater.finalize(); }
};

} // end anonymous namespace

bool llvm::omp::containsOpenMP(Module &M, OpenMPInModule &OMPInModule) {
  if (OMPInModule.isKnown())
    return OMPInModule;

#define OMP_RTL(_Enum, _Name, ...)                                             \
  if (M.getFunction(_Name))                                                    \
    return OMPInModule = true;
#include "llvm/Frontend/OpenMP/OMPKinds.def"
  return OMPInModule = false;
}

char OpenMPOptLegacyPass::ID = 0;

INITIALIZE_PASS_BEGIN(OpenMPOptLegacyPass, "openmpopt",
                      "OpenMP specific optimizations", false, false)
INITIALIZE_PASS_DEPENDENCY(CallGraphWrapperPass)
INITIALIZE_PASS_END(OpenMPOptLegacyPass, "openmpopt",
                    "OpenMP specific optimizations", false, false)

Pass *llvm::createOpenMPOptLegacyPass() { return new OpenMPOptLegacyPass(); }