//===-- IPO/OpenMPOpt.cpp - Collection of OpenMP specific optimizations ---===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// OpenMP specific optimizations:
//
// - Deduplication of runtime calls, e.g., omp_get_thread_num.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/IPO/OpenMPOpt.h"

#include "llvm/ADT/EnumeratedArray.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/CallGraph.h"
#include "llvm/Analysis/CallGraphSCCPass.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Frontend/OpenMP/OMPConstants.h"
#include "llvm/Frontend/OpenMP/OMPIRBuilder.h"
#include "llvm/InitializePasses.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Transforms/IPO.h"
#include "llvm/Transforms/IPO/Attributor.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/CallGraphUpdater.h"
#include "llvm/Transforms/Utils/CodeExtractor.h"

using namespace llvm;
using namespace omp;

#define DEBUG_TYPE "openmp-opt"

static cl::opt<bool> DisableOpenMPOptimizations(
    "openmp-opt-disable", cl::ZeroOrMore,
    cl::desc("Disable OpenMP specific optimizations."), cl::Hidden,
    cl::init(false));

static cl::opt<bool> EnableParallelRegionMerging(
    "openmp-opt-enable-merging", cl::ZeroOrMore,
    cl::desc("Enable the OpenMP region merging optimization."), cl::Hidden,
    cl::init(false));

static cl::opt<bool> PrintICVValues("openmp-print-icv-values", cl::init(false),
                                    cl::Hidden);
static cl::opt<bool> PrintOpenMPKernels("openmp-print-gpu-kernels",
                                        cl::init(false), cl::Hidden);

static cl::opt<bool> HideMemoryTransferLatency(
    "openmp-hide-memory-transfer-latency",
    cl::desc("[WIP] Tries to hide the latency of host to device memory"
             " transfers"),
    cl::Hidden, cl::init(false));

STATISTIC(NumOpenMPRuntimeCallsDeduplicated,
          "Number of OpenMP runtime calls deduplicated");
STATISTIC(NumOpenMPParallelRegionsDeleted,
          "Number of OpenMP parallel regions deleted");
STATISTIC(NumOpenMPRuntimeFunctionsIdentified,
          "Number of OpenMP runtime functions identified");
STATISTIC(NumOpenMPRuntimeFunctionUsesIdentified,
          "Number of OpenMP runtime function uses identified");
STATISTIC(NumOpenMPTargetRegionKernels,
          "Number of OpenMP target region entry points (=kernels) identified");
STATISTIC(
    NumOpenMPParallelRegionsReplacedInGPUStateMachine,
    "Number of OpenMP parallel regions replaced with ID in GPU state machines");
STATISTIC(NumOpenMPParallelRegionsMerged,
          "Number of OpenMP parallel regions merged");

#if !defined(NDEBUG)
static constexpr auto TAG = "[" DEBUG_TYPE "]";
#endif

namespace {

struct AAICVTracker;

/// OpenMP specific information. For now, stores RFIs and ICVs also needed for
/// Attributor runs.
struct OMPInformationCache : public InformationCache {
  OMPInformationCache(Module &M, AnalysisGetter &AG,
                      BumpPtrAllocator &Allocator, SetVector<Function *> &CGSCC,
                      SmallPtrSetImpl<Kernel> &Kernels)
      : InformationCache(M, AG, Allocator, &CGSCC), OMPBuilder(M),
        Kernels(Kernels) {

    OMPBuilder.initialize();
    initializeRuntimeFunctions();
    initializeInternalControlVars();
  }

  /// Generic information that describes an internal control variable.
  struct InternalControlVarInfo {
    /// The kind, as described by InternalControlVar enum.
    InternalControlVar Kind;

    /// The name of the ICV.
    StringRef Name;

    /// Environment variable associated with this ICV.
    StringRef EnvVarName;

    /// Initial value kind.
    ICVInitValue InitKind;

    /// Initial value.
    ConstantInt *InitValue;

    /// Setter RTL function associated with this ICV.
    RuntimeFunction Setter;

    /// Getter RTL function associated with this ICV.
    RuntimeFunction Getter;

    /// RTL Function corresponding to the override clause of this ICV
    RuntimeFunction Clause;
  };

  /// Generic information that describes a runtime function
  struct RuntimeFunctionInfo {

    /// The kind, as described by the RuntimeFunction enum.
    RuntimeFunction Kind;

    /// The name of the function.
    StringRef Name;

    /// Flag to indicate a variadic function.
    bool IsVarArg;

    /// The return type of the function.
    Type *ReturnType;

    /// The argument types of the function.
    SmallVector<Type *, 8> ArgumentTypes;

    /// The declaration if available.
    Function *Declaration = nullptr;

    /// Uses of this runtime function per function containing the use.
    using UseVector = SmallVector<Use *, 16>;

    /// Clear UsesMap for runtime function.
    void clearUsesMap() { UsesMap.clear(); }

    /// Boolean conversion that is true if the runtime function was found.
    operator bool() const { return Declaration; }

    /// Return the vector of uses in function \p F.
    UseVector &getOrCreateUseVector(Function *F) {
      std::shared_ptr<UseVector> &UV = UsesMap[F];
      if (!UV)
        UV = std::make_shared<UseVector>();
      return *UV;
    }

    /// Return the vector of uses in function \p F or `nullptr` if there are
    /// none.
    const UseVector *getUseVector(Function &F) const {
      auto I = UsesMap.find(&F);
      if (I != UsesMap.end())
        return I->second.get();
      return nullptr;
    }

    /// Return how many functions contain uses of this runtime function.
    size_t getNumFunctionsWithUses() const { return UsesMap.size(); }

    /// Return the number of arguments (or the minimal number for variadic
    /// functions).
    size_t getNumArgs() const { return ArgumentTypes.size(); }

    /// Run the callback \p CB on each use and forget the use if the result is
    /// true. The callback will be fed the function in which the use was
    /// encountered as second argument.
    void foreachUse(SmallVectorImpl<Function *> &SCC,
                    function_ref<bool(Use &, Function &)> CB) {
      for (Function *F : SCC)
        foreachUse(CB, F);
    }

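    // Illustrative usage sketch (mirrors callbacks such as DeleteCallCB
    // further down in this file; not compiled here): a pass walks the
    // collected uses with a callback and returns true to forget a use once
    // it has been rewritten, e.g.
    //
    //   auto &RFI = OMPInfoCache.RFIs[OMPRTL___kmpc_barrier];
    //   RFI.foreachUse(SCC, [&](Use &U, Function &F) {
    //     CallInst *CI = OpenMPOpt::getCallIfRegularCall(U, &RFI);
    //     return CI != nullptr; // Forget uses we recognized as regular calls.
    //   });
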
    /// Run the callback \p CB on each use within the function \p F and forget
    /// the use if the result is true.
    void foreachUse(function_ref<bool(Use &, Function &)> CB, Function *F) {
      SmallVector<unsigned, 8> ToBeDeleted;
      ToBeDeleted.clear();

      unsigned Idx = 0;
      UseVector &UV = getOrCreateUseVector(F);

      for (Use *U : UV) {
        if (CB(*U, *F))
          ToBeDeleted.push_back(Idx);
        ++Idx;
      }

      // Remove the to-be-deleted indices in reverse order as prior
      // modifications will not modify the smaller indices.
      while (!ToBeDeleted.empty()) {
        unsigned Idx = ToBeDeleted.pop_back_val();
        UV[Idx] = UV.back();
        UV.pop_back();
      }
    }

  private:
    /// Map from functions to all uses of this runtime function contained in
    /// them.
    DenseMap<Function *, std::shared_ptr<UseVector>> UsesMap;
  };

  /// An OpenMP-IR-Builder instance
  OpenMPIRBuilder OMPBuilder;

  /// Map from runtime function kind to the runtime function description.
  EnumeratedArray<RuntimeFunctionInfo, RuntimeFunction,
                  RuntimeFunction::OMPRTL___last>
      RFIs;

  /// Map from ICV kind to the ICV description.
  EnumeratedArray<InternalControlVarInfo, InternalControlVar,
                  InternalControlVar::ICV___last>
      ICVs;

  /// Helper to initialize all internal control variable information for those
  /// defined in OMPKinds.def.
  void initializeInternalControlVars() {
#define ICV_RT_SET(_Name, RTL)                                                 \
  {                                                                            \
    auto &ICV = ICVs[_Name];                                                   \
    ICV.Setter = RTL;                                                          \
  }
#define ICV_RT_GET(Name, RTL)                                                  \
  {                                                                            \
    auto &ICV = ICVs[Name];                                                    \
    ICV.Getter = RTL;                                                          \
  }
#define ICV_DATA_ENV(Enum, _Name, _EnvVarName, Init)                           \
  {                                                                            \
    auto &ICV = ICVs[Enum];                                                    \
    ICV.Name = _Name;                                                          \
    ICV.Kind = Enum;                                                           \
    ICV.InitKind = Init;                                                       \
    ICV.EnvVarName = _EnvVarName;                                              \
    switch (ICV.InitKind) {                                                    \
    case ICV_IMPLEMENTATION_DEFINED:                                           \
      ICV.InitValue = nullptr;                                                 \
      break;                                                                   \
    case ICV_ZERO:                                                             \
      ICV.InitValue = ConstantInt::get(                                        \
          Type::getInt32Ty(OMPBuilder.Int32->getContext()), 0);                \
      break;                                                                   \
    case ICV_FALSE:                                                            \
      ICV.InitValue = ConstantInt::getFalse(OMPBuilder.Int1->getContext());    \
      break;                                                                   \
    case ICV_LAST:                                                             \
      break;                                                                   \
    }                                                                          \
  }
#include "llvm/Frontend/OpenMP/OMPKinds.def"
  }

  /// Returns true if the function declaration \p F matches the runtime
  /// function types, that is, return type \p RTFRetType, and argument types
  /// \p RTFArgTypes.
  static bool declMatchesRTFTypes(Function *F, Type *RTFRetType,
                                  SmallVector<Type *, 8> &RTFArgTypes) {
    // TODO: We should output information to the user (under debug output
    //       and via remarks).

    if (!F)
      return false;
    if (F->getReturnType() != RTFRetType)
      return false;
    if (F->arg_size() != RTFArgTypes.size())
      return false;

    auto RTFTyIt = RTFArgTypes.begin();
    for (Argument &Arg : F->args()) {
      if (Arg.getType() != *RTFTyIt)
        return false;

      ++RTFTyIt;
    }

    return true;
  }

  // Helper to collect all uses of the declaration in the UsesMap.
  unsigned collectUses(RuntimeFunctionInfo &RFI, bool CollectStats = true) {
    unsigned NumUses = 0;
    if (!RFI.Declaration)
      return NumUses;
    OMPBuilder.addAttributes(RFI.Kind, *RFI.Declaration);

    if (CollectStats) {
      NumOpenMPRuntimeFunctionsIdentified += 1;
      NumOpenMPRuntimeFunctionUsesIdentified += RFI.Declaration->getNumUses();
    }

    // TODO: We directly convert uses into proper calls and unknown uses.
    for (Use &U : RFI.Declaration->uses()) {
      if (Instruction *UserI = dyn_cast<Instruction>(U.getUser())) {
        if (ModuleSlice.count(UserI->getFunction())) {
          RFI.getOrCreateUseVector(UserI->getFunction()).push_back(&U);
          ++NumUses;
        }
      } else {
        RFI.getOrCreateUseVector(nullptr).push_back(&U);
        ++NumUses;
      }
    }
    return NumUses;
  }

  // Helper function to recollect uses of a runtime function.
  void recollectUsesForFunction(RuntimeFunction RTF) {
    auto &RFI = RFIs[RTF];
    RFI.clearUsesMap();
    collectUses(RFI, /*CollectStats*/ false);
  }

  // Helper function to recollect uses of all runtime functions.
  void recollectUses() {
    for (int Idx = 0; Idx < RFIs.size(); ++Idx)
      recollectUsesForFunction(static_cast<RuntimeFunction>(Idx));
  }

  /// Helper to initialize all runtime function information for those defined
  /// in OMPKinds.def.
  void initializeRuntimeFunctions() {
    Module &M = *((*ModuleSlice.begin())->getParent());

    // Helper macros for handling __VA_ARGS__ in OMP_RTL
#define OMP_TYPE(VarName, ...)                                                 \
  Type *VarName = OMPBuilder.VarName;                                          \
  (void)VarName;

#define OMP_ARRAY_TYPE(VarName, ...)                                           \
  ArrayType *VarName##Ty = OMPBuilder.VarName##Ty;                             \
  (void)VarName##Ty;                                                           \
  PointerType *VarName##PtrTy = OMPBuilder.VarName##PtrTy;                     \
  (void)VarName##PtrTy;

#define OMP_FUNCTION_TYPE(VarName, ...)                                        \
  FunctionType *VarName = OMPBuilder.VarName;                                  \
  (void)VarName;                                                               \
  PointerType *VarName##Ptr = OMPBuilder.VarName##Ptr;                         \
  (void)VarName##Ptr;

#define OMP_STRUCT_TYPE(VarName, ...)                                          \
  StructType *VarName = OMPBuilder.VarName;                                    \
  (void)VarName;                                                               \
  PointerType *VarName##Ptr = OMPBuilder.VarName##Ptr;                         \
  (void)VarName##Ptr;

#define OMP_RTL(_Enum, _Name, _IsVarArg, _ReturnType, ...)                     \
  {                                                                            \
    SmallVector<Type *, 8> ArgsTypes({__VA_ARGS__});                           \
    Function *F = M.getFunction(_Name);                                        \
    if (declMatchesRTFTypes(F, OMPBuilder._ReturnType, ArgsTypes)) {           \
      auto &RFI = RFIs[_Enum];                                                 \
      RFI.Kind = _Enum;                                                        \
      RFI.Name = _Name;                                                        \
      RFI.IsVarArg = _IsVarArg;                                                \
      RFI.ReturnType = OMPBuilder._ReturnType;                                 \
      RFI.ArgumentTypes = std::move(ArgsTypes);                                \
      RFI.Declaration = F;                                                     \
      unsigned NumUses = collectUses(RFI);                                     \
      (void)NumUses;                                                           \
      LLVM_DEBUG({                                                             \
        dbgs() << TAG << RFI.Name << (RFI.Declaration ? "" : " not")           \
               << " found\n";                                                  \
        if (RFI.Declaration)                                                   \
          dbgs() << TAG << "-> got " << NumUses << " uses in "                 \
                 << RFI.getNumFunctionsWithUses()                              \
                 << " different functions.\n";                                 \
      });                                                                      \
    }                                                                          \
  }
#include "llvm/Frontend/OpenMP/OMPKinds.def"

    // TODO: We should attach the attributes defined in OMPKinds.def.
  }

  /// Collection of known kernels (\see Kernel) in the module.
  SmallPtrSetImpl<Kernel> &Kernels;
};

/// Used to map the values physically (in the IR) stored in an offload
/// array, to a vector in memory.
struct OffloadArray {
  /// Physical array (in the IR).
  AllocaInst *Array = nullptr;
  /// Mapped values.
  SmallVector<Value *, 8> StoredValues;
  /// Last stores made in the offload array.
  SmallVector<StoreInst *, 8> LastAccesses;

  OffloadArray() = default;

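  // Illustrative sketch (assumed IR shapes, not taken from a specific test):
  // for an offload array such as
  //
  //   %offload_baseptrs = alloca [2 x i8*]
  //   %gep0 = getelementptr ... %offload_baseptrs, i64 0, i64 0
  //   store i8* %a, i8** %gep0
  //   %gep1 = getelementptr ... %offload_baseptrs, i64 0, i64 1
  //   store i8* %b, i8** %gep1
  //   call void @__tgt_target_data_begin_mapper(...)
  //
  // calling initialize() with the alloca and the mapper call as \p Before
  // roughly leaves StoredValues = {%a, %b} (underlying objects) and
  // LastAccesses = {the two stores}.
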
  /// Initializes the OffloadArray with the values stored in \p Array before
  /// instruction \p Before is reached. Returns false if the initialization
  /// fails.
  /// This MUST be used immediately after the construction of the object.
  bool initialize(AllocaInst &Array, Instruction &Before) {
    if (!Array.getAllocatedType()->isArrayTy())
      return false;

    if (!getValues(Array, Before))
      return false;

    this->Array = &Array;
    return true;
  }

  static const unsigned DeviceIDArgNum = 1;
  static const unsigned BasePtrsArgNum = 3;
  static const unsigned PtrsArgNum = 4;
  static const unsigned SizesArgNum = 5;

private:
  /// Traverses the BasicBlock where \p Array is, collecting the stores made to
  /// \p Array, leaving StoredValues with the values stored before the
  /// instruction \p Before is reached.
  bool getValues(AllocaInst &Array, Instruction &Before) {
    // Initialize container.
    const uint64_t NumValues = Array.getAllocatedType()->getArrayNumElements();
    StoredValues.assign(NumValues, nullptr);
    LastAccesses.assign(NumValues, nullptr);

    // TODO: This assumes the instruction \p Before is in the same
    //       BasicBlock as Array. Make it general, for any control flow graph.
    BasicBlock *BB = Array.getParent();
    if (BB != Before.getParent())
      return false;

    const DataLayout &DL = Array.getModule()->getDataLayout();
    const unsigned int PointerSize = DL.getPointerSize();

    for (Instruction &I : *BB) {
      if (&I == &Before)
        break;

      if (!isa<StoreInst>(&I))
        continue;

      auto *S = cast<StoreInst>(&I);
      int64_t Offset = -1;
      auto *Dst =
          GetPointerBaseWithConstantOffset(S->getPointerOperand(), Offset, DL);
      if (Dst == &Array) {
        int64_t Idx = Offset / PointerSize;
        StoredValues[Idx] = getUnderlyingObject(S->getValueOperand());
        LastAccesses[Idx] = S;
      }
    }

    return isFilled();
  }

  /// Returns true if all values in StoredValues and
  /// LastAccesses are not nullptrs.
  bool isFilled() {
    const unsigned NumValues = StoredValues.size();
    for (unsigned I = 0; I < NumValues; ++I) {
      if (!StoredValues[I] || !LastAccesses[I])
        return false;
    }

    return true;
  }
};

struct OpenMPOpt {

  using OptimizationRemarkGetter =
      function_ref<OptimizationRemarkEmitter &(Function *)>;

  OpenMPOpt(SmallVectorImpl<Function *> &SCC, CallGraphUpdater &CGUpdater,
            OptimizationRemarkGetter OREGetter,
            OMPInformationCache &OMPInfoCache, Attributor &A)
      : M(*(*SCC.begin())->getParent()), SCC(SCC), CGUpdater(CGUpdater),
        OREGetter(OREGetter), OMPInfoCache(OMPInfoCache), A(A) {}

  /// Check if any remarks are enabled for openmp-opt
  bool remarksEnabled() {
    auto &Ctx = M.getContext();
    return Ctx.getDiagHandlerPtr()->isAnyRemarkEnabled(DEBUG_TYPE);
  }

  /// Run all OpenMP optimizations on the underlying SCC/ModuleSlice.
  bool run(bool IsModulePass) {
    if (SCC.empty())
      return false;

    bool Changed = false;

    LLVM_DEBUG(dbgs() << TAG << "Run on SCC with " << SCC.size()
                      << " functions in a slice with "
                      << OMPInfoCache.ModuleSlice.size() << " functions\n");

    if (IsModulePass) {
      if (remarksEnabled())
        analysisGlobalization();
    } else {
      if (PrintICVValues)
        printICVs();
      if (PrintOpenMPKernels)
        printKernels();

      Changed |= rewriteDeviceCodeStateMachine();

      Changed |= runAttributor();

      // Recollect uses, in case Attributor deleted any.
      OMPInfoCache.recollectUses();

      Changed |= deleteParallelRegions();
      if (HideMemoryTransferLatency)
        Changed |= hideMemTransfersLatency();
      Changed |= deduplicateRuntimeCalls();
      if (EnableParallelRegionMerging) {
        if (mergeParallelRegions()) {
          deduplicateRuntimeCalls();
          Changed = true;
        }
      }
    }

    return Changed;
  }

  /// Print initial ICV values for testing.
  /// FIXME: This should be done from the Attributor once it is added.
  void printICVs() const {
    InternalControlVar ICVs[] = {ICV_nthreads, ICV_active_levels, ICV_cancel,
                                 ICV_proc_bind};

    for (Function *F : OMPInfoCache.ModuleSlice) {
      for (auto ICV : ICVs) {
        auto ICVInfo = OMPInfoCache.ICVs[ICV];
        auto Remark = [&](OptimizationRemark OR) {
          return OR << "OpenMP ICV " << ore::NV("OpenMPICV", ICVInfo.Name)
                    << " Value: "
                    << (ICVInfo.InitValue
                            ? ICVInfo.InitValue->getValue().toString(10, true)
                            : "IMPLEMENTATION_DEFINED");
        };

        emitRemarkOnFunction(F, "OpenMPICVTracker", Remark);
      }
    }
  }

  /// Print OpenMP GPU kernels for testing.
  void printKernels() const {
    for (Function *F : SCC) {
      if (!OMPInfoCache.Kernels.count(F))
        continue;

      auto Remark = [&](OptimizationRemark OR) {
        return OR << "OpenMP GPU kernel "
                  << ore::NV("OpenMPGPUKernel", F->getName()) << "\n";
      };

      emitRemarkOnFunction(F, "OpenMPGPU", Remark);
    }
  }

  /// Return the call if \p U is a callee use in a regular call. If \p RFI is
  /// given it has to be the callee or a nullptr is returned.
  static CallInst *getCallIfRegularCall(
      Use &U, OMPInformationCache::RuntimeFunctionInfo *RFI = nullptr) {
    CallInst *CI = dyn_cast<CallInst>(U.getUser());
    if (CI && CI->isCallee(&U) && !CI->hasOperandBundles() &&
        (!RFI || CI->getCalledFunction() == RFI->Declaration))
      return CI;
    return nullptr;
  }

  /// Return the call if \p V is a regular call. If \p RFI is given it has to
  /// be the callee or a nullptr is returned.
  static CallInst *getCallIfRegularCall(
      Value &V, OMPInformationCache::RuntimeFunctionInfo *RFI = nullptr) {
    CallInst *CI = dyn_cast<CallInst>(&V);
    if (CI && !CI->hasOperandBundles() &&
        (!RFI || CI->getCalledFunction() == RFI->Declaration))
      return CI;
    return nullptr;
  }

private:
  /// Merge parallel regions when it is safe.
  bool mergeParallelRegions() {
    const unsigned CallbackCalleeOperand = 2;
    const unsigned CallbackFirstArgOperand = 3;
    using InsertPointTy = OpenMPIRBuilder::InsertPointTy;

    // Check if there are any __kmpc_fork_call calls to merge.
    OMPInformationCache::RuntimeFunctionInfo &RFI =
        OMPInfoCache.RFIs[OMPRTL___kmpc_fork_call];

    if (!RFI.Declaration)
      return false;

    // Unmergable calls that prevent merging a parallel region.
    OMPInformationCache::RuntimeFunctionInfo UnmergableCallsInfo[] = {
        OMPInfoCache.RFIs[OMPRTL___kmpc_push_proc_bind],
        OMPInfoCache.RFIs[OMPRTL___kmpc_push_num_threads],
    };

    bool Changed = false;
    LoopInfo *LI = nullptr;
    DominatorTree *DT = nullptr;

    SmallDenseMap<BasicBlock *, SmallPtrSet<Instruction *, 4>> BB2PRMap;

    BasicBlock *StartBB = nullptr, *EndBB = nullptr;
    auto BodyGenCB = [&](InsertPointTy AllocaIP, InsertPointTy CodeGenIP,
                         BasicBlock &ContinuationIP) {
      BasicBlock *CGStartBB = CodeGenIP.getBlock();
      BasicBlock *CGEndBB =
          SplitBlock(CGStartBB, &*CodeGenIP.getPoint(), DT, LI);
      assert(StartBB != nullptr && "StartBB should not be null");
      CGStartBB->getTerminator()->setSuccessor(0, StartBB);
      assert(EndBB != nullptr && "EndBB should not be null");
      EndBB->getTerminator()->setSuccessor(0, CGEndBB);
    };

    auto PrivCB = [&](InsertPointTy AllocaIP, InsertPointTy CodeGenIP, Value &,
                      Value &Inner, Value *&ReplacementValue) -> InsertPointTy {
      ReplacementValue = &Inner;
      return CodeGenIP;
    };

    auto FiniCB = [&](InsertPointTy CodeGenIP) {};

    /// Create a sequential execution region within a merged parallel region,
    /// encapsulated in a master construct with a barrier for synchronization.
    auto CreateSequentialRegion = [&](Function *OuterFn,
                                      BasicBlock *OuterPredBB,
                                      Instruction *SeqStartI,
                                      Instruction *SeqEndI) {
      // Isolate the instructions of the sequential region to a separate
      // block.
      BasicBlock *ParentBB = SeqStartI->getParent();
      BasicBlock *SeqEndBB =
          SplitBlock(ParentBB, SeqEndI->getNextNode(), DT, LI);
      BasicBlock *SeqAfterBB =
          SplitBlock(SeqEndBB, &*SeqEndBB->getFirstInsertionPt(), DT, LI);
      BasicBlock *SeqStartBB =
          SplitBlock(ParentBB, SeqStartI, DT, LI, nullptr, "seq.par.merged");

      assert(ParentBB->getUniqueSuccessor() == SeqStartBB &&
             "Expected a different CFG");
      const DebugLoc DL = ParentBB->getTerminator()->getDebugLoc();
      ParentBB->getTerminator()->eraseFromParent();

      auto BodyGenCB = [&](InsertPointTy AllocaIP, InsertPointTy CodeGenIP,
                           BasicBlock &ContinuationIP) {
        BasicBlock *CGStartBB = CodeGenIP.getBlock();
        BasicBlock *CGEndBB =
            SplitBlock(CGStartBB, &*CodeGenIP.getPoint(), DT, LI);
        assert(SeqStartBB != nullptr && "SeqStartBB should not be null");
        CGStartBB->getTerminator()->setSuccessor(0, SeqStartBB);
        assert(SeqEndBB != nullptr && "SeqEndBB should not be null");
        SeqEndBB->getTerminator()->setSuccessor(0, CGEndBB);
      };
      auto FiniCB = [&](InsertPointTy CodeGenIP) {};

      // Find outputs from the sequential region to outside users and
      // broadcast their values to them.
      for (Instruction &I : *SeqStartBB) {
        SmallPtrSet<Instruction *, 4> OutsideUsers;
        for (User *Usr : I.users()) {
          Instruction &UsrI = *cast<Instruction>(Usr);
          // Ignore outputs to lifetime intrinsics, code extraction for the
          // merged parallel region will fix them.
          if (UsrI.isLifetimeStartOrEnd())
            continue;

          if (UsrI.getParent() != SeqStartBB)
            OutsideUsers.insert(&UsrI);
        }

        if (OutsideUsers.empty())
          continue;

        // Emit an alloca in the outer region to store the broadcasted
        // value.
        const DataLayout &DL = M.getDataLayout();
        AllocaInst *AllocaI = new AllocaInst(
            I.getType(), DL.getAllocaAddrSpace(), nullptr,
            I.getName() + ".seq.output.alloc", &OuterFn->front().front());

        // Emit a store instruction in the sequential BB to update the
        // value.
        new StoreInst(&I, AllocaI, SeqStartBB->getTerminator());

        // Emit a load instruction and replace the use of the output value
        // with it.
        for (Instruction *UsrI : OutsideUsers) {
          LoadInst *LoadI = new LoadInst(
              I.getType(), AllocaI, I.getName() + ".seq.output.load", UsrI);
          UsrI->replaceUsesOfWith(&I, LoadI);
        }
      }

      OpenMPIRBuilder::LocationDescription Loc(
          InsertPointTy(ParentBB, ParentBB->end()), DL);
      InsertPointTy SeqAfterIP =
          OMPInfoCache.OMPBuilder.createMaster(Loc, BodyGenCB, FiniCB);

      OMPInfoCache.OMPBuilder.createBarrier(SeqAfterIP, OMPD_parallel);

      BranchInst::Create(SeqAfterBB, SeqAfterIP.getBlock());

      LLVM_DEBUG(dbgs() << TAG << "After sequential inlining " << *OuterFn
                        << "\n");
    };

    // Helper to merge the __kmpc_fork_call calls in MergableCIs. They are all
    // contained in BB and only separated by instructions that can be
    // redundantly executed in parallel. The block BB is split before the first
    // call (in MergableCIs) and after the last so the entire region we merge
    // into a single parallel region is contained in a single basic block
    // without any other instructions. We use the OpenMPIRBuilder to outline
    // that block and call the resulting function via __kmpc_fork_call.
    auto Merge = [&](SmallVectorImpl<CallInst *> &MergableCIs,
                     BasicBlock *BB) {
      // TODO: Change the interface to allow single CIs expanded, e.g., to
      //       include an outer loop.
      assert(MergableCIs.size() > 1 && "Assumed multiple mergable CIs");

      auto Remark = [&](OptimizationRemark OR) {
        OR << "Parallel region at "
           << ore::NV("OpenMPParallelMergeFront",
                      MergableCIs.front()->getDebugLoc())
           << " merged with parallel regions at ";
        for (auto *CI : llvm::drop_begin(MergableCIs)) {
          OR << ore::NV("OpenMPParallelMerge", CI->getDebugLoc());
          if (CI != MergableCIs.back())
            OR << ", ";
        }
        return OR;
      };

      emitRemark<OptimizationRemark>(MergableCIs.front(),
                                     "OpenMPParallelRegionMerging", Remark);

      Function *OriginalFn = BB->getParent();
      LLVM_DEBUG(dbgs() << TAG << "Merge " << MergableCIs.size()
                        << " parallel regions in " << OriginalFn->getName()
                        << "\n");

      // Isolate the calls to merge in a separate block.
      EndBB = SplitBlock(BB, MergableCIs.back()->getNextNode(), DT, LI);
      BasicBlock *AfterBB =
          SplitBlock(EndBB, &*EndBB->getFirstInsertionPt(), DT, LI);
      StartBB = SplitBlock(BB, MergableCIs.front(), DT, LI, nullptr,
                           "omp.par.merged");

      assert(BB->getUniqueSuccessor() == StartBB && "Expected a different CFG");
      const DebugLoc DL = BB->getTerminator()->getDebugLoc();
      BB->getTerminator()->eraseFromParent();

      // Create sequential regions for sequential instructions that are
      // in-between mergable parallel regions.
      for (auto *It = MergableCIs.begin(), *End = MergableCIs.end() - 1;
           It != End; ++It) {
        Instruction *ForkCI = *It;
        Instruction *NextForkCI = *(It + 1);

        // Continue if there are no in-between instructions.
        if (ForkCI->getNextNode() == NextForkCI)
          continue;

        CreateSequentialRegion(OriginalFn, BB, ForkCI->getNextNode(),
                               NextForkCI->getPrevNode());
      }

      OpenMPIRBuilder::LocationDescription Loc(InsertPointTy(BB, BB->end()),
                                               DL);
      IRBuilder<>::InsertPoint AllocaIP(
          &OriginalFn->getEntryBlock(),
          OriginalFn->getEntryBlock().getFirstInsertionPt());
      // Create the merged parallel region with default proc binding, to
      // avoid overriding binding settings, and without explicit cancellation.
      InsertPointTy AfterIP = OMPInfoCache.OMPBuilder.createParallel(
          Loc, AllocaIP, BodyGenCB, PrivCB, FiniCB, nullptr, nullptr,
          OMP_PROC_BIND_default, /* IsCancellable */ false);
      BranchInst::Create(AfterBB, AfterIP.getBlock());

      // Perform the actual outlining.
      OMPInfoCache.OMPBuilder.finalize(OriginalFn,
                                       /* AllowExtractorSinking */ true);

      Function *OutlinedFn = MergableCIs.front()->getCaller();

      // Replace the __kmpc_fork_call calls with direct calls to the outlined
      // callbacks.
      SmallVector<Value *, 8> Args;
      for (auto *CI : MergableCIs) {
        Value *Callee =
            CI->getArgOperand(CallbackCalleeOperand)->stripPointerCasts();
        FunctionType *FT =
            cast<FunctionType>(Callee->getType()->getPointerElementType());
        Args.clear();
        Args.push_back(OutlinedFn->getArg(0));
        Args.push_back(OutlinedFn->getArg(1));
        for (unsigned U = CallbackFirstArgOperand, E = CI->getNumArgOperands();
             U < E; ++U)
          Args.push_back(CI->getArgOperand(U));

        CallInst *NewCI = CallInst::Create(FT, Callee, Args, "", CI);
        if (CI->getDebugLoc())
          NewCI->setDebugLoc(CI->getDebugLoc());

        // Forward parameter attributes from the callback to the callee.
        for (unsigned U = CallbackFirstArgOperand, E = CI->getNumArgOperands();
             U < E; ++U)
          for (const Attribute &A : CI->getAttributes().getParamAttributes(U))
            NewCI->addParamAttr(
                U - (CallbackFirstArgOperand - CallbackCalleeOperand), A);

        // Emit an explicit barrier to replace the implicit fork-join barrier.
        if (CI != MergableCIs.back()) {
          // TODO: Remove barrier if the merged parallel region includes the
          //       'nowait' clause.
          OMPInfoCache.OMPBuilder.createBarrier(
              InsertPointTy(NewCI->getParent(),
                            NewCI->getNextNode()->getIterator()),
              OMPD_parallel);
        }

        auto Remark = [&](OptimizationRemark OR) {
          return OR << "Parallel region at "
                    << ore::NV("OpenMPParallelMerge", CI->getDebugLoc())
                    << " merged with "
                    << ore::NV("OpenMPParallelMergeFront",
                               MergableCIs.front()->getDebugLoc());
        };
        if (CI != MergableCIs.front())
          emitRemark<OptimizationRemark>(CI, "OpenMPParallelRegionMerging",
                                         Remark);

        CI->eraseFromParent();
      }

      assert(OutlinedFn != OriginalFn && "Outlining failed");
      CGUpdater.registerOutlinedFunction(*OriginalFn, *OutlinedFn);
      CGUpdater.reanalyzeFunction(*OriginalFn);

      NumOpenMPParallelRegionsMerged += MergableCIs.size();

      return true;
    };

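    // Illustrative sketch of the transformation (simplified, assumed IR):
    // two adjacent fork calls in one block,
    //
    //   call void @__kmpc_fork_call(%ident_t* @0, i32 0, void (...)* @body1)
    //   call void @__kmpc_fork_call(%ident_t* @0, i32 0, void (...)* @body2)
    //
    // become a single fork of a new outlined function that calls @body1 and
    // @body2 directly, separated by an explicit __kmpc_barrier, so only one
    // fork/join (one parallel region) remains.
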
    // Helper function that identifies sequences of
    // __kmpc_fork_call uses in a basic block.
    auto DetectPRsCB = [&](Use &U, Function &F) {
      CallInst *CI = getCallIfRegularCall(U, &RFI);
      BB2PRMap[CI->getParent()].insert(CI);

      return false;
    };

    BB2PRMap.clear();
    RFI.foreachUse(SCC, DetectPRsCB);
    SmallVector<SmallVector<CallInst *, 4>, 4> MergableCIsVector;
    // Find mergable parallel regions within a basic block that are
    // safe to merge, that is, any in-between instructions can safely
    // execute in parallel after merging.
    // TODO: support merging across basic-blocks.
    for (auto &It : BB2PRMap) {
      auto &CIs = It.getSecond();
      if (CIs.size() < 2)
        continue;

      BasicBlock *BB = It.getFirst();
      SmallVector<CallInst *, 4> MergableCIs;

      /// Returns true if the instruction is mergable, false otherwise.
      /// A terminator instruction is unmergable by definition since merging
      /// works within a BB. Instructions before the mergable region are
      /// mergable if they are not calls to OpenMP runtime functions that may
      /// set different execution parameters for subsequent parallel regions.
      /// Instructions in-between parallel regions are mergable if they are
      /// not calls to any non-intrinsic function since that may call a
      /// non-mergable OpenMP runtime function.
      auto IsMergable = [&](Instruction &I, bool IsBeforeMergableRegion) {
        // We do not merge across BBs, hence return false (unmergable) if the
        // instruction is a terminator.
        if (I.isTerminator())
          return false;

        if (!isa<CallInst>(&I))
          return true;

        CallInst *CI = cast<CallInst>(&I);
        if (IsBeforeMergableRegion) {
          Function *CalledFunction = CI->getCalledFunction();
          if (!CalledFunction)
            return false;
          // Return false (unmergable) if the call before the parallel
          // region calls an explicit affinity (proc_bind) or number of
          // threads (num_threads) compiler-generated function. Those settings
          // may be incompatible with following parallel regions.
          // TODO: ICV tracking to detect compatibility.
          for (const auto &RFI : UnmergableCallsInfo) {
            if (CalledFunction == RFI.Declaration)
              return false;
          }
        } else {
          // Return false (unmergable) if there is a call instruction
          // in-between parallel regions when it is not an intrinsic. It
          // may call an unmergable OpenMP runtime function in its callpath.
          // TODO: Keep track of possible OpenMP calls in the callpath.
          if (!isa<IntrinsicInst>(CI))
            return false;
        }

        return true;
      };
      // Find maximal number of parallel region CIs that are safe to merge.
      for (auto It = BB->begin(), End = BB->end(); It != End;) {
        Instruction &I = *It;
        ++It;

        if (CIs.count(&I)) {
          MergableCIs.push_back(cast<CallInst>(&I));
          continue;
        }

        // Continue expanding if the instruction is mergable.
        if (IsMergable(I, MergableCIs.empty()))
          continue;

        // Forward the instruction iterator to skip the next parallel region
        // since there is an unmergable instruction which can affect it.
        for (; It != End; ++It) {
          Instruction &SkipI = *It;
          if (CIs.count(&SkipI)) {
            LLVM_DEBUG(dbgs() << TAG << "Skip parallel region " << SkipI
                              << " due to " << I << "\n");
            ++It;
            break;
          }
        }

        // Store mergable regions found.
        if (MergableCIs.size() > 1) {
          MergableCIsVector.push_back(MergableCIs);
          LLVM_DEBUG(dbgs() << TAG << "Found " << MergableCIs.size()
                            << " parallel regions in block " << BB->getName()
                            << " of function " << BB->getParent()->getName()
                            << "\n";);
        }

        MergableCIs.clear();
      }

      if (!MergableCIsVector.empty()) {
        Changed = true;

        for (auto &MergableCIs : MergableCIsVector)
          Merge(MergableCIs, BB);
        MergableCIsVector.clear();
      }
    }

    if (Changed) {
      /// Re-collect use for fork calls, emitted barrier calls, and
      /// any emitted master/end_master calls.
      OMPInfoCache.recollectUsesForFunction(OMPRTL___kmpc_fork_call);
      OMPInfoCache.recollectUsesForFunction(OMPRTL___kmpc_barrier);
      OMPInfoCache.recollectUsesForFunction(OMPRTL___kmpc_master);
      OMPInfoCache.recollectUsesForFunction(OMPRTL___kmpc_end_master);
    }

    return Changed;
  }

  /// Try to delete parallel regions if possible.
  bool deleteParallelRegions() {
    const unsigned CallbackCalleeOperand = 2;

    OMPInformationCache::RuntimeFunctionInfo &RFI =
        OMPInfoCache.RFIs[OMPRTL___kmpc_fork_call];

    if (!RFI.Declaration)
      return false;

    bool Changed = false;
    auto DeleteCallCB = [&](Use &U, Function &) {
      CallInst *CI = getCallIfRegularCall(U);
      if (!CI)
        return false;
      auto *Fn = dyn_cast<Function>(
          CI->getArgOperand(CallbackCalleeOperand)->stripPointerCasts());
      if (!Fn)
        return false;
      if (!Fn->onlyReadsMemory())
        return false;
      if (!Fn->hasFnAttribute(Attribute::WillReturn))
        return false;

      LLVM_DEBUG(dbgs() << TAG << "Delete read-only parallel region in "
                        << CI->getCaller()->getName() << "\n");

      auto Remark = [&](OptimizationRemark OR) {
        return OR << "Parallel region in "
                  << ore::NV("OpenMPParallelDelete", CI->getCaller()->getName())
                  << " deleted";
      };
      emitRemark<OptimizationRemark>(CI, "OpenMPParallelRegionDeletion",
                                     Remark);

      CGUpdater.removeCallSite(*CI);
      CI->eraseFromParent();
      Changed = true;
      ++NumOpenMPParallelRegionsDeleted;
      return true;
    };

    RFI.foreachUse(SCC, DeleteCallCB);

    return Changed;
  }

  /// Try to eliminate runtime calls by reusing existing ones.
  bool deduplicateRuntimeCalls() {
    bool Changed = false;

    RuntimeFunction DeduplicableRuntimeCallIDs[] = {
        OMPRTL_omp_get_num_threads,
        OMPRTL_omp_in_parallel,
        OMPRTL_omp_get_cancellation,
        OMPRTL_omp_get_thread_limit,
        OMPRTL_omp_get_supported_active_levels,
        OMPRTL_omp_get_level,
        OMPRTL_omp_get_ancestor_thread_num,
        OMPRTL_omp_get_team_size,
        OMPRTL_omp_get_active_level,
        OMPRTL_omp_in_final,
        OMPRTL_omp_get_proc_bind,
        OMPRTL_omp_get_num_places,
        OMPRTL_omp_get_num_procs,
        OMPRTL_omp_get_place_num,
        OMPRTL_omp_get_partition_num_places,
        OMPRTL_omp_get_partition_place_nums};

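    // Illustrative sketch (assumed IR, not from a specific test): repeated
    // calls to one of these runtime queries in a function, e.g.
    //
    //   %a = call i32 @omp_get_num_threads()
    //   ...
    //   %b = call i32 @omp_get_num_threads()
    //
    // are collapsed to a single call (moved to the entry block when needed)
    // whose result replaces the uses of the other calls.
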
    // Global-tid is handled separately.
    SmallSetVector<Value *, 16> GTIdArgs;
    collectGlobalThreadIdArguments(GTIdArgs);
    LLVM_DEBUG(dbgs() << TAG << "Found " << GTIdArgs.size()
                      << " global thread ID arguments\n");

    for (Function *F : SCC) {
      for (auto DeduplicableRuntimeCallID : DeduplicableRuntimeCallIDs)
        Changed |= deduplicateRuntimeCalls(
            *F, OMPInfoCache.RFIs[DeduplicableRuntimeCallID]);

      // __kmpc_global_thread_num is special as we can replace it with an
      // argument in enough cases to make it worth trying.
      Value *GTIdArg = nullptr;
      for (Argument &Arg : F->args())
        if (GTIdArgs.count(&Arg)) {
          GTIdArg = &Arg;
          break;
        }
      Changed |= deduplicateRuntimeCalls(
          *F, OMPInfoCache.RFIs[OMPRTL___kmpc_global_thread_num], GTIdArg);
    }

    return Changed;
  }

  /// Tries to hide the latency of runtime calls that involve host to
  /// device memory transfers by splitting them into their "issue" and "wait"
  /// versions. The "issue" is moved upwards as much as possible. The "wait"
  /// is moved downwards as much as possible. The "issue" issues the memory
  /// transfer asynchronously, returning a handle. The "wait" waits on the
  /// returned handle for the memory transfer to finish.
  bool hideMemTransfersLatency() {
    auto &RFI = OMPInfoCache.RFIs[OMPRTL___tgt_target_data_begin_mapper];
    bool Changed = false;
    auto SplitMemTransfers = [&](Use &U, Function &Decl) {
      auto *RTCall = getCallIfRegularCall(U, &RFI);
      if (!RTCall)
        return false;

      OffloadArray OffloadArrays[3];
      if (!getValuesInOffloadArrays(*RTCall, OffloadArrays))
        return false;

      LLVM_DEBUG(dumpValuesInOffloadArrays(OffloadArrays));

      // TODO: Check if can be moved upwards.
      bool WasSplit = false;
      Instruction *WaitMovementPoint = canBeMovedDownwards(*RTCall);
      if (WaitMovementPoint)
        WasSplit = splitTargetDataBeginRTC(*RTCall, *WaitMovementPoint);

      Changed |= WasSplit;
      return WasSplit;
    };
    RFI.foreachUse(SCC, SplitMemTransfers);

    return Changed;
  }

  void analysisGlobalization() {
    RuntimeFunction GlobalizationRuntimeIDs[] = {
        OMPRTL___kmpc_data_sharing_coalesced_push_stack,
        OMPRTL___kmpc_data_sharing_push_stack};

    for (const auto GlobalizationCallID : GlobalizationRuntimeIDs) {
      auto &RFI = OMPInfoCache.RFIs[GlobalizationCallID];

      auto CheckGlobalization = [&](Use &U, Function &Decl) {
        if (CallInst *CI = getCallIfRegularCall(U, &RFI)) {
          auto Remark = [&](OptimizationRemarkAnalysis ORA) {
            return ORA
                   << "Found thread data sharing on the GPU. "
                   << "Expect degraded performance due to data globalization.";
          };
          emitRemark<OptimizationRemarkAnalysis>(CI, "OpenMPGlobalization",
                                                 Remark);
        }

        return false;
      };

      RFI.foreachUse(SCC, CheckGlobalization);
    }
  }

  /// Maps the values stored in the offload arrays passed as arguments to
  /// \p RuntimeCall into the offload arrays in \p OAs.
  bool getValuesInOffloadArrays(CallInst &RuntimeCall,
                                MutableArrayRef<OffloadArray> OAs) {
    assert(OAs.size() == 3 && "Need space for three offload arrays!");

    // A runtime call that involves memory offloading looks something like:
    // call void @__tgt_target_data_begin_mapper(arg0, arg1,
    //   i8** %offload_baseptrs, i8** %offload_ptrs, i64* %offload_sizes,
    // ...)
    // So, the idea is to access the allocas that allocate space for these
    // offload arrays, offload_baseptrs, offload_ptrs, offload_sizes.
    // Therefore:
    // i8** %offload_baseptrs.
    Value *BasePtrsArg =
        RuntimeCall.getArgOperand(OffloadArray::BasePtrsArgNum);
    // i8** %offload_ptrs.
    Value *PtrsArg = RuntimeCall.getArgOperand(OffloadArray::PtrsArgNum);
    // i8** %offload_sizes.
    Value *SizesArg = RuntimeCall.getArgOperand(OffloadArray::SizesArgNum);

    // Get values stored in **offload_baseptrs.
    auto *V = getUnderlyingObject(BasePtrsArg);
    if (!isa<AllocaInst>(V))
      return false;
    auto *BasePtrsArray = cast<AllocaInst>(V);
    if (!OAs[0].initialize(*BasePtrsArray, RuntimeCall))
      return false;

    // Get values stored in **offload_ptrs.
    V = getUnderlyingObject(PtrsArg);
    if (!isa<AllocaInst>(V))
      return false;
    auto *PtrsArray = cast<AllocaInst>(V);
    if (!OAs[1].initialize(*PtrsArray, RuntimeCall))
      return false;

    // Get values stored in **offload_sizes.
    V = getUnderlyingObject(SizesArg);
    // If it's a [constant] global array don't analyze it.
    if (isa<GlobalValue>(V))
      return isa<Constant>(V);
    if (!isa<AllocaInst>(V))
      return false;

    auto *SizesArray = cast<AllocaInst>(V);
    if (!OAs[2].initialize(*SizesArray, RuntimeCall))
      return false;

    return true;
  }

  /// Prints the values in the OffloadArrays \p OAs using LLVM_DEBUG.
  /// For now this is a way to test that the function getValuesInOffloadArrays
  /// is working properly.
  /// TODO: Move this to a unittest when unittests are available for OpenMPOpt.
  void dumpValuesInOffloadArrays(ArrayRef<OffloadArray> OAs) {
    assert(OAs.size() == 3 && "There are three offload arrays to debug!");

    LLVM_DEBUG(dbgs() << TAG << " Successfully got offload values:\n");
    std::string ValuesStr;
    raw_string_ostream Printer(ValuesStr);
    std::string Separator = " --- ";

    for (auto *BP : OAs[0].StoredValues) {
      BP->print(Printer);
      Printer << Separator;
    }
    LLVM_DEBUG(dbgs() << "\t\toffload_baseptrs: " << Printer.str() << "\n");
    ValuesStr.clear();

    for (auto *P : OAs[1].StoredValues) {
      P->print(Printer);
      Printer << Separator;
    }
    LLVM_DEBUG(dbgs() << "\t\toffload_ptrs: " << Printer.str() << "\n");
    ValuesStr.clear();

    for (auto *S : OAs[2].StoredValues) {
      S->print(Printer);
      Printer << Separator;
    }
    LLVM_DEBUG(dbgs() << "\t\toffload_sizes: " << Printer.str() << "\n");
  }

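  // Illustrative sketch of the split performed below (simplified, assumed IR):
  //
  //   call void @__tgt_target_data_begin_mapper(i64 %dev, ..., i64* %sizes)
  //   ... independent host code ...
  //   ; first instruction that may touch memory
  //
  // becomes
  //
  //   %handle = alloca %struct.__tgt_async_info
  //   call void @__tgt_target_data_begin_mapper_issue(i64 %dev, ..., %handle)
  //   ... independent host code ...
  //   call void @__tgt_target_data_begin_mapper_wait(i64 %dev, %handle)
  //
  // so the transfer overlaps with host instructions that do not access memory.
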
  /// Returns the instruction where the "wait" counterpart \p RuntimeCall can
  /// be moved. Returns nullptr if the movement is not possible, or not worth
  /// it.
  Instruction *canBeMovedDownwards(CallInst &RuntimeCall) {
    // FIXME: This traverses only the BasicBlock where RuntimeCall is.
    //        Make it traverse the CFG.

    Instruction *CurrentI = &RuntimeCall;
    bool IsWorthIt = false;
    while ((CurrentI = CurrentI->getNextNode())) {

      // TODO: Once we detect the regions to be offloaded we should use the
      //       alias analysis manager to check if CurrentI may modify one of
      //       the offloaded regions.
      if (CurrentI->mayHaveSideEffects() || CurrentI->mayReadFromMemory()) {
        if (IsWorthIt)
          return CurrentI;

        return nullptr;
      }

      // FIXME: For now, moving the "wait" over anything without side effects
      //        is considered worth it.
      IsWorthIt = true;
    }

    // Return end of BasicBlock.
    return RuntimeCall.getParent()->getTerminator();
  }

  /// Splits \p RuntimeCall into its "issue" and "wait" counterparts.
  bool splitTargetDataBeginRTC(CallInst &RuntimeCall,
                               Instruction &WaitMovementPoint) {
    // Create stack allocated handle (__tgt_async_info) at the beginning of
    // the function. Used for storing information of the async transfer,
    // allowing us to wait on it later.
    auto &IRBuilder = OMPInfoCache.OMPBuilder;
    auto *F = RuntimeCall.getCaller();
    Instruction *FirstInst = &(F->getEntryBlock().front());
    AllocaInst *Handle = new AllocaInst(
        IRBuilder.AsyncInfo, F->getAddressSpace(), "handle", FirstInst);

    // Add "issue" runtime call declaration:
    // declare %struct.tgt_async_info @__tgt_target_data_begin_issue(i64, i32,
    //   i8**, i8**, i64*, i64*)
    FunctionCallee IssueDecl = IRBuilder.getOrCreateRuntimeFunction(
        M, OMPRTL___tgt_target_data_begin_mapper_issue);

    // Change RuntimeCall call site for its asynchronous version.
    SmallVector<Value *, 16> Args;
    for (auto &Arg : RuntimeCall.args())
      Args.push_back(Arg.get());
    Args.push_back(Handle);

    CallInst *IssueCallsite =
        CallInst::Create(IssueDecl, Args, /*NameStr=*/"", &RuntimeCall);
    RuntimeCall.eraseFromParent();

    // Add "wait" runtime call declaration:
    // declare void @__tgt_target_data_begin_wait(i64, %struct.__tgt_async_info)
    FunctionCallee WaitDecl = IRBuilder.getOrCreateRuntimeFunction(
        M, OMPRTL___tgt_target_data_begin_mapper_wait);

    Value *WaitParams[2] = {
        IssueCallsite->getArgOperand(
            OffloadArray::DeviceIDArgNum), // device_id.
        Handle                             // handle to wait on.
    };
    CallInst::Create(WaitDecl, WaitParams, /*NameStr=*/"", &WaitMovementPoint);

    return true;
  }

  static Value *combinedIdentStruct(Value *CurrentIdent, Value *NextIdent,
                                    bool GlobalOnly, bool &SingleChoice) {
    if (CurrentIdent == NextIdent)
      return CurrentIdent;

    // TODO: Figure out how to actually combine multiple debug locations. For
    //       now we just keep an existing one if there is a single choice.
    if (!GlobalOnly || isa<GlobalValue>(NextIdent)) {
      SingleChoice = !CurrentIdent;
      return NextIdent;
    }
    return nullptr;
  }

  /// Return a `struct ident_t*` value that represents the ones used in the
  /// calls of \p RFI inside of \p F. If \p GlobalOnly is true, we will not
  /// return a local `struct ident_t*`. For now, if we cannot find a suitable
  /// return value we create one from scratch. We also do not yet combine
  /// information, e.g., the source locations, see combinedIdentStruct.
  Value *
  getCombinedIdentFromCallUsesIn(OMPInformationCache::RuntimeFunctionInfo &RFI,
                                 Function &F, bool GlobalOnly) {
    bool SingleChoice = true;
    Value *Ident = nullptr;
    auto CombineIdentStruct = [&](Use &U, Function &Caller) {
      CallInst *CI = getCallIfRegularCall(U, &RFI);
      if (!CI || &F != &Caller)
        return false;
      Ident = combinedIdentStruct(Ident, CI->getArgOperand(0),
                                  /* GlobalOnly */ true, SingleChoice);
      return false;
    };
    RFI.foreachUse(SCC, CombineIdentStruct);

    if (!Ident || !SingleChoice) {
      // The IRBuilder uses the insertion block to get to the module, this is
      // unfortunate but we work around it for now.
      if (!OMPInfoCache.OMPBuilder.getInsertionPoint().getBlock())
        OMPInfoCache.OMPBuilder.updateToLocation(OpenMPIRBuilder::InsertPointTy(
            &F.getEntryBlock(), F.getEntryBlock().begin()));
      // Create a fallback location if none was found.
      // TODO: Use the debug locations of the calls instead.
      Constant *Loc = OMPInfoCache.OMPBuilder.getOrCreateDefaultSrcLocStr();
      Ident = OMPInfoCache.OMPBuilder.getOrCreateIdent(Loc);
    }
    return Ident;
  }

  /// Try to eliminate calls of \p RFI in \p F by reusing an existing one or
  /// \p ReplVal if given.
  bool deduplicateRuntimeCalls(Function &F,
                               OMPInformationCache::RuntimeFunctionInfo &RFI,
                               Value *ReplVal = nullptr) {
    auto *UV = RFI.getUseVector(F);
    if (!UV || UV->size() + (ReplVal != nullptr) < 2)
      return false;

    LLVM_DEBUG(
        dbgs() << TAG << "Deduplicate " << UV->size() << " uses of " << RFI.Name
               << (ReplVal ? " with an existing value\n" : "\n") << "\n");

    assert((!ReplVal || (isa<Argument>(ReplVal) &&
                         cast<Argument>(ReplVal)->getParent() == &F)) &&
           "Unexpected replacement value!");

    // TODO: Use dominance to find a good position instead.
    auto CanBeMoved = [this](CallBase &CB) {
      unsigned NumArgs = CB.getNumArgOperands();
      if (NumArgs == 0)
        return true;
      if (CB.getArgOperand(0)->getType() != OMPInfoCache.OMPBuilder.IdentPtr)
        return false;
      for (unsigned u = 1; u < NumArgs; ++u)
        if (isa<Instruction>(CB.getArgOperand(u)))
          return false;
      return true;
    };

    if (!ReplVal) {
      for (Use *U : *UV)
        if (CallInst *CI = getCallIfRegularCall(*U, &RFI)) {
          if (!CanBeMoved(*CI))
            continue;

          auto Remark = [&](OptimizationRemark OR) {
            auto newLoc = &*F.getEntryBlock().getFirstInsertionPt();
            return OR << "OpenMP runtime call "
                      << ore::NV("OpenMPOptRuntime", RFI.Name) << " moved to "
                      << ore::NV("OpenMPRuntimeMoves", newLoc->getDebugLoc());
          };
          emitRemark<OptimizationRemark>(CI, "OpenMPRuntimeCodeMotion", Remark);

          CI->moveBefore(&*F.getEntryBlock().getFirstInsertionPt());
          ReplVal = CI;
          break;
        }
      if (!ReplVal)
        return false;
    }

    // If we use a call as a replacement value we need to make sure the ident
    // is valid at the new location. For now we just pick a global one, either
    // existing and used by one of the calls, or created from scratch.
    if (CallBase *CI = dyn_cast<CallBase>(ReplVal)) {
      if (CI->getNumArgOperands() > 0 &&
          CI->getArgOperand(0)->getType() == OMPInfoCache.OMPBuilder.IdentPtr) {
        Value *Ident = getCombinedIdentFromCallUsesIn(RFI, F,
                                                      /* GlobalOnly */ true);
        CI->setArgOperand(0, Ident);
      }
    }

    bool Changed = false;
    auto ReplaceAndDeleteCB = [&](Use &U, Function &Caller) {
      CallInst *CI = getCallIfRegularCall(U, &RFI);
      if (!CI || CI == ReplVal || &F != &Caller)
        return false;
      assert(CI->getCaller() == &F && "Unexpected call!");

      auto Remark = [&](OptimizationRemark OR) {
        return OR << "OpenMP runtime call "
                  << ore::NV("OpenMPOptRuntime", RFI.Name) << " deduplicated";
      };
      emitRemark<OptimizationRemark>(CI, "OpenMPRuntimeDeduplicated", Remark);

      CGUpdater.removeCallSite(*CI);
      CI->replaceAllUsesWith(ReplVal);
      CI->eraseFromParent();
      ++NumOpenMPRuntimeCallsDeduplicated;
      Changed = true;
      return true;
    };
    RFI.foreachUse(SCC, ReplaceAndDeleteCB);

    return Changed;
  }

  /// Collect arguments that represent the global thread id in \p GTIdArgs.
  void collectGlobalThreadIdArguments(SmallSetVector<Value *, 16> &GTIdArgs) {
    // TODO: Below we basically perform a fixpoint iteration with a pessimistic
    //       initialization. We could define an AbstractAttribute instead and
    //       run the Attributor here once it can be run as an SCC pass.

    // Helper to check the argument \p ArgNo at all call sites of \p F for
    // a GTId.
    auto CallArgOpIsGTId = [&](Function &F, unsigned ArgNo, CallInst &RefCI) {
      if (!F.hasLocalLinkage())
        return false;
      for (Use &U : F.uses()) {
        if (CallInst *CI = getCallIfRegularCall(U)) {
          Value *ArgOp = CI->getArgOperand(ArgNo);
          if (CI == &RefCI || GTIdArgs.count(ArgOp) ||
              getCallIfRegularCall(
                  *ArgOp, &OMPInfoCache.RFIs[OMPRTL___kmpc_global_thread_num]))
            continue;
        }
        return false;
      }
      return true;
    };

    // Helper to identify uses of a GTId as GTId arguments.
    auto AddUserArgs = [&](Value &GTId) {
      for (Use &U : GTId.uses())
        if (CallInst *CI = dyn_cast<CallInst>(U.getUser()))
          if (CI->isArgOperand(&U))
            if (Function *Callee = CI->getCalledFunction())
              if (CallArgOpIsGTId(*Callee, U.getOperandNo(), *CI))
                GTIdArgs.insert(Callee->getArg(U.getOperandNo()));
    };

    // The argument users of __kmpc_global_thread_num calls are GTIds.
    OMPInformationCache::RuntimeFunctionInfo &GlobThreadNumRFI =
        OMPInfoCache.RFIs[OMPRTL___kmpc_global_thread_num];

    GlobThreadNumRFI.foreachUse(SCC, [&](Use &U, Function &F) {
      if (CallInst *CI = getCallIfRegularCall(U, &GlobThreadNumRFI))
        AddUserArgs(*CI);
      return false;
    });

    // Transitively search for more arguments by looking at the users of the
    // ones we know already. During the search the GTIdArgs vector is extended
    // so we cannot cache the size nor can we use a range based for.
    for (unsigned u = 0; u < GTIdArgs.size(); ++u)
      AddUserArgs(*GTIdArgs[u]);
  }

  /// Kernel (=GPU) optimizations and utility functions
  ///
  ///{{

  /// Check if \p F is a kernel, hence entry point for target offloading.
  bool isKernel(Function &F) { return OMPInfoCache.Kernels.count(&F); }

  /// Cache to remember the unique kernel for a function.
  DenseMap<Function *, Optional<Kernel>> UniqueKernelMap;

  /// Find the unique kernel that will execute \p F, if any.
  Kernel getUniqueKernelFor(Function &F);

  /// Find the unique kernel that will execute \p I, if any.
  Kernel getUniqueKernelFor(Instruction &I) {
    return getUniqueKernelFor(*I.getFunction());
  }

  /// Rewrite the device (=GPU) code state machine created in non-SPMD mode in
  /// the cases we can avoid taking the address of a function.
  bool rewriteDeviceCodeStateMachine();

  ///
  ///}}

  /// Emit a remark generically
  ///
  /// This template function can be used to generically emit a remark. The
  /// RemarkKind should be one of the following:
  ///   - OptimizationRemark to indicate a successful optimization attempt
  ///   - OptimizationRemarkMissed to report a failed optimization attempt
  ///   - OptimizationRemarkAnalysis to provide additional information about an
  ///     optimization attempt
  ///
  /// The remark is built using a callback function provided by the caller that
  /// takes a RemarkKind as input and returns a RemarkKind.
  template <typename RemarkKind,
            typename RemarkCallBack = function_ref<RemarkKind(RemarkKind &&)>>
  void emitRemark(Instruction *Inst, StringRef RemarkName,
                  RemarkCallBack &&RemarkCB) const {
    Function *F = Inst->getParent()->getParent();
    auto &ORE = OREGetter(F);

    ORE.emit(
        [&]() { return RemarkCB(RemarkKind(DEBUG_TYPE, RemarkName, Inst)); });
  }

  /// Emit a remark on a function. Since only OptimizationRemark supports
  /// this, it can't be made generic.
  void
  emitRemarkOnFunction(Function *F, StringRef RemarkName,
                       function_ref<OptimizationRemark(OptimizationRemark &&)>
                           &&RemarkCB) const {
    auto &ORE = OREGetter(F);

    ORE.emit([&]() {
      return RemarkCB(OptimizationRemark(DEBUG_TYPE, RemarkName, F));
    });
  }

  /// The underlying module.
  Module &M;

  /// The SCC we are operating on.
  SmallVectorImpl<Function *> &SCC;

  /// Callback to update the call graph, the first argument is a removed call,
  /// the second an optional replacement call.
  CallGraphUpdater &CGUpdater;

  /// Callback to get an OptimizationRemarkEmitter from a Function *
  OptimizationRemarkGetter OREGetter;

  /// OpenMP-specific information cache. Also used for Attributor runs.
  OMPInformationCache &OMPInfoCache;

  /// Attributor instance.
  Attributor &A;

  /// Helper function to run Attributor on SCC.
  bool runAttributor() {
    if (SCC.empty())
      return false;

    registerAAs();

    ChangeStatus Changed = A.run();

    LLVM_DEBUG(dbgs() << "[Attributor] Done with " << SCC.size()
                      << " functions, result: " << Changed << ".\n");

    return Changed == ChangeStatus::CHANGED;
  }

  /// Populate the Attributor with abstract attribute opportunities in the
  /// function.
  void registerAAs() {
    if (SCC.empty())
      return;

    // Create CallSite AA for all Getters.
    for (int Idx = 0; Idx < OMPInfoCache.ICVs.size() - 1; ++Idx) {
      auto ICVInfo = OMPInfoCache.ICVs[static_cast<InternalControlVar>(Idx)];

      auto &GetterRFI = OMPInfoCache.RFIs[ICVInfo.Getter];

      auto CreateAA = [&](Use &U, Function &Caller) {
        CallInst *CI = OpenMPOpt::getCallIfRegularCall(U, &GetterRFI);
        if (!CI)
          return false;

        auto &CB = cast<CallBase>(*CI);

        IRPosition CBPos = IRPosition::callsite_function(CB);
        A.getOrCreateAAFor<AAICVTracker>(CBPos);
        return false;
      };

      GetterRFI.foreachUse(SCC, CreateAA);
    }
  }
};

Kernel OpenMPOpt::getUniqueKernelFor(Function &F) {
  if (!OMPInfoCache.ModuleSlice.count(&F))
    return nullptr;

  // Use a scope to keep the lifetime of the CachedKernel short.
  {
    Optional<Kernel> &CachedKernel = UniqueKernelMap[&F];
    if (CachedKernel)
      return *CachedKernel;

    // TODO: We should use an AA to create an (optimistic and callback
    //       call-aware) call graph. For now we stick to simple patterns that
    //       are less powerful, basically the worst fixpoint.
    if (isKernel(F)) {
      CachedKernel = Kernel(&F);
      return *CachedKernel;
    }

    CachedKernel = nullptr;
    if (!F.hasLocalLinkage()) {

      // See https://openmp.llvm.org/remarks/OptimizationRemarks.html
      auto Remark = [&](OptimizationRemark OR) {
        return OR << "[OMP100] Potentially unknown OpenMP target region caller";
      };
      emitRemarkOnFunction(&F, "OMP100", Remark);

      return nullptr;
    }
  }

  auto GetUniqueKernelForUse = [&](const Use &U) -> Kernel {
    if (auto *Cmp = dyn_cast<ICmpInst>(U.getUser())) {
      // Allow use in equality comparisons.
      if (Cmp->isEquality())
        return getUniqueKernelFor(*Cmp);
      return nullptr;
    }
    if (auto *CB = dyn_cast<CallBase>(U.getUser())) {
      // Allow direct calls.
      if (CB->isCallee(&U))
        return getUniqueKernelFor(*CB);
      // Allow the use in __kmpc_kernel_prepare_parallel calls.
      if (Function *Callee = CB->getCalledFunction())
        if (Callee->getName() == "__kmpc_kernel_prepare_parallel")
          return getUniqueKernelFor(*CB);
      return nullptr;
    }
    // Disallow every other use.
    return nullptr;
  };

  // TODO: In the future we want to track more than just a unique kernel.
  SmallPtrSet<Kernel, 2> PotentialKernels;
  OMPInformationCache::foreachUse(F, [&](const Use &U) {
    PotentialKernels.insert(GetUniqueKernelForUse(U));
  });

  Kernel K = nullptr;
  if (PotentialKernels.size() == 1)
    K = *PotentialKernels.begin();

  // Cache the result.
  UniqueKernelMap[&F] = K;

  return K;
}

bool OpenMPOpt::rewriteDeviceCodeStateMachine() {
  OMPInformationCache::RuntimeFunctionInfo &KernelPrepareParallelRFI =
      OMPInfoCache.RFIs[OMPRTL___kmpc_kernel_prepare_parallel];

  bool Changed = false;
  if (!KernelPrepareParallelRFI)
    return Changed;

  for (Function *F : SCC) {

    // Check if the function is used in a __kmpc_kernel_prepare_parallel call
    // at all.
1692 bool UnknownUse = false; 1693 bool KernelPrepareUse = false; 1694 unsigned NumDirectCalls = 0; 1695 1696 SmallVector<Use *, 2> ToBeReplacedStateMachineUses; 1697 OMPInformationCache::foreachUse(*F, [&](Use &U) { 1698 if (auto *CB = dyn_cast<CallBase>(U.getUser())) 1699 if (CB->isCallee(&U)) { 1700 ++NumDirectCalls; 1701 return; 1702 } 1703 1704 if (isa<ICmpInst>(U.getUser())) { 1705 ToBeReplacedStateMachineUses.push_back(&U); 1706 return; 1707 } 1708 if (!KernelPrepareUse && OpenMPOpt::getCallIfRegularCall( 1709 *U.getUser(), &KernelPrepareParallelRFI)) { 1710 KernelPrepareUse = true; 1711 ToBeReplacedStateMachineUses.push_back(&U); 1712 return; 1713 } 1714 UnknownUse = true; 1715 }); 1716 1717 // Do not emit a remark if we haven't seen a __kmpc_kernel_prepare_parallel 1718 // use. 1719 if (!KernelPrepareUse) 1720 continue; 1721 1722 { 1723 auto Remark = [&](OptimizationRemark OR) { 1724 return OR << "Found a parallel region that is called in a target " 1725 "region but not part of a combined target construct nor " 1726 "nested inside a target construct without intermediate " 1727 "code. This can lead to excessive register usage for " 1728 "unrelated target regions in the same translation unit " 1729 "due to spurious call edges assumed by ptxas."; 1730 }; 1731 emitRemarkOnFunction(F, "OpenMPParallelRegionInNonSPMD", Remark); 1732 } 1733 1734 // If this ever hits, we should investigate. 1735 // TODO: Checking the number of uses is not a necessary restriction and 1736 // should be lifted. 1737 if (UnknownUse || NumDirectCalls != 1 || 1738 ToBeReplacedStateMachineUses.size() != 2) { 1739 { 1740 auto Remark = [&](OptimizationRemark OR) { 1741 return OR << "Parallel region is used in " 1742 << (UnknownUse ? "unknown" : "unexpected") 1743 << " ways; will not attempt to rewrite the state machine."; 1744 }; 1745 emitRemarkOnFunction(F, "OpenMPParallelRegionInNonSPMD", Remark); 1746 } 1747 continue; 1748 } 1749 1750 // Even if we have __kmpc_kernel_prepare_parallel calls, we (for now) give 1751 // up if the function is not called from a unique kernel. 1752 Kernel K = getUniqueKernelFor(*F); 1753 if (!K) { 1754 { 1755 auto Remark = [&](OptimizationRemark OR) { 1756 return OR << "Parallel region is not known to be called from a " 1757 "unique single target region; maybe the surrounding " 1758 "function has external linkage? Will not attempt to " 1759 "rewrite the state machine use."; 1760 }; 1761 emitRemarkOnFunction(F, "OpenMPParallelRegionInMultipleKernels", 1762 Remark); 1763 } 1764 continue; 1765 } 1766 1767 // We now know F is a parallel body function called only from the kernel K. 1768 // We also identified the state machine uses in which we replace the 1769 // function pointer by a new global symbol for identification purposes. This 1770 // ensures only direct calls to the function are left. 1771 1772 { 1773 auto RemarkParallelRegion = [&](OptimizationRemark OR) { 1774 return OR << "Specialize parallel region that is only reached from a " 1775 "single target region to avoid spurious call edges and " 1776 "excessive register usage in other target regions. " 1777 "(parallel region ID: " 1778 << ore::NV("OpenMPParallelRegion", F->getName()) 1779 << ", kernel ID: " 1780 << ore::NV("OpenMPTargetRegion", K->getName()) << ")"; 1781 }; 1782 emitRemarkOnFunction(F, "OpenMPParallelRegionInNonSPMD", 1783 RemarkParallelRegion); 1784 auto RemarkKernel = [&](OptimizationRemark OR) { 1785 return OR << "Target region containing the parallel region that is " 1786 "specialized. (parallel region ID: "
1787 << ore::NV("OpenMPParallelRegion", F->getName()) 1788 << ", kernel ID: " 1789 << ore::NV("OpenMPTargetRegion", K->getName()) << ")"; 1790 }; 1791 emitRemarkOnFunction(K, "OpenMPParallelRegionInNonSPMD", RemarkKernel); 1792 } 1793 1794 Module &M = *F->getParent(); 1795 Type *Int8Ty = Type::getInt8Ty(M.getContext()); 1796 1797 auto *ID = new GlobalVariable( 1798 M, Int8Ty, /* isConstant */ true, GlobalValue::PrivateLinkage, 1799 UndefValue::get(Int8Ty), F->getName() + ".ID"); 1800 1801 for (Use *U : ToBeReplacedStateMachineUses) 1802 U->set(ConstantExpr::getBitCast(ID, U->get()->getType())); 1803 1804 ++NumOpenMPParallelRegionsReplacedInGPUStateMachine; 1805 1806 Changed = true; 1807 } 1808 1809 return Changed; 1810 } 1811 1812 /// Abstract Attribute for tracking ICV values. 1813 struct AAICVTracker : public StateWrapper<BooleanState, AbstractAttribute> { 1814 using Base = StateWrapper<BooleanState, AbstractAttribute>; 1815 AAICVTracker(const IRPosition &IRP, Attributor &A) : Base(IRP) {} 1816 1817 void initialize(Attributor &A) override { 1818 Function *F = getAnchorScope(); 1819 if (!F || !A.isFunctionIPOAmendable(*F)) 1820 indicatePessimisticFixpoint(); 1821 } 1822 1823 /// Returns true if value is assumed to be tracked. 1824 bool isAssumedTracked() const { return getAssumed(); } 1825 1826 /// Returns true if value is known to be tracked. 1827 bool isKnownTracked() const { return getKnown(); } 1828 1829 /// Create an abstract attribute view for the position \p IRP. 1830 static AAICVTracker &createForPosition(const IRPosition &IRP, Attributor &A); 1831 1832 /// Return the value with which \p I can be replaced for specific \p ICV. 1833 virtual Optional<Value *> getReplacementValue(InternalControlVar ICV, 1834 const Instruction *I, 1835 Attributor &A) const { 1836 return None; 1837 } 1838 1839 /// Return an assumed unique ICV value if a single candidate is found. If 1840 /// there cannot be one, return nullptr. If it is not clear yet, return the 1841 /// Optional::NoneType. 1842 virtual Optional<Value *> 1843 getUniqueReplacementValue(InternalControlVar ICV) const = 0; 1844 1845 // Currently only nthreads is being tracked. 1846 // This array will only grow with time. 1847 InternalControlVar TrackableICVs[1] = {ICV_nthreads}; 1848 1849 /// See AbstractAttribute::getName() 1850 const std::string getName() const override { return "AAICVTracker"; } 1851 1852 /// See AbstractAttribute::getIdAddr() 1853 const char *getIdAddr() const override { return &ID; } 1854 1855 /// This function should return true if the type of the \p AA is AAICVTracker. 1856 static bool classof(const AbstractAttribute *AA) { 1857 return (AA->getIdAddr() == &ID); 1858 } 1859 1860 static const char ID; 1861 }; 1862 1863 struct AAICVTrackerFunction : public AAICVTracker { 1864 AAICVTrackerFunction(const IRPosition &IRP, Attributor &A) 1865 : AAICVTracker(IRP, A) {} 1866 1867 // FIXME: come up with better string. 1868 const std::string getAsStr() const override { return "ICVTrackerFunction"; } 1869 1870 // FIXME: come up with some stats. 1871 void trackStatistics() const override {} 1872 1873 /// We don't manifest anything for this AA. 1874 ChangeStatus manifest(Attributor &A) override { 1875 return ChangeStatus::UNCHANGED; 1876 } 1877 1878 // Map of ICVs to their values at specific program points.
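  // For illustration (hypothetical contents): after updateImpl has visited
  //   call void @omp_set_num_threads(i32 4)
  // the nthreads entry of this map contains {that call -> i32 4}, plus a
  // nullptr entry for the first instruction of the entry block to mark the
  // "unknown on function entry" state.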
1879 EnumeratedArray<DenseMap<Instruction *, Value *>, InternalControlVar, 1880 InternalControlVar::ICV___last> 1881 ICVReplacementValuesMap; 1882 1883 ChangeStatus updateImpl(Attributor &A) override { 1884 ChangeStatus HasChanged = ChangeStatus::UNCHANGED; 1885 1886 Function *F = getAnchorScope(); 1887 1888 auto &OMPInfoCache = static_cast<OMPInformationCache &>(A.getInfoCache()); 1889 1890 for (InternalControlVar ICV : TrackableICVs) { 1891 auto &SetterRFI = OMPInfoCache.RFIs[OMPInfoCache.ICVs[ICV].Setter]; 1892 1893 auto &ValuesMap = ICVReplacementValuesMap[ICV]; 1894 auto TrackValues = [&](Use &U, Function &) { 1895 CallInst *CI = OpenMPOpt::getCallIfRegularCall(U); 1896 if (!CI) 1897 return false; 1898 1899 // FIXME: handle setters with more than one argument. 1900 // Track the new value. 1901 if (ValuesMap.insert(std::make_pair(CI, CI->getArgOperand(0))).second) 1902 HasChanged = ChangeStatus::CHANGED; 1903 1904 return false; 1905 }; 1906 1907 auto CallCheck = [&](Instruction &I) { 1908 Optional<Value *> ReplVal = getValueForCall(A, &I, ICV); 1909 if (ReplVal.hasValue() && 1910 ValuesMap.insert(std::make_pair(&I, *ReplVal)).second) 1911 HasChanged = ChangeStatus::CHANGED; 1912 1913 return true; 1914 }; 1915 1916 // Track all changes of an ICV. 1917 SetterRFI.foreachUse(TrackValues, F); 1918 1919 A.checkForAllInstructions(CallCheck, *this, {Instruction::Call}, 1920 /* CheckBBLivenessOnly */ true); 1921 1922 // TODO: Figure out a way to avoid adding an entry in 1923 // ICVReplacementValuesMap. 1924 Instruction *Entry = &F->getEntryBlock().front(); 1925 if (HasChanged == ChangeStatus::CHANGED && !ValuesMap.count(Entry)) 1926 ValuesMap.insert(std::make_pair(Entry, nullptr)); 1927 } 1928 1929 return HasChanged; 1930 } 1931 1932 /// Helper to check if \p I is a call and get the value for it if it is 1933 /// unique. 1934 Optional<Value *> getValueForCall(Attributor &A, const Instruction *I, 1935 InternalControlVar &ICV) const { 1936 1937 const auto *CB = dyn_cast<CallBase>(I); 1938 if (!CB || CB->hasFnAttr("no_openmp") || 1939 CB->hasFnAttr("no_openmp_routines")) 1940 return None; 1941 1942 auto &OMPInfoCache = static_cast<OMPInformationCache &>(A.getInfoCache()); 1943 auto &GetterRFI = OMPInfoCache.RFIs[OMPInfoCache.ICVs[ICV].Getter]; 1944 auto &SetterRFI = OMPInfoCache.RFIs[OMPInfoCache.ICVs[ICV].Setter]; 1945 Function *CalledFunction = CB->getCalledFunction(); 1946 1947 // Indirect call, assume ICV changes. 1948 if (CalledFunction == nullptr) 1949 return nullptr; 1950 if (CalledFunction == GetterRFI.Declaration) 1951 return None; 1952 if (CalledFunction == SetterRFI.Declaration) { 1953 if (ICVReplacementValuesMap[ICV].count(I)) 1954 return ICVReplacementValuesMap[ICV].lookup(I); 1955 1956 return nullptr; 1957 } 1958 1959 // Since we don't know, assume it changes the ICV. 1960 if (CalledFunction->isDeclaration()) 1961 return nullptr; 1962 1963 const auto &ICVTrackingAA = A.getAAFor<AAICVTracker>( 1964 *this, IRPosition::callsite_returned(*CB), DepClassTy::REQUIRED); 1965 1966 if (ICVTrackingAA.isAssumedTracked()) 1967 return ICVTrackingAA.getUniqueReplacementValue(ICV); 1968 1969 // If we don't know, assume it changes. 1970 return nullptr; 1971 } 1972 1973 // We don't provide a unique value for a function, so return None. 1974 Optional<Value *> 1975 getUniqueReplacementValue(InternalControlVar ICV) const override { 1976 return None; 1977 } 1978 1979 /// Return the value with which \p I can be replaced for specific \p ICV.
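  ///
  /// The lookup walks backwards from \p I over the preceding instructions of
  /// its block and, via the worklist, over the terminators of all predecessor
  /// blocks. As a sketch on invented IR, for
  ///   call void @omp_set_num_threads(i32 4)
  ///   %v = call i32 @omp_get_max_threads()
  /// the instruction defining %v can be replaced by the constant 4, whereas
  /// two predecessors that establish different values make the result nullptr
  /// (i.e., unknown).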
1980 Optional<Value *> getReplacementValue(InternalControlVar ICV, 1981 const Instruction *I, 1982 Attributor &A) const override { 1983 const auto &ValuesMap = ICVReplacementValuesMap[ICV]; 1984 if (ValuesMap.count(I)) 1985 return ValuesMap.lookup(I); 1986 1987 SmallVector<const Instruction *, 16> Worklist; 1988 SmallPtrSet<const Instruction *, 16> Visited; 1989 Worklist.push_back(I); 1990 1991 Optional<Value *> ReplVal; 1992 1993 while (!Worklist.empty()) { 1994 const Instruction *CurrInst = Worklist.pop_back_val(); 1995 if (!Visited.insert(CurrInst).second) 1996 continue; 1997 1998 const BasicBlock *CurrBB = CurrInst->getParent(); 1999 2000 // Go up and look for all potential setters/calls that might change the 2001 // ICV. 2002 while ((CurrInst = CurrInst->getPrevNode())) { 2003 if (ValuesMap.count(CurrInst)) { 2004 Optional<Value *> NewReplVal = ValuesMap.lookup(CurrInst); 2005 // Unknown value, track new. 2006 if (!ReplVal.hasValue()) { 2007 ReplVal = NewReplVal; 2008 break; 2009 } 2010 2011 // If we found a new value, we can't know the ICV value anymore. 2012 if (NewReplVal.hasValue()) 2013 if (ReplVal != NewReplVal) 2014 return nullptr; 2015 2016 break; 2017 } 2018 2019 Optional<Value *> NewReplVal = getValueForCall(A, CurrInst, ICV); 2020 if (!NewReplVal.hasValue()) 2021 continue; 2022 2023 // Unknown value, track new. 2024 if (!ReplVal.hasValue()) { 2025 ReplVal = NewReplVal; 2026 break; 2027 } 2028 2029 2030 // We found a new value; we can't know the ICV value anymore. 2031 if (ReplVal != NewReplVal) 2032 return nullptr; 2033 } 2034 2035 // If we are in the same BB and we have a value, we are done. 2036 if (CurrBB == I->getParent() && ReplVal.hasValue()) 2037 return ReplVal; 2038 2039 // Go through all predecessors and add terminators for analysis. 2040 for (const BasicBlock *Pred : predecessors(CurrBB)) 2041 if (const Instruction *Terminator = Pred->getTerminator()) 2042 Worklist.push_back(Terminator); 2043 } 2044 2045 return ReplVal; 2046 } 2047 }; 2048 2049 struct AAICVTrackerFunctionReturned : AAICVTracker { 2050 AAICVTrackerFunctionReturned(const IRPosition &IRP, Attributor &A) 2051 : AAICVTracker(IRP, A) {} 2052 2053 // FIXME: come up with better string. 2054 const std::string getAsStr() const override { 2055 return "ICVTrackerFunctionReturned"; 2056 } 2057 2058 // FIXME: come up with some stats. 2059 void trackStatistics() const override {} 2060 2061 /// We don't manifest anything for this AA. 2062 ChangeStatus manifest(Attributor &A) override { 2063 return ChangeStatus::UNCHANGED; 2064 } 2065 2066 // Map of ICVs to their unique replacement values at function return. 2067 EnumeratedArray<Optional<Value *>, InternalControlVar, 2068 InternalControlVar::ICV___last> 2069 ICVReplacementValuesMap; 2070 2071 /// Return the unique replacement value for \p ICV, if one has been found.
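  ///
  /// For this AA the "unique" value is, roughly, the one replacement value
  /// seen at every return instruction of the function: None means it has not
  /// been computed yet, nullptr means the returns disagree (see updateImpl
  /// below).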
2072 Optional<Value *> 2073 getUniqueReplacementValue(InternalControlVar ICV) const override { 2074 return ICVReplacementValuesMap[ICV]; 2075 } 2076 2077 ChangeStatus updateImpl(Attributor &A) override { 2078 ChangeStatus Changed = ChangeStatus::UNCHANGED; 2079 const auto &ICVTrackingAA = A.getAAFor<AAICVTracker>( 2080 *this, IRPosition::function(*getAnchorScope()), DepClassTy::REQUIRED); 2081 2082 if (!ICVTrackingAA.isAssumedTracked()) 2083 return indicatePessimisticFixpoint(); 2084 2085 for (InternalControlVar ICV : TrackableICVs) { 2086 Optional<Value *> &ReplVal = ICVReplacementValuesMap[ICV]; 2087 Optional<Value *> UniqueICVValue; 2088 2089 auto CheckReturnInst = [&](Instruction &I) { 2090 Optional<Value *> NewReplVal = 2091 ICVTrackingAA.getReplacementValue(ICV, &I, A); 2092 2093 // If we found a second ICV value there is no unique returned value. 2094 if (UniqueICVValue.hasValue() && UniqueICVValue != NewReplVal) 2095 return false; 2096 2097 UniqueICVValue = NewReplVal; 2098 2099 return true; 2100 }; 2101 2102 if (!A.checkForAllInstructions(CheckReturnInst, *this, {Instruction::Ret}, 2103 /* CheckBBLivenessOnly */ true)) 2104 UniqueICVValue = nullptr; 2105 2106 if (UniqueICVValue == ReplVal) 2107 continue; 2108 2109 ReplVal = UniqueICVValue; 2110 Changed = ChangeStatus::CHANGED; 2111 } 2112 2113 return Changed; 2114 } 2115 }; 2116 2117 struct AAICVTrackerCallSite : AAICVTracker { 2118 AAICVTrackerCallSite(const IRPosition &IRP, Attributor &A) 2119 : AAICVTracker(IRP, A) {} 2120 2121 void initialize(Attributor &A) override { 2122 Function *F = getAnchorScope(); 2123 if (!F || !A.isFunctionIPOAmendable(*F)) 2124 indicatePessimisticFixpoint(); 2125 2126 // We only initialize this AA for getters, so we need to know which ICV it 2127 // gets. 2128 auto &OMPInfoCache = static_cast<OMPInformationCache &>(A.getInfoCache()); 2129 for (InternalControlVar ICV : TrackableICVs) { 2130 auto ICVInfo = OMPInfoCache.ICVs[ICV]; 2131 auto &Getter = OMPInfoCache.RFIs[ICVInfo.Getter]; 2132 if (Getter.Declaration == getAssociatedFunction()) { 2133 AssociatedICV = ICVInfo.Kind; 2134 return; 2135 } 2136 } 2137 2138 /// Unknown ICV. 2139 indicatePessimisticFixpoint(); 2140 } 2141 2142 ChangeStatus manifest(Attributor &A) override { 2143 if (!ReplVal.hasValue() || !ReplVal.getValue()) 2144 return ChangeStatus::UNCHANGED; 2145 2146 A.changeValueAfterManifest(*getCtxI(), **ReplVal); 2147 A.deleteAfterManifest(*getCtxI()); 2148 2149 return ChangeStatus::CHANGED; 2150 } 2151 2152 // FIXME: come up with better string. 2153 const std::string getAsStr() const override { return "ICVTrackerCallSite"; } 2154 2155 // FIXME: come up with some stats. 2156 void trackStatistics() const override {} 2157 2158 InternalControlVar AssociatedICV; 2159 Optional<Value *> ReplVal; 2160 2161 ChangeStatus updateImpl(Attributor &A) override { 2162 const auto &ICVTrackingAA = A.getAAFor<AAICVTracker>( 2163 *this, IRPosition::function(*getAnchorScope()), DepClassTy::REQUIRED); 2164 2165 // We don't have any information, so we assume it changes the ICV. 2166 if (!ICVTrackingAA.isAssumedTracked()) 2167 return indicatePessimisticFixpoint(); 2168 2169 Optional<Value *> NewReplVal = 2170 ICVTrackingAA.getReplacementValue(AssociatedICV, getCtxI(), A); 2171 2172 if (ReplVal == NewReplVal) 2173 return ChangeStatus::UNCHANGED; 2174 2175 ReplVal = NewReplVal; 2176 return ChangeStatus::CHANGED; 2177 } 2178 2179 // Return the value with which associated value can be replaced for specific 2180 // \p ICV. 
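  // For example (hypothetical run): once updateImpl has established that a
  // unique `call void @omp_set_num_threads(i32 4)` reaches this getter call
  // site, ReplVal holds that i32 4 constant and manifest() above replaces the
  // getter call with it; None means undetermined, nullptr means known to be
  // non-unique.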
2181 Optional<Value *> 2182 getUniqueReplacementValue(InternalControlVar ICV) const override { 2183 return ReplVal; 2184 } 2185 }; 2186 2187 struct AAICVTrackerCallSiteReturned : AAICVTracker { 2188 AAICVTrackerCallSiteReturned(const IRPosition &IRP, Attributor &A) 2189 : AAICVTracker(IRP, A) {} 2190 2191 // FIXME: come up with better string. 2192 const std::string getAsStr() const override { 2193 return "ICVTrackerCallSiteReturned"; 2194 } 2195 2196 // FIXME: come up with some stats. 2197 void trackStatistics() const override {} 2198 2199 /// We don't manifest anything for this AA. 2200 ChangeStatus manifest(Attributor &A) override { 2201 return ChangeStatus::UNCHANGED; 2202 } 2203 2204 // Map of ICV to their values at specific program point. 2205 EnumeratedArray<Optional<Value *>, InternalControlVar, 2206 InternalControlVar::ICV___last> 2207 ICVReplacementValuesMap; 2208 2209 /// Return the value with which associated value can be replaced for specific 2210 /// \p ICV. 2211 Optional<Value *> 2212 getUniqueReplacementValue(InternalControlVar ICV) const override { 2213 return ICVReplacementValuesMap[ICV]; 2214 } 2215 2216 ChangeStatus updateImpl(Attributor &A) override { 2217 ChangeStatus Changed = ChangeStatus::UNCHANGED; 2218 const auto &ICVTrackingAA = A.getAAFor<AAICVTracker>( 2219 *this, IRPosition::returned(*getAssociatedFunction()), 2220 DepClassTy::REQUIRED); 2221 2222 // We don't have any information, so we assume it changes the ICV. 2223 if (!ICVTrackingAA.isAssumedTracked()) 2224 return indicatePessimisticFixpoint(); 2225 2226 for (InternalControlVar ICV : TrackableICVs) { 2227 Optional<Value *> &ReplVal = ICVReplacementValuesMap[ICV]; 2228 Optional<Value *> NewReplVal = 2229 ICVTrackingAA.getUniqueReplacementValue(ICV); 2230 2231 if (ReplVal == NewReplVal) 2232 continue; 2233 2234 ReplVal = NewReplVal; 2235 Changed = ChangeStatus::CHANGED; 2236 } 2237 return Changed; 2238 } 2239 }; 2240 } // namespace 2241 2242 const char AAICVTracker::ID = 0; 2243 2244 AAICVTracker &AAICVTracker::createForPosition(const IRPosition &IRP, 2245 Attributor &A) { 2246 AAICVTracker *AA = nullptr; 2247 switch (IRP.getPositionKind()) { 2248 case IRPosition::IRP_INVALID: 2249 case IRPosition::IRP_FLOAT: 2250 case IRPosition::IRP_ARGUMENT: 2251 case IRPosition::IRP_CALL_SITE_ARGUMENT: 2252 llvm_unreachable("ICVTracker can only be created for function position!"); 2253 case IRPosition::IRP_RETURNED: 2254 AA = new (A.Allocator) AAICVTrackerFunctionReturned(IRP, A); 2255 break; 2256 case IRPosition::IRP_CALL_SITE_RETURNED: 2257 AA = new (A.Allocator) AAICVTrackerCallSiteReturned(IRP, A); 2258 break; 2259 case IRPosition::IRP_CALL_SITE: 2260 AA = new (A.Allocator) AAICVTrackerCallSite(IRP, A); 2261 break; 2262 case IRPosition::IRP_FUNCTION: 2263 AA = new (A.Allocator) AAICVTrackerFunction(IRP, A); 2264 break; 2265 } 2266 2267 return *AA; 2268 } 2269 2270 PreservedAnalyses OpenMPOptPass::run(Module &M, ModuleAnalysisManager &AM) { 2271 if (!containsOpenMP(M, OMPInModule)) 2272 return PreservedAnalyses::all(); 2273 2274 if (DisableOpenMPOptimizations) 2275 return PreservedAnalyses::all(); 2276 2277 // Look at every function definition in the Module. 
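  // Usage sketch (assuming this module variant is registered under the usual
  // pass-registry name):
  //   opt -passes=openmp-opt -S in.ll
  // exercises it directly; the hidden -openmp-opt-disable flag declared above
  // turns the pass into a no-op.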
2278 SmallVector<Function *, 16> SCC; 2279 for (Function &Fn : M) 2280 if (!Fn.isDeclaration()) 2281 SCC.push_back(&Fn); 2282 2283 if (SCC.empty()) 2284 return PreservedAnalyses::all(); 2285 2286 FunctionAnalysisManager &FAM = 2287 AM.getResult<FunctionAnalysisManagerModuleProxy>(M).getManager(); 2288 2289 AnalysisGetter AG(FAM); 2290 2291 auto OREGetter = [&FAM](Function *F) -> OptimizationRemarkEmitter & { 2292 return FAM.getResult<OptimizationRemarkEmitterAnalysis>(*F); 2293 }; 2294 2295 BumpPtrAllocator Allocator; 2296 CallGraphUpdater CGUpdater; 2297 2298 SetVector<Function *> Functions(SCC.begin(), SCC.end()); 2299 OMPInformationCache InfoCache(M, AG, Allocator, /*CGSCC*/ Functions, 2300 OMPInModule.getKernels()); 2301 2302 Attributor A(Functions, InfoCache, CGUpdater); 2303 2304 OpenMPOpt OMPOpt(SCC, CGUpdater, OREGetter, InfoCache, A); 2305 bool Changed = OMPOpt.run(true); 2306 if (Changed) 2307 return PreservedAnalyses::none(); 2308 2309 return PreservedAnalyses::all(); 2310 } 2311 2312 PreservedAnalyses OpenMPOptCGSCCPass::run(LazyCallGraph::SCC &C, 2313 CGSCCAnalysisManager &AM, 2314 LazyCallGraph &CG, 2315 CGSCCUpdateResult &UR) { 2316 if (!containsOpenMP(*C.begin()->getFunction().getParent(), OMPInModule)) 2317 return PreservedAnalyses::all(); 2318 2319 if (DisableOpenMPOptimizations) 2320 return PreservedAnalyses::all(); 2321 2322 SmallVector<Function *, 16> SCC; 2323 // If there are kernels in the module, we have to run on all SCC's. 2324 bool SCCIsInteresting = !OMPInModule.getKernels().empty(); 2325 for (LazyCallGraph::Node &N : C) { 2326 Function *Fn = &N.getFunction(); 2327 SCC.push_back(Fn); 2328 2329 // Do we already know that the SCC contains kernels, 2330 // or that OpenMP functions are called from this SCC? 2331 if (SCCIsInteresting) 2332 continue; 2333 // If not, let's check that. 2334 SCCIsInteresting |= OMPInModule.containsOMPRuntimeCalls(Fn); 2335 } 2336 2337 if (!SCCIsInteresting || SCC.empty()) 2338 return PreservedAnalyses::all(); 2339 2340 FunctionAnalysisManager &FAM = 2341 AM.getResult<FunctionAnalysisManagerCGSCCProxy>(C, CG).getManager(); 2342 2343 AnalysisGetter AG(FAM); 2344 2345 auto OREGetter = [&FAM](Function *F) -> OptimizationRemarkEmitter & { 2346 return FAM.getResult<OptimizationRemarkEmitterAnalysis>(*F); 2347 }; 2348 2349 BumpPtrAllocator Allocator; 2350 CallGraphUpdater CGUpdater; 2351 CGUpdater.initialize(CG, C, AM, UR); 2352 2353 SetVector<Function *> Functions(SCC.begin(), SCC.end()); 2354 OMPInformationCache InfoCache(*(Functions.back()->getParent()), AG, Allocator, 2355 /*CGSCC*/ Functions, OMPInModule.getKernels()); 2356 2357 Attributor A(Functions, InfoCache, CGUpdater); 2358 2359 OpenMPOpt OMPOpt(SCC, CGUpdater, OREGetter, InfoCache, A); 2360 bool Changed = OMPOpt.run(false); 2361 if (Changed) 2362 return PreservedAnalyses::none(); 2363 2364 return PreservedAnalyses::all(); 2365 } 2366 namespace { 2367 2368 struct OpenMPOptCGSCCLegacyPass : public CallGraphSCCPass { 2369 CallGraphUpdater CGUpdater; 2370 OpenMPInModule OMPInModule; 2371 static char ID; 2372 2373 OpenMPOptCGSCCLegacyPass() : CallGraphSCCPass(ID) { 2374 initializeOpenMPOptCGSCCLegacyPassPass(*PassRegistry::getPassRegistry()); 2375 } 2376 2377 void getAnalysisUsage(AnalysisUsage &AU) const override { 2378 CallGraphSCCPass::getAnalysisUsage(AU); 2379 } 2380 2381 bool doInitialization(CallGraph &CG) override { 2382 // Disable the pass if there is no OpenMP (runtime call) in the module. 
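    // Note: containsOpenMP caches its verdict in OMPInModule (see its
    // definition below), so the repeated queries in runOnSCC are cheap after
    // this first scan.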
2383 containsOpenMP(CG.getModule(), OMPInModule); 2384 return false; 2385 } 2386 2387 bool runOnSCC(CallGraphSCC &CGSCC) override { 2388 if (!containsOpenMP(CGSCC.getCallGraph().getModule(), OMPInModule)) 2389 return false; 2390 if (DisableOpenMPOptimizations || skipSCC(CGSCC)) 2391 return false; 2392 2393 SmallVector<Function *, 16> SCC; 2394 // If there are kernels in the module, we have to run on all SCC's. 2395 bool SCCIsInteresting = !OMPInModule.getKernels().empty(); 2396 for (CallGraphNode *CGN : CGSCC) { 2397 Function *Fn = CGN->getFunction(); 2398 if (!Fn || Fn->isDeclaration()) 2399 continue; 2400 SCC.push_back(Fn); 2401 2402 // Do we already know that the SCC contains kernels, 2403 // or that OpenMP functions are called from this SCC? 2404 if (SCCIsInteresting) 2405 continue; 2406 // If not, let's check that. 2407 SCCIsInteresting |= OMPInModule.containsOMPRuntimeCalls(Fn); 2408 } 2409 2410 if (!SCCIsInteresting || SCC.empty()) 2411 return false; 2412 2413 CallGraph &CG = getAnalysis<CallGraphWrapperPass>().getCallGraph(); 2414 CGUpdater.initialize(CG, CGSCC); 2415 2416 // Maintain a map of functions to avoid rebuilding the ORE. 2417 DenseMap<Function *, std::unique_ptr<OptimizationRemarkEmitter>> OREMap; 2418 auto OREGetter = [&OREMap](Function *F) -> OptimizationRemarkEmitter & { 2419 std::unique_ptr<OptimizationRemarkEmitter> &ORE = OREMap[F]; 2420 if (!ORE) 2421 ORE = std::make_unique<OptimizationRemarkEmitter>(F); 2422 return *ORE; 2423 }; 2424 2425 AnalysisGetter AG; 2426 SetVector<Function *> Functions(SCC.begin(), SCC.end()); 2427 BumpPtrAllocator Allocator; 2428 OMPInformationCache InfoCache( 2429 *(Functions.back()->getParent()), AG, Allocator, 2430 /*CGSCC*/ Functions, OMPInModule.getKernels()); 2431 2432 Attributor A(Functions, InfoCache, CGUpdater); 2433 2434 OpenMPOpt OMPOpt(SCC, CGUpdater, OREGetter, InfoCache, A); 2435 return OMPOpt.run(false); 2436 } 2437 2438 bool doFinalization(CallGraph &CG) override { return CGUpdater.finalize(); } 2439 }; 2440 2441 } // end anonymous namespace 2442 2443 void OpenMPInModule::identifyKernels(Module &M) { 2444 2445 NamedMDNode *MD = M.getNamedMetadata("nvvm.annotations"); 2446 if (!MD) 2447 return; 2448 2449 for (auto *Op : MD->operands()) { 2450 if (Op->getNumOperands() < 2) 2451 continue; 2452 MDString *KindID = dyn_cast<MDString>(Op->getOperand(1)); 2453 if (!KindID || KindID->getString() != "kernel") 2454 continue; 2455 2456 Function *KernelFn = 2457 mdconst::dyn_extract_or_null<Function>(Op->getOperand(0)); 2458 if (!KernelFn) 2459 continue; 2460 2461 ++NumOpenMPTargetRegionKernels; 2462 2463 Kernels.insert(KernelFn); 2464 } 2465 } 2466 2467 bool llvm::omp::containsOpenMP(Module &M, OpenMPInModule &OMPInModule) { 2468 if (OMPInModule.isKnown()) 2469 return OMPInModule; 2470 2471 auto RecordFunctionsContainingUsesOf = [&](Function *F) { 2472 for (User *U : F->users()) 2473 if (auto *I = dyn_cast<Instruction>(U)) 2474 OMPInModule.FuncsWithOMPRuntimeCalls.insert(I->getFunction()); 2475 }; 2476 2477 // MSVC doesn't like long if-else chains for some reason and instead just 2478 // issues an error. Work around it. 2479 do { 2480 #define OMP_RTL(_Enum, _Name, ...) \ 2481 if (Function *F = M.getFunction(_Name)) { \ 2482 RecordFunctionsContainingUsesOf(F); \ 2483 OMPInModule = true; \ 2484 } 2485 #include "llvm/Frontend/OpenMP/OMPKinds.def" 2486 } while (false); 2487 2488 // Identify kernels once. TODO: We should split the OMPInformationCache into a 2489 // module and an SCC part.
The kernel information, among other things, could 2490 // go into the module part. 2491 if (OMPInModule.isKnown() && OMPInModule) { 2492 OMPInModule.identifyKernels(M); 2493 return true; 2494 } 2495 2496 return OMPInModule = false; 2497 } 2498 2499 char OpenMPOptCGSCCLegacyPass::ID = 0; 2500 2501 INITIALIZE_PASS_BEGIN(OpenMPOptCGSCCLegacyPass, "openmp-opt-cgscc", 2502 "OpenMP specific optimizations", false, false) 2503 INITIALIZE_PASS_DEPENDENCY(CallGraphWrapperPass) 2504 INITIALIZE_PASS_END(OpenMPOptCGSCCLegacyPass, "openmp-opt-cgscc", 2505 "OpenMP specific optimizations", false, false) 2506 2507 Pass *llvm::createOpenMPOptCGSCCLegacyPass() { 2508 return new OpenMPOptCGSCCLegacyPass(); 2509 } 2510