1 //===-- IPO/OpenMPOpt.cpp - Collection of OpenMP specific optimizations ---===// 2 // 3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4 // See https://llvm.org/LICENSE.txt for license information. 5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 6 // 7 //===----------------------------------------------------------------------===// 8 // 9 // OpenMP specific optimizations: 10 // 11 // - Deduplication of runtime calls, e.g., omp_get_thread_num. 12 // - Replacing globalized device memory with stack memory. 13 // - Replacing globalized device memory with shared memory. 14 // - Parallel region merging. 15 // - Transforming generic-mode device kernels to SPMD mode. 16 // - Specializing the state machine for generic-mode device kernels. 17 // 18 //===----------------------------------------------------------------------===// 19 20 #include "llvm/Transforms/IPO/OpenMPOpt.h" 21 22 #include "llvm/ADT/EnumeratedArray.h" 23 #include "llvm/ADT/PostOrderIterator.h" 24 #include "llvm/ADT/Statistic.h" 25 #include "llvm/Analysis/CallGraph.h" 26 #include "llvm/Analysis/CallGraphSCCPass.h" 27 #include "llvm/Analysis/OptimizationRemarkEmitter.h" 28 #include "llvm/Analysis/ValueTracking.h" 29 #include "llvm/Frontend/OpenMP/OMPConstants.h" 30 #include "llvm/Frontend/OpenMP/OMPIRBuilder.h" 31 #include "llvm/IR/Assumptions.h" 32 #include "llvm/IR/DiagnosticInfo.h" 33 #include "llvm/IR/GlobalValue.h" 34 #include "llvm/IR/Instruction.h" 35 #include "llvm/IR/IntrinsicInst.h" 36 #include "llvm/InitializePasses.h" 37 #include "llvm/Support/CommandLine.h" 38 #include "llvm/Transforms/IPO.h" 39 #include "llvm/Transforms/IPO/Attributor.h" 40 #include "llvm/Transforms/Utils/BasicBlockUtils.h" 41 #include "llvm/Transforms/Utils/CallGraphUpdater.h" 42 #include "llvm/Transforms/Utils/CodeExtractor.h" 43 44 using namespace llvm; 45 using namespace omp; 46 47 #define DEBUG_TYPE "openmp-opt" 48 49 static cl::opt<bool> DisableOpenMPOptimizations( 50 "openmp-opt-disable", cl::ZeroOrMore, 51 cl::desc("Disable OpenMP specific optimizations."), cl::Hidden, 52 cl::init(false)); 53 54 static cl::opt<bool> EnableParallelRegionMerging( 55 "openmp-opt-enable-merging", cl::ZeroOrMore, 56 cl::desc("Enable the OpenMP region merging optimization."), cl::Hidden, 57 cl::init(false)); 58 59 static cl::opt<bool> PrintICVValues("openmp-print-icv-values", cl::init(false), 60 cl::Hidden); 61 static cl::opt<bool> PrintOpenMPKernels("openmp-print-gpu-kernels", 62 cl::init(false), cl::Hidden); 63 64 static cl::opt<bool> HideMemoryTransferLatency( 65 "openmp-hide-memory-transfer-latency", 66 cl::desc("[WIP] Tries to hide the latency of host to device memory" 67 " transfers"), 68 cl::Hidden, cl::init(false)); 69 70 STATISTIC(NumOpenMPRuntimeCallsDeduplicated, 71 "Number of OpenMP runtime calls deduplicated"); 72 STATISTIC(NumOpenMPParallelRegionsDeleted, 73 "Number of OpenMP parallel regions deleted"); 74 STATISTIC(NumOpenMPRuntimeFunctionsIdentified, 75 "Number of OpenMP runtime functions identified"); 76 STATISTIC(NumOpenMPRuntimeFunctionUsesIdentified, 77 "Number of OpenMP runtime function uses identified"); 78 STATISTIC(NumOpenMPTargetRegionKernels, 79 "Number of OpenMP target region entry points (=kernels) identified"); 80 STATISTIC(NumOpenMPTargetRegionKernelsSPMD, 81 "Number of OpenMP target region entry points (=kernels) executed in " 82 "SPMD-mode instead of generic-mode"); 83 STATISTIC(NumOpenMPTargetRegionKernelsWithoutStateMachine, 84 "Number of OpenMP target region entry points (=kernels) executed in " 85 "generic-mode without a state machines"); 86 STATISTIC(NumOpenMPTargetRegionKernelsCustomStateMachineWithFallback, 87 "Number of OpenMP target region entry points (=kernels) executed in " 88 "generic-mode with customized state machines with fallback"); 89 STATISTIC(NumOpenMPTargetRegionKernelsCustomStateMachineWithoutFallback, 90 "Number of OpenMP target region entry points (=kernels) executed in " 91 "generic-mode with customized state machines without fallback"); 92 STATISTIC( 93 NumOpenMPParallelRegionsReplacedInGPUStateMachine, 94 "Number of OpenMP parallel regions replaced with ID in GPU state machines"); 95 STATISTIC(NumOpenMPParallelRegionsMerged, 96 "Number of OpenMP parallel regions merged"); 97 STATISTIC(NumBytesMovedToSharedMemory, 98 "Amount of memory pushed to shared memory"); 99 100 #if !defined(NDEBUG) 101 static constexpr auto TAG = "[" DEBUG_TYPE "]"; 102 #endif 103 104 namespace { 105 106 enum class AddressSpace : unsigned { 107 Generic = 0, 108 Global = 1, 109 Shared = 3, 110 Constant = 4, 111 Local = 5, 112 }; 113 114 struct AAHeapToShared; 115 116 struct AAICVTracker; 117 118 /// OpenMP specific information. For now, stores RFIs and ICVs also needed for 119 /// Attributor runs. 120 struct OMPInformationCache : public InformationCache { 121 OMPInformationCache(Module &M, AnalysisGetter &AG, 122 BumpPtrAllocator &Allocator, SetVector<Function *> &CGSCC, 123 SmallPtrSetImpl<Kernel> &Kernels) 124 : InformationCache(M, AG, Allocator, &CGSCC), OMPBuilder(M), 125 Kernels(Kernels) { 126 127 OMPBuilder.initialize(); 128 initializeRuntimeFunctions(); 129 initializeInternalControlVars(); 130 } 131 132 /// Generic information that describes an internal control variable. 133 struct InternalControlVarInfo { 134 /// The kind, as described by InternalControlVar enum. 135 InternalControlVar Kind; 136 137 /// The name of the ICV. 138 StringRef Name; 139 140 /// Environment variable associated with this ICV. 141 StringRef EnvVarName; 142 143 /// Initial value kind. 144 ICVInitValue InitKind; 145 146 /// Initial value. 147 ConstantInt *InitValue; 148 149 /// Setter RTL function associated with this ICV. 150 RuntimeFunction Setter; 151 152 /// Getter RTL function associated with this ICV. 153 RuntimeFunction Getter; 154 155 /// RTL Function corresponding to the override clause of this ICV 156 RuntimeFunction Clause; 157 }; 158 159 /// Generic information that describes a runtime function 160 struct RuntimeFunctionInfo { 161 162 /// The kind, as described by the RuntimeFunction enum. 163 RuntimeFunction Kind; 164 165 /// The name of the function. 166 StringRef Name; 167 168 /// Flag to indicate a variadic function. 169 bool IsVarArg; 170 171 /// The return type of the function. 172 Type *ReturnType; 173 174 /// The argument types of the function. 175 SmallVector<Type *, 8> ArgumentTypes; 176 177 /// The declaration if available. 178 Function *Declaration = nullptr; 179 180 /// Uses of this runtime function per function containing the use. 181 using UseVector = SmallVector<Use *, 16>; 182 183 /// Clear UsesMap for runtime function. 184 void clearUsesMap() { UsesMap.clear(); } 185 186 /// Boolean conversion that is true if the runtime function was found. 187 operator bool() const { return Declaration; } 188 189 /// Return the vector of uses in function \p F. 190 UseVector &getOrCreateUseVector(Function *F) { 191 std::shared_ptr<UseVector> &UV = UsesMap[F]; 192 if (!UV) 193 UV = std::make_shared<UseVector>(); 194 return *UV; 195 } 196 197 /// Return the vector of uses in function \p F or `nullptr` if there are 198 /// none. 199 const UseVector *getUseVector(Function &F) const { 200 auto I = UsesMap.find(&F); 201 if (I != UsesMap.end()) 202 return I->second.get(); 203 return nullptr; 204 } 205 206 /// Return how many functions contain uses of this runtime function. 207 size_t getNumFunctionsWithUses() const { return UsesMap.size(); } 208 209 /// Return the number of arguments (or the minimal number for variadic 210 /// functions). 211 size_t getNumArgs() const { return ArgumentTypes.size(); } 212 213 /// Run the callback \p CB on each use and forget the use if the result is 214 /// true. The callback will be fed the function in which the use was 215 /// encountered as second argument. 216 void foreachUse(SmallVectorImpl<Function *> &SCC, 217 function_ref<bool(Use &, Function &)> CB) { 218 for (Function *F : SCC) 219 foreachUse(CB, F); 220 } 221 222 /// Run the callback \p CB on each use within the function \p F and forget 223 /// the use if the result is true. 224 void foreachUse(function_ref<bool(Use &, Function &)> CB, Function *F) { 225 SmallVector<unsigned, 8> ToBeDeleted; 226 ToBeDeleted.clear(); 227 228 unsigned Idx = 0; 229 UseVector &UV = getOrCreateUseVector(F); 230 231 for (Use *U : UV) { 232 if (CB(*U, *F)) 233 ToBeDeleted.push_back(Idx); 234 ++Idx; 235 } 236 237 // Remove the to-be-deleted indices in reverse order as prior 238 // modifications will not modify the smaller indices. 239 while (!ToBeDeleted.empty()) { 240 unsigned Idx = ToBeDeleted.pop_back_val(); 241 UV[Idx] = UV.back(); 242 UV.pop_back(); 243 } 244 } 245 246 private: 247 /// Map from functions to all uses of this runtime function contained in 248 /// them. 249 DenseMap<Function *, std::shared_ptr<UseVector>> UsesMap; 250 251 public: 252 /// Iterators for the uses of this runtime function. 253 decltype(UsesMap)::iterator begin() { return UsesMap.begin(); } 254 decltype(UsesMap)::iterator end() { return UsesMap.end(); } 255 }; 256 257 /// An OpenMP-IR-Builder instance 258 OpenMPIRBuilder OMPBuilder; 259 260 /// Map from runtime function kind to the runtime function description. 261 EnumeratedArray<RuntimeFunctionInfo, RuntimeFunction, 262 RuntimeFunction::OMPRTL___last> 263 RFIs; 264 265 /// Map from function declarations/definitions to their runtime enum type. 266 DenseMap<Function *, RuntimeFunction> RuntimeFunctionIDMap; 267 268 /// Map from ICV kind to the ICV description. 269 EnumeratedArray<InternalControlVarInfo, InternalControlVar, 270 InternalControlVar::ICV___last> 271 ICVs; 272 273 /// Helper to initialize all internal control variable information for those 274 /// defined in OMPKinds.def. 275 void initializeInternalControlVars() { 276 #define ICV_RT_SET(_Name, RTL) \ 277 { \ 278 auto &ICV = ICVs[_Name]; \ 279 ICV.Setter = RTL; \ 280 } 281 #define ICV_RT_GET(Name, RTL) \ 282 { \ 283 auto &ICV = ICVs[Name]; \ 284 ICV.Getter = RTL; \ 285 } 286 #define ICV_DATA_ENV(Enum, _Name, _EnvVarName, Init) \ 287 { \ 288 auto &ICV = ICVs[Enum]; \ 289 ICV.Name = _Name; \ 290 ICV.Kind = Enum; \ 291 ICV.InitKind = Init; \ 292 ICV.EnvVarName = _EnvVarName; \ 293 switch (ICV.InitKind) { \ 294 case ICV_IMPLEMENTATION_DEFINED: \ 295 ICV.InitValue = nullptr; \ 296 break; \ 297 case ICV_ZERO: \ 298 ICV.InitValue = ConstantInt::get( \ 299 Type::getInt32Ty(OMPBuilder.Int32->getContext()), 0); \ 300 break; \ 301 case ICV_FALSE: \ 302 ICV.InitValue = ConstantInt::getFalse(OMPBuilder.Int1->getContext()); \ 303 break; \ 304 case ICV_LAST: \ 305 break; \ 306 } \ 307 } 308 #include "llvm/Frontend/OpenMP/OMPKinds.def" 309 } 310 311 /// Returns true if the function declaration \p F matches the runtime 312 /// function types, that is, return type \p RTFRetType, and argument types 313 /// \p RTFArgTypes. 314 static bool declMatchesRTFTypes(Function *F, Type *RTFRetType, 315 SmallVector<Type *, 8> &RTFArgTypes) { 316 // TODO: We should output information to the user (under debug output 317 // and via remarks). 318 319 if (!F) 320 return false; 321 if (F->getReturnType() != RTFRetType) 322 return false; 323 if (F->arg_size() != RTFArgTypes.size()) 324 return false; 325 326 auto RTFTyIt = RTFArgTypes.begin(); 327 for (Argument &Arg : F->args()) { 328 if (Arg.getType() != *RTFTyIt) 329 return false; 330 331 ++RTFTyIt; 332 } 333 334 return true; 335 } 336 337 // Helper to collect all uses of the declaration in the UsesMap. 338 unsigned collectUses(RuntimeFunctionInfo &RFI, bool CollectStats = true) { 339 unsigned NumUses = 0; 340 if (!RFI.Declaration) 341 return NumUses; 342 OMPBuilder.addAttributes(RFI.Kind, *RFI.Declaration); 343 344 if (CollectStats) { 345 NumOpenMPRuntimeFunctionsIdentified += 1; 346 NumOpenMPRuntimeFunctionUsesIdentified += RFI.Declaration->getNumUses(); 347 } 348 349 // TODO: We directly convert uses into proper calls and unknown uses. 350 for (Use &U : RFI.Declaration->uses()) { 351 if (Instruction *UserI = dyn_cast<Instruction>(U.getUser())) { 352 if (ModuleSlice.count(UserI->getFunction())) { 353 RFI.getOrCreateUseVector(UserI->getFunction()).push_back(&U); 354 ++NumUses; 355 } 356 } else { 357 RFI.getOrCreateUseVector(nullptr).push_back(&U); 358 ++NumUses; 359 } 360 } 361 return NumUses; 362 } 363 364 // Helper function to recollect uses of a runtime function. 365 void recollectUsesForFunction(RuntimeFunction RTF) { 366 auto &RFI = RFIs[RTF]; 367 RFI.clearUsesMap(); 368 collectUses(RFI, /*CollectStats*/ false); 369 } 370 371 // Helper function to recollect uses of all runtime functions. 372 void recollectUses() { 373 for (int Idx = 0; Idx < RFIs.size(); ++Idx) 374 recollectUsesForFunction(static_cast<RuntimeFunction>(Idx)); 375 } 376 377 /// Helper to initialize all runtime function information for those defined 378 /// in OpenMPKinds.def. 379 void initializeRuntimeFunctions() { 380 Module &M = *((*ModuleSlice.begin())->getParent()); 381 382 // Helper macros for handling __VA_ARGS__ in OMP_RTL 383 #define OMP_TYPE(VarName, ...) \ 384 Type *VarName = OMPBuilder.VarName; \ 385 (void)VarName; 386 387 #define OMP_ARRAY_TYPE(VarName, ...) \ 388 ArrayType *VarName##Ty = OMPBuilder.VarName##Ty; \ 389 (void)VarName##Ty; \ 390 PointerType *VarName##PtrTy = OMPBuilder.VarName##PtrTy; \ 391 (void)VarName##PtrTy; 392 393 #define OMP_FUNCTION_TYPE(VarName, ...) \ 394 FunctionType *VarName = OMPBuilder.VarName; \ 395 (void)VarName; \ 396 PointerType *VarName##Ptr = OMPBuilder.VarName##Ptr; \ 397 (void)VarName##Ptr; 398 399 #define OMP_STRUCT_TYPE(VarName, ...) \ 400 StructType *VarName = OMPBuilder.VarName; \ 401 (void)VarName; \ 402 PointerType *VarName##Ptr = OMPBuilder.VarName##Ptr; \ 403 (void)VarName##Ptr; 404 405 #define OMP_RTL(_Enum, _Name, _IsVarArg, _ReturnType, ...) \ 406 { \ 407 SmallVector<Type *, 8> ArgsTypes({__VA_ARGS__}); \ 408 Function *F = M.getFunction(_Name); \ 409 RTLFunctions.insert(F); \ 410 if (declMatchesRTFTypes(F, OMPBuilder._ReturnType, ArgsTypes)) { \ 411 RuntimeFunctionIDMap[F] = _Enum; \ 412 auto &RFI = RFIs[_Enum]; \ 413 RFI.Kind = _Enum; \ 414 RFI.Name = _Name; \ 415 RFI.IsVarArg = _IsVarArg; \ 416 RFI.ReturnType = OMPBuilder._ReturnType; \ 417 RFI.ArgumentTypes = std::move(ArgsTypes); \ 418 RFI.Declaration = F; \ 419 unsigned NumUses = collectUses(RFI); \ 420 (void)NumUses; \ 421 LLVM_DEBUG({ \ 422 dbgs() << TAG << RFI.Name << (RFI.Declaration ? "" : " not") \ 423 << " found\n"; \ 424 if (RFI.Declaration) \ 425 dbgs() << TAG << "-> got " << NumUses << " uses in " \ 426 << RFI.getNumFunctionsWithUses() \ 427 << " different functions.\n"; \ 428 }); \ 429 } \ 430 } 431 #include "llvm/Frontend/OpenMP/OMPKinds.def" 432 433 // TODO: We should attach the attributes defined in OMPKinds.def. 434 } 435 436 /// Collection of known kernels (\see Kernel) in the module. 437 SmallPtrSetImpl<Kernel> &Kernels; 438 439 /// Collection of known OpenMP runtime functions.. 440 DenseSet<const Function *> RTLFunctions; 441 }; 442 443 template <typename Ty, bool InsertInvalidates = true> 444 struct BooleanStateWithPtrSetVector : public BooleanState { 445 446 bool contains(Ty *Elem) const { return Set.contains(Elem); } 447 bool insert(Ty *Elem) { 448 if (InsertInvalidates) 449 BooleanState::indicatePessimisticFixpoint(); 450 return Set.insert(Elem); 451 } 452 453 Ty *operator[](int Idx) const { return Set[Idx]; } 454 bool operator==(const BooleanStateWithPtrSetVector &RHS) const { 455 return BooleanState::operator==(RHS) && Set == RHS.Set; 456 } 457 bool operator!=(const BooleanStateWithPtrSetVector &RHS) const { 458 return !(*this == RHS); 459 } 460 461 bool empty() const { return Set.empty(); } 462 size_t size() const { return Set.size(); } 463 464 /// "Clamp" this state with \p RHS. 465 BooleanStateWithPtrSetVector & 466 operator^=(const BooleanStateWithPtrSetVector &RHS) { 467 BooleanState::operator^=(RHS); 468 Set.insert(RHS.Set.begin(), RHS.Set.end()); 469 return *this; 470 } 471 472 private: 473 /// A set to keep track of elements. 474 SetVector<Ty *> Set; 475 476 public: 477 typename decltype(Set)::iterator begin() { return Set.begin(); } 478 typename decltype(Set)::iterator end() { return Set.end(); } 479 typename decltype(Set)::const_iterator begin() const { return Set.begin(); } 480 typename decltype(Set)::const_iterator end() const { return Set.end(); } 481 }; 482 483 struct KernelInfoState : AbstractState { 484 /// Flag to track if we reached a fixpoint. 485 bool IsAtFixpoint = false; 486 487 /// The parallel regions (identified by the outlined parallel functions) that 488 /// can be reached from the associated function. 489 BooleanStateWithPtrSetVector<Function, /* InsertInvalidates */ false> 490 ReachedKnownParallelRegions; 491 492 /// State to track what parallel region we might reach. 493 BooleanStateWithPtrSetVector<CallBase> ReachedUnknownParallelRegions; 494 495 /// State to track if we are in SPMD-mode, assumed or know, and why we decided 496 /// we cannot be. 497 BooleanStateWithPtrSetVector<Instruction> SPMDCompatibilityTracker; 498 499 /// The __kmpc_target_init call in this kernel, if any. If we find more than 500 /// one we abort as the kernel is malformed. 501 CallBase *KernelInitCB = nullptr; 502 503 /// The __kmpc_target_deinit call in this kernel, if any. If we find more than 504 /// one we abort as the kernel is malformed. 505 CallBase *KernelDeinitCB = nullptr; 506 507 /// Flag to indicate if the associated function is a kernel entry. 508 bool IsKernelEntry = false; 509 510 /// State to track what kernel entries can reach the associated function. 511 BooleanStateWithPtrSetVector<Function, false> ReachingKernelEntries; 512 513 /// Abstract State interface 514 ///{ 515 516 KernelInfoState() {} 517 KernelInfoState(bool BestState) { 518 if (!BestState) 519 indicatePessimisticFixpoint(); 520 } 521 522 /// See AbstractState::isValidState(...) 523 bool isValidState() const override { return true; } 524 525 /// See AbstractState::isAtFixpoint(...) 526 bool isAtFixpoint() const override { return IsAtFixpoint; } 527 528 /// See AbstractState::indicatePessimisticFixpoint(...) 529 ChangeStatus indicatePessimisticFixpoint() override { 530 IsAtFixpoint = true; 531 SPMDCompatibilityTracker.indicatePessimisticFixpoint(); 532 ReachedUnknownParallelRegions.indicatePessimisticFixpoint(); 533 return ChangeStatus::CHANGED; 534 } 535 536 /// See AbstractState::indicateOptimisticFixpoint(...) 537 ChangeStatus indicateOptimisticFixpoint() override { 538 IsAtFixpoint = true; 539 return ChangeStatus::UNCHANGED; 540 } 541 542 /// Return the assumed state 543 KernelInfoState &getAssumed() { return *this; } 544 const KernelInfoState &getAssumed() const { return *this; } 545 546 bool operator==(const KernelInfoState &RHS) const { 547 if (SPMDCompatibilityTracker != RHS.SPMDCompatibilityTracker) 548 return false; 549 if (ReachedKnownParallelRegions != RHS.ReachedKnownParallelRegions) 550 return false; 551 if (ReachedUnknownParallelRegions != RHS.ReachedUnknownParallelRegions) 552 return false; 553 if (ReachingKernelEntries != RHS.ReachingKernelEntries) 554 return false; 555 return true; 556 } 557 558 /// Return empty set as the best state of potential values. 559 static KernelInfoState getBestState() { return KernelInfoState(true); } 560 561 static KernelInfoState getBestState(KernelInfoState &KIS) { 562 return getBestState(); 563 } 564 565 /// Return full set as the worst state of potential values. 566 static KernelInfoState getWorstState() { return KernelInfoState(false); } 567 568 /// "Clamp" this state with \p KIS. 569 KernelInfoState operator^=(const KernelInfoState &KIS) { 570 // Do not merge two different _init and _deinit call sites. 571 if (KIS.KernelInitCB) { 572 if (KernelInitCB && KernelInitCB != KIS.KernelInitCB) 573 indicatePessimisticFixpoint(); 574 KernelInitCB = KIS.KernelInitCB; 575 } 576 if (KIS.KernelDeinitCB) { 577 if (KernelDeinitCB && KernelDeinitCB != KIS.KernelDeinitCB) 578 indicatePessimisticFixpoint(); 579 KernelDeinitCB = KIS.KernelDeinitCB; 580 } 581 SPMDCompatibilityTracker ^= KIS.SPMDCompatibilityTracker; 582 ReachedKnownParallelRegions ^= KIS.ReachedKnownParallelRegions; 583 ReachedUnknownParallelRegions ^= KIS.ReachedUnknownParallelRegions; 584 return *this; 585 } 586 587 KernelInfoState operator&=(const KernelInfoState &KIS) { 588 return (*this ^= KIS); 589 } 590 591 ///} 592 }; 593 594 /// Used to map the values physically (in the IR) stored in an offload 595 /// array, to a vector in memory. 596 struct OffloadArray { 597 /// Physical array (in the IR). 598 AllocaInst *Array = nullptr; 599 /// Mapped values. 600 SmallVector<Value *, 8> StoredValues; 601 /// Last stores made in the offload array. 602 SmallVector<StoreInst *, 8> LastAccesses; 603 604 OffloadArray() = default; 605 606 /// Initializes the OffloadArray with the values stored in \p Array before 607 /// instruction \p Before is reached. Returns false if the initialization 608 /// fails. 609 /// This MUST be used immediately after the construction of the object. 610 bool initialize(AllocaInst &Array, Instruction &Before) { 611 if (!Array.getAllocatedType()->isArrayTy()) 612 return false; 613 614 if (!getValues(Array, Before)) 615 return false; 616 617 this->Array = &Array; 618 return true; 619 } 620 621 static const unsigned DeviceIDArgNum = 1; 622 static const unsigned BasePtrsArgNum = 3; 623 static const unsigned PtrsArgNum = 4; 624 static const unsigned SizesArgNum = 5; 625 626 private: 627 /// Traverses the BasicBlock where \p Array is, collecting the stores made to 628 /// \p Array, leaving StoredValues with the values stored before the 629 /// instruction \p Before is reached. 630 bool getValues(AllocaInst &Array, Instruction &Before) { 631 // Initialize container. 632 const uint64_t NumValues = Array.getAllocatedType()->getArrayNumElements(); 633 StoredValues.assign(NumValues, nullptr); 634 LastAccesses.assign(NumValues, nullptr); 635 636 // TODO: This assumes the instruction \p Before is in the same 637 // BasicBlock as Array. Make it general, for any control flow graph. 638 BasicBlock *BB = Array.getParent(); 639 if (BB != Before.getParent()) 640 return false; 641 642 const DataLayout &DL = Array.getModule()->getDataLayout(); 643 const unsigned int PointerSize = DL.getPointerSize(); 644 645 for (Instruction &I : *BB) { 646 if (&I == &Before) 647 break; 648 649 if (!isa<StoreInst>(&I)) 650 continue; 651 652 auto *S = cast<StoreInst>(&I); 653 int64_t Offset = -1; 654 auto *Dst = 655 GetPointerBaseWithConstantOffset(S->getPointerOperand(), Offset, DL); 656 if (Dst == &Array) { 657 int64_t Idx = Offset / PointerSize; 658 StoredValues[Idx] = getUnderlyingObject(S->getValueOperand()); 659 LastAccesses[Idx] = S; 660 } 661 } 662 663 return isFilled(); 664 } 665 666 /// Returns true if all values in StoredValues and 667 /// LastAccesses are not nullptrs. 668 bool isFilled() { 669 const unsigned NumValues = StoredValues.size(); 670 for (unsigned I = 0; I < NumValues; ++I) { 671 if (!StoredValues[I] || !LastAccesses[I]) 672 return false; 673 } 674 675 return true; 676 } 677 }; 678 679 struct OpenMPOpt { 680 681 using OptimizationRemarkGetter = 682 function_ref<OptimizationRemarkEmitter &(Function *)>; 683 684 OpenMPOpt(SmallVectorImpl<Function *> &SCC, CallGraphUpdater &CGUpdater, 685 OptimizationRemarkGetter OREGetter, 686 OMPInformationCache &OMPInfoCache, Attributor &A) 687 : M(*(*SCC.begin())->getParent()), SCC(SCC), CGUpdater(CGUpdater), 688 OREGetter(OREGetter), OMPInfoCache(OMPInfoCache), A(A) {} 689 690 /// Check if any remarks are enabled for openmp-opt 691 bool remarksEnabled() { 692 auto &Ctx = M.getContext(); 693 return Ctx.getDiagHandlerPtr()->isAnyRemarkEnabled(DEBUG_TYPE); 694 } 695 696 /// Run all OpenMP optimizations on the underlying SCC/ModuleSlice. 697 bool run(bool IsModulePass) { 698 if (SCC.empty()) 699 return false; 700 701 bool Changed = false; 702 703 LLVM_DEBUG(dbgs() << TAG << "Run on SCC with " << SCC.size() 704 << " functions in a slice with " 705 << OMPInfoCache.ModuleSlice.size() << " functions\n"); 706 707 if (IsModulePass) { 708 Changed |= runAttributor(IsModulePass); 709 710 // Recollect uses, in case Attributor deleted any. 711 OMPInfoCache.recollectUses(); 712 713 if (remarksEnabled()) 714 analysisGlobalization(); 715 } else { 716 if (PrintICVValues) 717 printICVs(); 718 if (PrintOpenMPKernels) 719 printKernels(); 720 721 Changed |= runAttributor(IsModulePass); 722 723 // Recollect uses, in case Attributor deleted any. 724 OMPInfoCache.recollectUses(); 725 726 Changed |= deleteParallelRegions(); 727 Changed |= rewriteDeviceCodeStateMachine(); 728 729 if (HideMemoryTransferLatency) 730 Changed |= hideMemTransfersLatency(); 731 Changed |= deduplicateRuntimeCalls(); 732 if (EnableParallelRegionMerging) { 733 if (mergeParallelRegions()) { 734 deduplicateRuntimeCalls(); 735 Changed = true; 736 } 737 } 738 } 739 740 return Changed; 741 } 742 743 /// Print initial ICV values for testing. 744 /// FIXME: This should be done from the Attributor once it is added. 745 void printICVs() const { 746 InternalControlVar ICVs[] = {ICV_nthreads, ICV_active_levels, ICV_cancel, 747 ICV_proc_bind}; 748 749 for (Function *F : OMPInfoCache.ModuleSlice) { 750 for (auto ICV : ICVs) { 751 auto ICVInfo = OMPInfoCache.ICVs[ICV]; 752 auto Remark = [&](OptimizationRemarkAnalysis ORA) { 753 return ORA << "OpenMP ICV " << ore::NV("OpenMPICV", ICVInfo.Name) 754 << " Value: " 755 << (ICVInfo.InitValue 756 ? toString(ICVInfo.InitValue->getValue(), 10, true) 757 : "IMPLEMENTATION_DEFINED"); 758 }; 759 760 emitRemark<OptimizationRemarkAnalysis>(F, "OpenMPICVTracker", Remark); 761 } 762 } 763 } 764 765 /// Print OpenMP GPU kernels for testing. 766 void printKernels() const { 767 for (Function *F : SCC) { 768 if (!OMPInfoCache.Kernels.count(F)) 769 continue; 770 771 auto Remark = [&](OptimizationRemarkAnalysis ORA) { 772 return ORA << "OpenMP GPU kernel " 773 << ore::NV("OpenMPGPUKernel", F->getName()) << "\n"; 774 }; 775 776 emitRemark<OptimizationRemarkAnalysis>(F, "OpenMPGPU", Remark); 777 } 778 } 779 780 /// Return the call if \p U is a callee use in a regular call. If \p RFI is 781 /// given it has to be the callee or a nullptr is returned. 782 static CallInst *getCallIfRegularCall( 783 Use &U, OMPInformationCache::RuntimeFunctionInfo *RFI = nullptr) { 784 CallInst *CI = dyn_cast<CallInst>(U.getUser()); 785 if (CI && CI->isCallee(&U) && !CI->hasOperandBundles() && 786 (!RFI || CI->getCalledFunction() == RFI->Declaration)) 787 return CI; 788 return nullptr; 789 } 790 791 /// Return the call if \p V is a regular call. If \p RFI is given it has to be 792 /// the callee or a nullptr is returned. 793 static CallInst *getCallIfRegularCall( 794 Value &V, OMPInformationCache::RuntimeFunctionInfo *RFI = nullptr) { 795 CallInst *CI = dyn_cast<CallInst>(&V); 796 if (CI && !CI->hasOperandBundles() && 797 (!RFI || CI->getCalledFunction() == RFI->Declaration)) 798 return CI; 799 return nullptr; 800 } 801 802 private: 803 /// Merge parallel regions when it is safe. 804 bool mergeParallelRegions() { 805 const unsigned CallbackCalleeOperand = 2; 806 const unsigned CallbackFirstArgOperand = 3; 807 using InsertPointTy = OpenMPIRBuilder::InsertPointTy; 808 809 // Check if there are any __kmpc_fork_call calls to merge. 810 OMPInformationCache::RuntimeFunctionInfo &RFI = 811 OMPInfoCache.RFIs[OMPRTL___kmpc_fork_call]; 812 813 if (!RFI.Declaration) 814 return false; 815 816 // Unmergable calls that prevent merging a parallel region. 817 OMPInformationCache::RuntimeFunctionInfo UnmergableCallsInfo[] = { 818 OMPInfoCache.RFIs[OMPRTL___kmpc_push_proc_bind], 819 OMPInfoCache.RFIs[OMPRTL___kmpc_push_num_threads], 820 }; 821 822 bool Changed = false; 823 LoopInfo *LI = nullptr; 824 DominatorTree *DT = nullptr; 825 826 SmallDenseMap<BasicBlock *, SmallPtrSet<Instruction *, 4>> BB2PRMap; 827 828 BasicBlock *StartBB = nullptr, *EndBB = nullptr; 829 auto BodyGenCB = [&](InsertPointTy AllocaIP, InsertPointTy CodeGenIP, 830 BasicBlock &ContinuationIP) { 831 BasicBlock *CGStartBB = CodeGenIP.getBlock(); 832 BasicBlock *CGEndBB = 833 SplitBlock(CGStartBB, &*CodeGenIP.getPoint(), DT, LI); 834 assert(StartBB != nullptr && "StartBB should not be null"); 835 CGStartBB->getTerminator()->setSuccessor(0, StartBB); 836 assert(EndBB != nullptr && "EndBB should not be null"); 837 EndBB->getTerminator()->setSuccessor(0, CGEndBB); 838 }; 839 840 auto PrivCB = [&](InsertPointTy AllocaIP, InsertPointTy CodeGenIP, Value &, 841 Value &Inner, Value *&ReplacementValue) -> InsertPointTy { 842 ReplacementValue = &Inner; 843 return CodeGenIP; 844 }; 845 846 auto FiniCB = [&](InsertPointTy CodeGenIP) {}; 847 848 /// Create a sequential execution region within a merged parallel region, 849 /// encapsulated in a master construct with a barrier for synchronization. 850 auto CreateSequentialRegion = [&](Function *OuterFn, 851 BasicBlock *OuterPredBB, 852 Instruction *SeqStartI, 853 Instruction *SeqEndI) { 854 // Isolate the instructions of the sequential region to a separate 855 // block. 856 BasicBlock *ParentBB = SeqStartI->getParent(); 857 BasicBlock *SeqEndBB = 858 SplitBlock(ParentBB, SeqEndI->getNextNode(), DT, LI); 859 BasicBlock *SeqAfterBB = 860 SplitBlock(SeqEndBB, &*SeqEndBB->getFirstInsertionPt(), DT, LI); 861 BasicBlock *SeqStartBB = 862 SplitBlock(ParentBB, SeqStartI, DT, LI, nullptr, "seq.par.merged"); 863 864 assert(ParentBB->getUniqueSuccessor() == SeqStartBB && 865 "Expected a different CFG"); 866 const DebugLoc DL = ParentBB->getTerminator()->getDebugLoc(); 867 ParentBB->getTerminator()->eraseFromParent(); 868 869 auto BodyGenCB = [&](InsertPointTy AllocaIP, InsertPointTy CodeGenIP, 870 BasicBlock &ContinuationIP) { 871 BasicBlock *CGStartBB = CodeGenIP.getBlock(); 872 BasicBlock *CGEndBB = 873 SplitBlock(CGStartBB, &*CodeGenIP.getPoint(), DT, LI); 874 assert(SeqStartBB != nullptr && "SeqStartBB should not be null"); 875 CGStartBB->getTerminator()->setSuccessor(0, SeqStartBB); 876 assert(SeqEndBB != nullptr && "SeqEndBB should not be null"); 877 SeqEndBB->getTerminator()->setSuccessor(0, CGEndBB); 878 }; 879 auto FiniCB = [&](InsertPointTy CodeGenIP) {}; 880 881 // Find outputs from the sequential region to outside users and 882 // broadcast their values to them. 883 for (Instruction &I : *SeqStartBB) { 884 SmallPtrSet<Instruction *, 4> OutsideUsers; 885 for (User *Usr : I.users()) { 886 Instruction &UsrI = *cast<Instruction>(Usr); 887 // Ignore outputs to LT intrinsics, code extraction for the merged 888 // parallel region will fix them. 889 if (UsrI.isLifetimeStartOrEnd()) 890 continue; 891 892 if (UsrI.getParent() != SeqStartBB) 893 OutsideUsers.insert(&UsrI); 894 } 895 896 if (OutsideUsers.empty()) 897 continue; 898 899 // Emit an alloca in the outer region to store the broadcasted 900 // value. 901 const DataLayout &DL = M.getDataLayout(); 902 AllocaInst *AllocaI = new AllocaInst( 903 I.getType(), DL.getAllocaAddrSpace(), nullptr, 904 I.getName() + ".seq.output.alloc", &OuterFn->front().front()); 905 906 // Emit a store instruction in the sequential BB to update the 907 // value. 908 new StoreInst(&I, AllocaI, SeqStartBB->getTerminator()); 909 910 // Emit a load instruction and replace the use of the output value 911 // with it. 912 for (Instruction *UsrI : OutsideUsers) { 913 LoadInst *LoadI = new LoadInst( 914 I.getType(), AllocaI, I.getName() + ".seq.output.load", UsrI); 915 UsrI->replaceUsesOfWith(&I, LoadI); 916 } 917 } 918 919 OpenMPIRBuilder::LocationDescription Loc( 920 InsertPointTy(ParentBB, ParentBB->end()), DL); 921 InsertPointTy SeqAfterIP = 922 OMPInfoCache.OMPBuilder.createMaster(Loc, BodyGenCB, FiniCB); 923 924 OMPInfoCache.OMPBuilder.createBarrier(SeqAfterIP, OMPD_parallel); 925 926 BranchInst::Create(SeqAfterBB, SeqAfterIP.getBlock()); 927 928 LLVM_DEBUG(dbgs() << TAG << "After sequential inlining " << *OuterFn 929 << "\n"); 930 }; 931 932 // Helper to merge the __kmpc_fork_call calls in MergableCIs. They are all 933 // contained in BB and only separated by instructions that can be 934 // redundantly executed in parallel. The block BB is split before the first 935 // call (in MergableCIs) and after the last so the entire region we merge 936 // into a single parallel region is contained in a single basic block 937 // without any other instructions. We use the OpenMPIRBuilder to outline 938 // that block and call the resulting function via __kmpc_fork_call. 939 auto Merge = [&](SmallVectorImpl<CallInst *> &MergableCIs, BasicBlock *BB) { 940 // TODO: Change the interface to allow single CIs expanded, e.g, to 941 // include an outer loop. 942 assert(MergableCIs.size() > 1 && "Assumed multiple mergable CIs"); 943 944 auto Remark = [&](OptimizationRemark OR) { 945 OR << "Parallel region merged with parallel region" 946 << (MergableCIs.size() > 2 ? "s" : "") << " at "; 947 for (auto *CI : llvm::drop_begin(MergableCIs)) { 948 OR << ore::NV("OpenMPParallelMerge", CI->getDebugLoc()); 949 if (CI != MergableCIs.back()) 950 OR << ", "; 951 } 952 return OR << "."; 953 }; 954 955 emitRemark<OptimizationRemark>(MergableCIs.front(), "OMP150", Remark); 956 957 Function *OriginalFn = BB->getParent(); 958 LLVM_DEBUG(dbgs() << TAG << "Merge " << MergableCIs.size() 959 << " parallel regions in " << OriginalFn->getName() 960 << "\n"); 961 962 // Isolate the calls to merge in a separate block. 963 EndBB = SplitBlock(BB, MergableCIs.back()->getNextNode(), DT, LI); 964 BasicBlock *AfterBB = 965 SplitBlock(EndBB, &*EndBB->getFirstInsertionPt(), DT, LI); 966 StartBB = SplitBlock(BB, MergableCIs.front(), DT, LI, nullptr, 967 "omp.par.merged"); 968 969 assert(BB->getUniqueSuccessor() == StartBB && "Expected a different CFG"); 970 const DebugLoc DL = BB->getTerminator()->getDebugLoc(); 971 BB->getTerminator()->eraseFromParent(); 972 973 // Create sequential regions for sequential instructions that are 974 // in-between mergable parallel regions. 975 for (auto *It = MergableCIs.begin(), *End = MergableCIs.end() - 1; 976 It != End; ++It) { 977 Instruction *ForkCI = *It; 978 Instruction *NextForkCI = *(It + 1); 979 980 // Continue if there are not in-between instructions. 981 if (ForkCI->getNextNode() == NextForkCI) 982 continue; 983 984 CreateSequentialRegion(OriginalFn, BB, ForkCI->getNextNode(), 985 NextForkCI->getPrevNode()); 986 } 987 988 OpenMPIRBuilder::LocationDescription Loc(InsertPointTy(BB, BB->end()), 989 DL); 990 IRBuilder<>::InsertPoint AllocaIP( 991 &OriginalFn->getEntryBlock(), 992 OriginalFn->getEntryBlock().getFirstInsertionPt()); 993 // Create the merged parallel region with default proc binding, to 994 // avoid overriding binding settings, and without explicit cancellation. 995 InsertPointTy AfterIP = OMPInfoCache.OMPBuilder.createParallel( 996 Loc, AllocaIP, BodyGenCB, PrivCB, FiniCB, nullptr, nullptr, 997 OMP_PROC_BIND_default, /* IsCancellable */ false); 998 BranchInst::Create(AfterBB, AfterIP.getBlock()); 999 1000 // Perform the actual outlining. 1001 OMPInfoCache.OMPBuilder.finalize(OriginalFn, 1002 /* AllowExtractorSinking */ true); 1003 1004 Function *OutlinedFn = MergableCIs.front()->getCaller(); 1005 1006 // Replace the __kmpc_fork_call calls with direct calls to the outlined 1007 // callbacks. 1008 SmallVector<Value *, 8> Args; 1009 for (auto *CI : MergableCIs) { 1010 Value *Callee = 1011 CI->getArgOperand(CallbackCalleeOperand)->stripPointerCasts(); 1012 FunctionType *FT = 1013 cast<FunctionType>(Callee->getType()->getPointerElementType()); 1014 Args.clear(); 1015 Args.push_back(OutlinedFn->getArg(0)); 1016 Args.push_back(OutlinedFn->getArg(1)); 1017 for (unsigned U = CallbackFirstArgOperand, E = CI->getNumArgOperands(); 1018 U < E; ++U) 1019 Args.push_back(CI->getArgOperand(U)); 1020 1021 CallInst *NewCI = CallInst::Create(FT, Callee, Args, "", CI); 1022 if (CI->getDebugLoc()) 1023 NewCI->setDebugLoc(CI->getDebugLoc()); 1024 1025 // Forward parameter attributes from the callback to the callee. 1026 for (unsigned U = CallbackFirstArgOperand, E = CI->getNumArgOperands(); 1027 U < E; ++U) 1028 for (const Attribute &A : CI->getAttributes().getParamAttributes(U)) 1029 NewCI->addParamAttr( 1030 U - (CallbackFirstArgOperand - CallbackCalleeOperand), A); 1031 1032 // Emit an explicit barrier to replace the implicit fork-join barrier. 1033 if (CI != MergableCIs.back()) { 1034 // TODO: Remove barrier if the merged parallel region includes the 1035 // 'nowait' clause. 1036 OMPInfoCache.OMPBuilder.createBarrier( 1037 InsertPointTy(NewCI->getParent(), 1038 NewCI->getNextNode()->getIterator()), 1039 OMPD_parallel); 1040 } 1041 1042 CI->eraseFromParent(); 1043 } 1044 1045 assert(OutlinedFn != OriginalFn && "Outlining failed"); 1046 CGUpdater.registerOutlinedFunction(*OriginalFn, *OutlinedFn); 1047 CGUpdater.reanalyzeFunction(*OriginalFn); 1048 1049 NumOpenMPParallelRegionsMerged += MergableCIs.size(); 1050 1051 return true; 1052 }; 1053 1054 // Helper function that identifes sequences of 1055 // __kmpc_fork_call uses in a basic block. 1056 auto DetectPRsCB = [&](Use &U, Function &F) { 1057 CallInst *CI = getCallIfRegularCall(U, &RFI); 1058 BB2PRMap[CI->getParent()].insert(CI); 1059 1060 return false; 1061 }; 1062 1063 BB2PRMap.clear(); 1064 RFI.foreachUse(SCC, DetectPRsCB); 1065 SmallVector<SmallVector<CallInst *, 4>, 4> MergableCIsVector; 1066 // Find mergable parallel regions within a basic block that are 1067 // safe to merge, that is any in-between instructions can safely 1068 // execute in parallel after merging. 1069 // TODO: support merging across basic-blocks. 1070 for (auto &It : BB2PRMap) { 1071 auto &CIs = It.getSecond(); 1072 if (CIs.size() < 2) 1073 continue; 1074 1075 BasicBlock *BB = It.getFirst(); 1076 SmallVector<CallInst *, 4> MergableCIs; 1077 1078 /// Returns true if the instruction is mergable, false otherwise. 1079 /// A terminator instruction is unmergable by definition since merging 1080 /// works within a BB. Instructions before the mergable region are 1081 /// mergable if they are not calls to OpenMP runtime functions that may 1082 /// set different execution parameters for subsequent parallel regions. 1083 /// Instructions in-between parallel regions are mergable if they are not 1084 /// calls to any non-intrinsic function since that may call a non-mergable 1085 /// OpenMP runtime function. 1086 auto IsMergable = [&](Instruction &I, bool IsBeforeMergableRegion) { 1087 // We do not merge across BBs, hence return false (unmergable) if the 1088 // instruction is a terminator. 1089 if (I.isTerminator()) 1090 return false; 1091 1092 if (!isa<CallInst>(&I)) 1093 return true; 1094 1095 CallInst *CI = cast<CallInst>(&I); 1096 if (IsBeforeMergableRegion) { 1097 Function *CalledFunction = CI->getCalledFunction(); 1098 if (!CalledFunction) 1099 return false; 1100 // Return false (unmergable) if the call before the parallel 1101 // region calls an explicit affinity (proc_bind) or number of 1102 // threads (num_threads) compiler-generated function. Those settings 1103 // may be incompatible with following parallel regions. 1104 // TODO: ICV tracking to detect compatibility. 1105 for (const auto &RFI : UnmergableCallsInfo) { 1106 if (CalledFunction == RFI.Declaration) 1107 return false; 1108 } 1109 } else { 1110 // Return false (unmergable) if there is a call instruction 1111 // in-between parallel regions when it is not an intrinsic. It 1112 // may call an unmergable OpenMP runtime function in its callpath. 1113 // TODO: Keep track of possible OpenMP calls in the callpath. 1114 if (!isa<IntrinsicInst>(CI)) 1115 return false; 1116 } 1117 1118 return true; 1119 }; 1120 // Find maximal number of parallel region CIs that are safe to merge. 1121 for (auto It = BB->begin(), End = BB->end(); It != End;) { 1122 Instruction &I = *It; 1123 ++It; 1124 1125 if (CIs.count(&I)) { 1126 MergableCIs.push_back(cast<CallInst>(&I)); 1127 continue; 1128 } 1129 1130 // Continue expanding if the instruction is mergable. 1131 if (IsMergable(I, MergableCIs.empty())) 1132 continue; 1133 1134 // Forward the instruction iterator to skip the next parallel region 1135 // since there is an unmergable instruction which can affect it. 1136 for (; It != End; ++It) { 1137 Instruction &SkipI = *It; 1138 if (CIs.count(&SkipI)) { 1139 LLVM_DEBUG(dbgs() << TAG << "Skip parallel region " << SkipI 1140 << " due to " << I << "\n"); 1141 ++It; 1142 break; 1143 } 1144 } 1145 1146 // Store mergable regions found. 1147 if (MergableCIs.size() > 1) { 1148 MergableCIsVector.push_back(MergableCIs); 1149 LLVM_DEBUG(dbgs() << TAG << "Found " << MergableCIs.size() 1150 << " parallel regions in block " << BB->getName() 1151 << " of function " << BB->getParent()->getName() 1152 << "\n";); 1153 } 1154 1155 MergableCIs.clear(); 1156 } 1157 1158 if (!MergableCIsVector.empty()) { 1159 Changed = true; 1160 1161 for (auto &MergableCIs : MergableCIsVector) 1162 Merge(MergableCIs, BB); 1163 MergableCIsVector.clear(); 1164 } 1165 } 1166 1167 if (Changed) { 1168 /// Re-collect use for fork calls, emitted barrier calls, and 1169 /// any emitted master/end_master calls. 1170 OMPInfoCache.recollectUsesForFunction(OMPRTL___kmpc_fork_call); 1171 OMPInfoCache.recollectUsesForFunction(OMPRTL___kmpc_barrier); 1172 OMPInfoCache.recollectUsesForFunction(OMPRTL___kmpc_master); 1173 OMPInfoCache.recollectUsesForFunction(OMPRTL___kmpc_end_master); 1174 } 1175 1176 return Changed; 1177 } 1178 1179 /// Try to delete parallel regions if possible. 1180 bool deleteParallelRegions() { 1181 const unsigned CallbackCalleeOperand = 2; 1182 1183 OMPInformationCache::RuntimeFunctionInfo &RFI = 1184 OMPInfoCache.RFIs[OMPRTL___kmpc_fork_call]; 1185 1186 if (!RFI.Declaration) 1187 return false; 1188 1189 bool Changed = false; 1190 auto DeleteCallCB = [&](Use &U, Function &) { 1191 CallInst *CI = getCallIfRegularCall(U); 1192 if (!CI) 1193 return false; 1194 auto *Fn = dyn_cast<Function>( 1195 CI->getArgOperand(CallbackCalleeOperand)->stripPointerCasts()); 1196 if (!Fn) 1197 return false; 1198 if (!Fn->onlyReadsMemory()) 1199 return false; 1200 if (!Fn->hasFnAttribute(Attribute::WillReturn)) 1201 return false; 1202 1203 LLVM_DEBUG(dbgs() << TAG << "Delete read-only parallel region in " 1204 << CI->getCaller()->getName() << "\n"); 1205 1206 auto Remark = [&](OptimizationRemark OR) { 1207 return OR << "Removing parallel region with no side-effects."; 1208 }; 1209 emitRemark<OptimizationRemark>(CI, "OMP160", Remark); 1210 1211 CGUpdater.removeCallSite(*CI); 1212 CI->eraseFromParent(); 1213 Changed = true; 1214 ++NumOpenMPParallelRegionsDeleted; 1215 return true; 1216 }; 1217 1218 RFI.foreachUse(SCC, DeleteCallCB); 1219 1220 return Changed; 1221 } 1222 1223 /// Try to eliminate runtime calls by reusing existing ones. 1224 bool deduplicateRuntimeCalls() { 1225 bool Changed = false; 1226 1227 RuntimeFunction DeduplicableRuntimeCallIDs[] = { 1228 OMPRTL_omp_get_num_threads, 1229 OMPRTL_omp_in_parallel, 1230 OMPRTL_omp_get_cancellation, 1231 OMPRTL_omp_get_thread_limit, 1232 OMPRTL_omp_get_supported_active_levels, 1233 OMPRTL_omp_get_level, 1234 OMPRTL_omp_get_ancestor_thread_num, 1235 OMPRTL_omp_get_team_size, 1236 OMPRTL_omp_get_active_level, 1237 OMPRTL_omp_in_final, 1238 OMPRTL_omp_get_proc_bind, 1239 OMPRTL_omp_get_num_places, 1240 OMPRTL_omp_get_num_procs, 1241 OMPRTL_omp_get_place_num, 1242 OMPRTL_omp_get_partition_num_places, 1243 OMPRTL_omp_get_partition_place_nums}; 1244 1245 // Global-tid is handled separately. 1246 SmallSetVector<Value *, 16> GTIdArgs; 1247 collectGlobalThreadIdArguments(GTIdArgs); 1248 LLVM_DEBUG(dbgs() << TAG << "Found " << GTIdArgs.size() 1249 << " global thread ID arguments\n"); 1250 1251 for (Function *F : SCC) { 1252 for (auto DeduplicableRuntimeCallID : DeduplicableRuntimeCallIDs) 1253 Changed |= deduplicateRuntimeCalls( 1254 *F, OMPInfoCache.RFIs[DeduplicableRuntimeCallID]); 1255 1256 // __kmpc_global_thread_num is special as we can replace it with an 1257 // argument in enough cases to make it worth trying. 1258 Value *GTIdArg = nullptr; 1259 for (Argument &Arg : F->args()) 1260 if (GTIdArgs.count(&Arg)) { 1261 GTIdArg = &Arg; 1262 break; 1263 } 1264 Changed |= deduplicateRuntimeCalls( 1265 *F, OMPInfoCache.RFIs[OMPRTL___kmpc_global_thread_num], GTIdArg); 1266 } 1267 1268 return Changed; 1269 } 1270 1271 /// Tries to hide the latency of runtime calls that involve host to 1272 /// device memory transfers by splitting them into their "issue" and "wait" 1273 /// versions. The "issue" is moved upwards as much as possible. The "wait" is 1274 /// moved downards as much as possible. The "issue" issues the memory transfer 1275 /// asynchronously, returning a handle. The "wait" waits in the returned 1276 /// handle for the memory transfer to finish. 1277 bool hideMemTransfersLatency() { 1278 auto &RFI = OMPInfoCache.RFIs[OMPRTL___tgt_target_data_begin_mapper]; 1279 bool Changed = false; 1280 auto SplitMemTransfers = [&](Use &U, Function &Decl) { 1281 auto *RTCall = getCallIfRegularCall(U, &RFI); 1282 if (!RTCall) 1283 return false; 1284 1285 OffloadArray OffloadArrays[3]; 1286 if (!getValuesInOffloadArrays(*RTCall, OffloadArrays)) 1287 return false; 1288 1289 LLVM_DEBUG(dumpValuesInOffloadArrays(OffloadArrays)); 1290 1291 // TODO: Check if can be moved upwards. 1292 bool WasSplit = false; 1293 Instruction *WaitMovementPoint = canBeMovedDownwards(*RTCall); 1294 if (WaitMovementPoint) 1295 WasSplit = splitTargetDataBeginRTC(*RTCall, *WaitMovementPoint); 1296 1297 Changed |= WasSplit; 1298 return WasSplit; 1299 }; 1300 RFI.foreachUse(SCC, SplitMemTransfers); 1301 1302 return Changed; 1303 } 1304 1305 void analysisGlobalization() { 1306 auto &RFI = OMPInfoCache.RFIs[OMPRTL___kmpc_alloc_shared]; 1307 1308 auto CheckGlobalization = [&](Use &U, Function &Decl) { 1309 if (CallInst *CI = getCallIfRegularCall(U, &RFI)) { 1310 auto Remark = [&](OptimizationRemarkMissed ORM) { 1311 return ORM 1312 << "Found thread data sharing on the GPU. " 1313 << "Expect degraded performance due to data globalization."; 1314 }; 1315 emitRemark<OptimizationRemarkMissed>(CI, "OMP112", Remark); 1316 } 1317 1318 return false; 1319 }; 1320 1321 RFI.foreachUse(SCC, CheckGlobalization); 1322 } 1323 1324 /// Maps the values stored in the offload arrays passed as arguments to 1325 /// \p RuntimeCall into the offload arrays in \p OAs. 1326 bool getValuesInOffloadArrays(CallInst &RuntimeCall, 1327 MutableArrayRef<OffloadArray> OAs) { 1328 assert(OAs.size() == 3 && "Need space for three offload arrays!"); 1329 1330 // A runtime call that involves memory offloading looks something like: 1331 // call void @__tgt_target_data_begin_mapper(arg0, arg1, 1332 // i8** %offload_baseptrs, i8** %offload_ptrs, i64* %offload_sizes, 1333 // ...) 1334 // So, the idea is to access the allocas that allocate space for these 1335 // offload arrays, offload_baseptrs, offload_ptrs, offload_sizes. 1336 // Therefore: 1337 // i8** %offload_baseptrs. 1338 Value *BasePtrsArg = 1339 RuntimeCall.getArgOperand(OffloadArray::BasePtrsArgNum); 1340 // i8** %offload_ptrs. 1341 Value *PtrsArg = RuntimeCall.getArgOperand(OffloadArray::PtrsArgNum); 1342 // i8** %offload_sizes. 1343 Value *SizesArg = RuntimeCall.getArgOperand(OffloadArray::SizesArgNum); 1344 1345 // Get values stored in **offload_baseptrs. 1346 auto *V = getUnderlyingObject(BasePtrsArg); 1347 if (!isa<AllocaInst>(V)) 1348 return false; 1349 auto *BasePtrsArray = cast<AllocaInst>(V); 1350 if (!OAs[0].initialize(*BasePtrsArray, RuntimeCall)) 1351 return false; 1352 1353 // Get values stored in **offload_baseptrs. 1354 V = getUnderlyingObject(PtrsArg); 1355 if (!isa<AllocaInst>(V)) 1356 return false; 1357 auto *PtrsArray = cast<AllocaInst>(V); 1358 if (!OAs[1].initialize(*PtrsArray, RuntimeCall)) 1359 return false; 1360 1361 // Get values stored in **offload_sizes. 1362 V = getUnderlyingObject(SizesArg); 1363 // If it's a [constant] global array don't analyze it. 1364 if (isa<GlobalValue>(V)) 1365 return isa<Constant>(V); 1366 if (!isa<AllocaInst>(V)) 1367 return false; 1368 1369 auto *SizesArray = cast<AllocaInst>(V); 1370 if (!OAs[2].initialize(*SizesArray, RuntimeCall)) 1371 return false; 1372 1373 return true; 1374 } 1375 1376 /// Prints the values in the OffloadArrays \p OAs using LLVM_DEBUG. 1377 /// For now this is a way to test that the function getValuesInOffloadArrays 1378 /// is working properly. 1379 /// TODO: Move this to a unittest when unittests are available for OpenMPOpt. 1380 void dumpValuesInOffloadArrays(ArrayRef<OffloadArray> OAs) { 1381 assert(OAs.size() == 3 && "There are three offload arrays to debug!"); 1382 1383 LLVM_DEBUG(dbgs() << TAG << " Successfully got offload values:\n"); 1384 std::string ValuesStr; 1385 raw_string_ostream Printer(ValuesStr); 1386 std::string Separator = " --- "; 1387 1388 for (auto *BP : OAs[0].StoredValues) { 1389 BP->print(Printer); 1390 Printer << Separator; 1391 } 1392 LLVM_DEBUG(dbgs() << "\t\toffload_baseptrs: " << Printer.str() << "\n"); 1393 ValuesStr.clear(); 1394 1395 for (auto *P : OAs[1].StoredValues) { 1396 P->print(Printer); 1397 Printer << Separator; 1398 } 1399 LLVM_DEBUG(dbgs() << "\t\toffload_ptrs: " << Printer.str() << "\n"); 1400 ValuesStr.clear(); 1401 1402 for (auto *S : OAs[2].StoredValues) { 1403 S->print(Printer); 1404 Printer << Separator; 1405 } 1406 LLVM_DEBUG(dbgs() << "\t\toffload_sizes: " << Printer.str() << "\n"); 1407 } 1408 1409 /// Returns the instruction where the "wait" counterpart \p RuntimeCall can be 1410 /// moved. Returns nullptr if the movement is not possible, or not worth it. 1411 Instruction *canBeMovedDownwards(CallInst &RuntimeCall) { 1412 // FIXME: This traverses only the BasicBlock where RuntimeCall is. 1413 // Make it traverse the CFG. 1414 1415 Instruction *CurrentI = &RuntimeCall; 1416 bool IsWorthIt = false; 1417 while ((CurrentI = CurrentI->getNextNode())) { 1418 1419 // TODO: Once we detect the regions to be offloaded we should use the 1420 // alias analysis manager to check if CurrentI may modify one of 1421 // the offloaded regions. 1422 if (CurrentI->mayHaveSideEffects() || CurrentI->mayReadFromMemory()) { 1423 if (IsWorthIt) 1424 return CurrentI; 1425 1426 return nullptr; 1427 } 1428 1429 // FIXME: For now if we move it over anything without side effect 1430 // is worth it. 1431 IsWorthIt = true; 1432 } 1433 1434 // Return end of BasicBlock. 1435 return RuntimeCall.getParent()->getTerminator(); 1436 } 1437 1438 /// Splits \p RuntimeCall into its "issue" and "wait" counterparts. 1439 bool splitTargetDataBeginRTC(CallInst &RuntimeCall, 1440 Instruction &WaitMovementPoint) { 1441 // Create stack allocated handle (__tgt_async_info) at the beginning of the 1442 // function. Used for storing information of the async transfer, allowing to 1443 // wait on it later. 1444 auto &IRBuilder = OMPInfoCache.OMPBuilder; 1445 auto *F = RuntimeCall.getCaller(); 1446 Instruction *FirstInst = &(F->getEntryBlock().front()); 1447 AllocaInst *Handle = new AllocaInst( 1448 IRBuilder.AsyncInfo, F->getAddressSpace(), "handle", FirstInst); 1449 1450 // Add "issue" runtime call declaration: 1451 // declare %struct.tgt_async_info @__tgt_target_data_begin_issue(i64, i32, 1452 // i8**, i8**, i64*, i64*) 1453 FunctionCallee IssueDecl = IRBuilder.getOrCreateRuntimeFunction( 1454 M, OMPRTL___tgt_target_data_begin_mapper_issue); 1455 1456 // Change RuntimeCall call site for its asynchronous version. 1457 SmallVector<Value *, 16> Args; 1458 for (auto &Arg : RuntimeCall.args()) 1459 Args.push_back(Arg.get()); 1460 Args.push_back(Handle); 1461 1462 CallInst *IssueCallsite = 1463 CallInst::Create(IssueDecl, Args, /*NameStr=*/"", &RuntimeCall); 1464 RuntimeCall.eraseFromParent(); 1465 1466 // Add "wait" runtime call declaration: 1467 // declare void @__tgt_target_data_begin_wait(i64, %struct.__tgt_async_info) 1468 FunctionCallee WaitDecl = IRBuilder.getOrCreateRuntimeFunction( 1469 M, OMPRTL___tgt_target_data_begin_mapper_wait); 1470 1471 Value *WaitParams[2] = { 1472 IssueCallsite->getArgOperand( 1473 OffloadArray::DeviceIDArgNum), // device_id. 1474 Handle // handle to wait on. 1475 }; 1476 CallInst::Create(WaitDecl, WaitParams, /*NameStr=*/"", &WaitMovementPoint); 1477 1478 return true; 1479 } 1480 1481 static Value *combinedIdentStruct(Value *CurrentIdent, Value *NextIdent, 1482 bool GlobalOnly, bool &SingleChoice) { 1483 if (CurrentIdent == NextIdent) 1484 return CurrentIdent; 1485 1486 // TODO: Figure out how to actually combine multiple debug locations. For 1487 // now we just keep an existing one if there is a single choice. 1488 if (!GlobalOnly || isa<GlobalValue>(NextIdent)) { 1489 SingleChoice = !CurrentIdent; 1490 return NextIdent; 1491 } 1492 return nullptr; 1493 } 1494 1495 /// Return an `struct ident_t*` value that represents the ones used in the 1496 /// calls of \p RFI inside of \p F. If \p GlobalOnly is true, we will not 1497 /// return a local `struct ident_t*`. For now, if we cannot find a suitable 1498 /// return value we create one from scratch. We also do not yet combine 1499 /// information, e.g., the source locations, see combinedIdentStruct. 1500 Value * 1501 getCombinedIdentFromCallUsesIn(OMPInformationCache::RuntimeFunctionInfo &RFI, 1502 Function &F, bool GlobalOnly) { 1503 bool SingleChoice = true; 1504 Value *Ident = nullptr; 1505 auto CombineIdentStruct = [&](Use &U, Function &Caller) { 1506 CallInst *CI = getCallIfRegularCall(U, &RFI); 1507 if (!CI || &F != &Caller) 1508 return false; 1509 Ident = combinedIdentStruct(Ident, CI->getArgOperand(0), 1510 /* GlobalOnly */ true, SingleChoice); 1511 return false; 1512 }; 1513 RFI.foreachUse(SCC, CombineIdentStruct); 1514 1515 if (!Ident || !SingleChoice) { 1516 // The IRBuilder uses the insertion block to get to the module, this is 1517 // unfortunate but we work around it for now. 1518 if (!OMPInfoCache.OMPBuilder.getInsertionPoint().getBlock()) 1519 OMPInfoCache.OMPBuilder.updateToLocation(OpenMPIRBuilder::InsertPointTy( 1520 &F.getEntryBlock(), F.getEntryBlock().begin())); 1521 // Create a fallback location if non was found. 1522 // TODO: Use the debug locations of the calls instead. 1523 Constant *Loc = OMPInfoCache.OMPBuilder.getOrCreateDefaultSrcLocStr(); 1524 Ident = OMPInfoCache.OMPBuilder.getOrCreateIdent(Loc); 1525 } 1526 return Ident; 1527 } 1528 1529 /// Try to eliminate calls of \p RFI in \p F by reusing an existing one or 1530 /// \p ReplVal if given. 1531 bool deduplicateRuntimeCalls(Function &F, 1532 OMPInformationCache::RuntimeFunctionInfo &RFI, 1533 Value *ReplVal = nullptr) { 1534 auto *UV = RFI.getUseVector(F); 1535 if (!UV || UV->size() + (ReplVal != nullptr) < 2) 1536 return false; 1537 1538 LLVM_DEBUG( 1539 dbgs() << TAG << "Deduplicate " << UV->size() << " uses of " << RFI.Name 1540 << (ReplVal ? " with an existing value\n" : "\n") << "\n"); 1541 1542 assert((!ReplVal || (isa<Argument>(ReplVal) && 1543 cast<Argument>(ReplVal)->getParent() == &F)) && 1544 "Unexpected replacement value!"); 1545 1546 // TODO: Use dominance to find a good position instead. 1547 auto CanBeMoved = [this](CallBase &CB) { 1548 unsigned NumArgs = CB.getNumArgOperands(); 1549 if (NumArgs == 0) 1550 return true; 1551 if (CB.getArgOperand(0)->getType() != OMPInfoCache.OMPBuilder.IdentPtr) 1552 return false; 1553 for (unsigned u = 1; u < NumArgs; ++u) 1554 if (isa<Instruction>(CB.getArgOperand(u))) 1555 return false; 1556 return true; 1557 }; 1558 1559 if (!ReplVal) { 1560 for (Use *U : *UV) 1561 if (CallInst *CI = getCallIfRegularCall(*U, &RFI)) { 1562 if (!CanBeMoved(*CI)) 1563 continue; 1564 1565 CI->moveBefore(&*F.getEntryBlock().getFirstInsertionPt()); 1566 ReplVal = CI; 1567 break; 1568 } 1569 if (!ReplVal) 1570 return false; 1571 } 1572 1573 // If we use a call as a replacement value we need to make sure the ident is 1574 // valid at the new location. For now we just pick a global one, either 1575 // existing and used by one of the calls, or created from scratch. 1576 if (CallBase *CI = dyn_cast<CallBase>(ReplVal)) { 1577 if (CI->getNumArgOperands() > 0 && 1578 CI->getArgOperand(0)->getType() == OMPInfoCache.OMPBuilder.IdentPtr) { 1579 Value *Ident = getCombinedIdentFromCallUsesIn(RFI, F, 1580 /* GlobalOnly */ true); 1581 CI->setArgOperand(0, Ident); 1582 } 1583 } 1584 1585 bool Changed = false; 1586 auto ReplaceAndDeleteCB = [&](Use &U, Function &Caller) { 1587 CallInst *CI = getCallIfRegularCall(U, &RFI); 1588 if (!CI || CI == ReplVal || &F != &Caller) 1589 return false; 1590 assert(CI->getCaller() == &F && "Unexpected call!"); 1591 1592 auto Remark = [&](OptimizationRemark OR) { 1593 return OR << "OpenMP runtime call " 1594 << ore::NV("OpenMPOptRuntime", RFI.Name) << " deduplicated."; 1595 }; 1596 if (CI->getDebugLoc()) 1597 emitRemark<OptimizationRemark>(CI, "OMP170", Remark); 1598 else 1599 emitRemark<OptimizationRemark>(&F, "OMP170", Remark); 1600 1601 CGUpdater.removeCallSite(*CI); 1602 CI->replaceAllUsesWith(ReplVal); 1603 CI->eraseFromParent(); 1604 ++NumOpenMPRuntimeCallsDeduplicated; 1605 Changed = true; 1606 return true; 1607 }; 1608 RFI.foreachUse(SCC, ReplaceAndDeleteCB); 1609 1610 return Changed; 1611 } 1612 1613 /// Collect arguments that represent the global thread id in \p GTIdArgs. 1614 void collectGlobalThreadIdArguments(SmallSetVector<Value *, 16> >IdArgs) { 1615 // TODO: Below we basically perform a fixpoint iteration with a pessimistic 1616 // initialization. We could define an AbstractAttribute instead and 1617 // run the Attributor here once it can be run as an SCC pass. 1618 1619 // Helper to check the argument \p ArgNo at all call sites of \p F for 1620 // a GTId. 1621 auto CallArgOpIsGTId = [&](Function &F, unsigned ArgNo, CallInst &RefCI) { 1622 if (!F.hasLocalLinkage()) 1623 return false; 1624 for (Use &U : F.uses()) { 1625 if (CallInst *CI = getCallIfRegularCall(U)) { 1626 Value *ArgOp = CI->getArgOperand(ArgNo); 1627 if (CI == &RefCI || GTIdArgs.count(ArgOp) || 1628 getCallIfRegularCall( 1629 *ArgOp, &OMPInfoCache.RFIs[OMPRTL___kmpc_global_thread_num])) 1630 continue; 1631 } 1632 return false; 1633 } 1634 return true; 1635 }; 1636 1637 // Helper to identify uses of a GTId as GTId arguments. 1638 auto AddUserArgs = [&](Value >Id) { 1639 for (Use &U : GTId.uses()) 1640 if (CallInst *CI = dyn_cast<CallInst>(U.getUser())) 1641 if (CI->isArgOperand(&U)) 1642 if (Function *Callee = CI->getCalledFunction()) 1643 if (CallArgOpIsGTId(*Callee, U.getOperandNo(), *CI)) 1644 GTIdArgs.insert(Callee->getArg(U.getOperandNo())); 1645 }; 1646 1647 // The argument users of __kmpc_global_thread_num calls are GTIds. 1648 OMPInformationCache::RuntimeFunctionInfo &GlobThreadNumRFI = 1649 OMPInfoCache.RFIs[OMPRTL___kmpc_global_thread_num]; 1650 1651 GlobThreadNumRFI.foreachUse(SCC, [&](Use &U, Function &F) { 1652 if (CallInst *CI = getCallIfRegularCall(U, &GlobThreadNumRFI)) 1653 AddUserArgs(*CI); 1654 return false; 1655 }); 1656 1657 // Transitively search for more arguments by looking at the users of the 1658 // ones we know already. During the search the GTIdArgs vector is extended 1659 // so we cannot cache the size nor can we use a range based for. 1660 for (unsigned u = 0; u < GTIdArgs.size(); ++u) 1661 AddUserArgs(*GTIdArgs[u]); 1662 } 1663 1664 /// Kernel (=GPU) optimizations and utility functions 1665 /// 1666 ///{{ 1667 1668 /// Check if \p F is a kernel, hence entry point for target offloading. 1669 bool isKernel(Function &F) { return OMPInfoCache.Kernels.count(&F); } 1670 1671 /// Cache to remember the unique kernel for a function. 1672 DenseMap<Function *, Optional<Kernel>> UniqueKernelMap; 1673 1674 /// Find the unique kernel that will execute \p F, if any. 1675 Kernel getUniqueKernelFor(Function &F); 1676 1677 /// Find the unique kernel that will execute \p I, if any. 1678 Kernel getUniqueKernelFor(Instruction &I) { 1679 return getUniqueKernelFor(*I.getFunction()); 1680 } 1681 1682 /// Rewrite the device (=GPU) code state machine create in non-SPMD mode in 1683 /// the cases we can avoid taking the address of a function. 1684 bool rewriteDeviceCodeStateMachine(); 1685 1686 /// 1687 ///}} 1688 1689 /// Emit a remark generically 1690 /// 1691 /// This template function can be used to generically emit a remark. The 1692 /// RemarkKind should be one of the following: 1693 /// - OptimizationRemark to indicate a successful optimization attempt 1694 /// - OptimizationRemarkMissed to report a failed optimization attempt 1695 /// - OptimizationRemarkAnalysis to provide additional information about an 1696 /// optimization attempt 1697 /// 1698 /// The remark is built using a callback function provided by the caller that 1699 /// takes a RemarkKind as input and returns a RemarkKind. 1700 template <typename RemarkKind, typename RemarkCallBack> 1701 void emitRemark(Instruction *I, StringRef RemarkName, 1702 RemarkCallBack &&RemarkCB) const { 1703 Function *F = I->getParent()->getParent(); 1704 auto &ORE = OREGetter(F); 1705 1706 if (RemarkName.startswith("OMP")) 1707 ORE.emit([&]() { 1708 return RemarkCB(RemarkKind(DEBUG_TYPE, RemarkName, I)) 1709 << " [" << RemarkName << "]"; 1710 }); 1711 else 1712 ORE.emit( 1713 [&]() { return RemarkCB(RemarkKind(DEBUG_TYPE, RemarkName, I)); }); 1714 } 1715 1716 /// Emit a remark on a function. 1717 template <typename RemarkKind, typename RemarkCallBack> 1718 void emitRemark(Function *F, StringRef RemarkName, 1719 RemarkCallBack &&RemarkCB) const { 1720 auto &ORE = OREGetter(F); 1721 1722 if (RemarkName.startswith("OMP")) 1723 ORE.emit([&]() { 1724 return RemarkCB(RemarkKind(DEBUG_TYPE, RemarkName, F)) 1725 << " [" << RemarkName << "]"; 1726 }); 1727 else 1728 ORE.emit( 1729 [&]() { return RemarkCB(RemarkKind(DEBUG_TYPE, RemarkName, F)); }); 1730 } 1731 1732 /// The underlying module. 1733 Module &M; 1734 1735 /// The SCC we are operating on. 1736 SmallVectorImpl<Function *> &SCC; 1737 1738 /// Callback to update the call graph, the first argument is a removed call, 1739 /// the second an optional replacement call. 1740 CallGraphUpdater &CGUpdater; 1741 1742 /// Callback to get an OptimizationRemarkEmitter from a Function * 1743 OptimizationRemarkGetter OREGetter; 1744 1745 /// OpenMP-specific information cache. Also Used for Attributor runs. 1746 OMPInformationCache &OMPInfoCache; 1747 1748 /// Attributor instance. 1749 Attributor &A; 1750 1751 /// Helper function to run Attributor on SCC. 1752 bool runAttributor(bool IsModulePass) { 1753 if (SCC.empty()) 1754 return false; 1755 1756 registerAAs(IsModulePass); 1757 1758 ChangeStatus Changed = A.run(); 1759 1760 LLVM_DEBUG(dbgs() << "[Attributor] Done with " << SCC.size() 1761 << " functions, result: " << Changed << ".\n"); 1762 1763 return Changed == ChangeStatus::CHANGED; 1764 } 1765 1766 /// Populate the Attributor with abstract attribute opportunities in the 1767 /// function. 1768 void registerAAs(bool IsModulePass); 1769 }; 1770 1771 Kernel OpenMPOpt::getUniqueKernelFor(Function &F) { 1772 if (!OMPInfoCache.ModuleSlice.count(&F)) 1773 return nullptr; 1774 1775 // Use a scope to keep the lifetime of the CachedKernel short. 1776 { 1777 Optional<Kernel> &CachedKernel = UniqueKernelMap[&F]; 1778 if (CachedKernel) 1779 return *CachedKernel; 1780 1781 // TODO: We should use an AA to create an (optimistic and callback 1782 // call-aware) call graph. For now we stick to simple patterns that 1783 // are less powerful, basically the worst fixpoint. 1784 if (isKernel(F)) { 1785 CachedKernel = Kernel(&F); 1786 return *CachedKernel; 1787 } 1788 1789 CachedKernel = nullptr; 1790 if (!F.hasLocalLinkage()) { 1791 1792 // See https://openmp.llvm.org/remarks/OptimizationRemarks.html 1793 auto Remark = [&](OptimizationRemarkAnalysis ORA) { 1794 return ORA << "Potentially unknown OpenMP target region caller."; 1795 }; 1796 emitRemark<OptimizationRemarkAnalysis>(&F, "OMP100", Remark); 1797 1798 return nullptr; 1799 } 1800 } 1801 1802 auto GetUniqueKernelForUse = [&](const Use &U) -> Kernel { 1803 if (auto *Cmp = dyn_cast<ICmpInst>(U.getUser())) { 1804 // Allow use in equality comparisons. 1805 if (Cmp->isEquality()) 1806 return getUniqueKernelFor(*Cmp); 1807 return nullptr; 1808 } 1809 if (auto *CB = dyn_cast<CallBase>(U.getUser())) { 1810 // Allow direct calls. 1811 if (CB->isCallee(&U)) 1812 return getUniqueKernelFor(*CB); 1813 1814 OMPInformationCache::RuntimeFunctionInfo &KernelParallelRFI = 1815 OMPInfoCache.RFIs[OMPRTL___kmpc_parallel_51]; 1816 // Allow the use in __kmpc_parallel_51 calls. 1817 if (OpenMPOpt::getCallIfRegularCall(*U.getUser(), &KernelParallelRFI)) 1818 return getUniqueKernelFor(*CB); 1819 return nullptr; 1820 } 1821 // Disallow every other use. 1822 return nullptr; 1823 }; 1824 1825 // TODO: In the future we want to track more than just a unique kernel. 1826 SmallPtrSet<Kernel, 2> PotentialKernels; 1827 OMPInformationCache::foreachUse(F, [&](const Use &U) { 1828 PotentialKernels.insert(GetUniqueKernelForUse(U)); 1829 }); 1830 1831 Kernel K = nullptr; 1832 if (PotentialKernels.size() == 1) 1833 K = *PotentialKernels.begin(); 1834 1835 // Cache the result. 1836 UniqueKernelMap[&F] = K; 1837 1838 return K; 1839 } 1840 1841 bool OpenMPOpt::rewriteDeviceCodeStateMachine() { 1842 OMPInformationCache::RuntimeFunctionInfo &KernelParallelRFI = 1843 OMPInfoCache.RFIs[OMPRTL___kmpc_parallel_51]; 1844 1845 bool Changed = false; 1846 if (!KernelParallelRFI) 1847 return Changed; 1848 1849 for (Function *F : SCC) { 1850 1851 // Check if the function is a use in a __kmpc_parallel_51 call at 1852 // all. 1853 bool UnknownUse = false; 1854 bool KernelParallelUse = false; 1855 unsigned NumDirectCalls = 0; 1856 1857 SmallVector<Use *, 2> ToBeReplacedStateMachineUses; 1858 OMPInformationCache::foreachUse(*F, [&](Use &U) { 1859 if (auto *CB = dyn_cast<CallBase>(U.getUser())) 1860 if (CB->isCallee(&U)) { 1861 ++NumDirectCalls; 1862 return; 1863 } 1864 1865 if (isa<ICmpInst>(U.getUser())) { 1866 ToBeReplacedStateMachineUses.push_back(&U); 1867 return; 1868 } 1869 1870 // Find wrapper functions that represent parallel kernels. 1871 CallInst *CI = 1872 OpenMPOpt::getCallIfRegularCall(*U.getUser(), &KernelParallelRFI); 1873 const unsigned int WrapperFunctionArgNo = 6; 1874 if (!KernelParallelUse && CI && 1875 CI->getArgOperandNo(&U) == WrapperFunctionArgNo) { 1876 KernelParallelUse = true; 1877 ToBeReplacedStateMachineUses.push_back(&U); 1878 return; 1879 } 1880 UnknownUse = true; 1881 }); 1882 1883 // Do not emit a remark if we haven't seen a __kmpc_parallel_51 1884 // use. 1885 if (!KernelParallelUse) 1886 continue; 1887 1888 // If this ever hits, we should investigate. 1889 // TODO: Checking the number of uses is not a necessary restriction and 1890 // should be lifted. 1891 if (UnknownUse || NumDirectCalls != 1 || 1892 ToBeReplacedStateMachineUses.size() > 2) { 1893 auto Remark = [&](OptimizationRemarkAnalysis ORA) { 1894 return ORA << "Parallel region is used in " 1895 << (UnknownUse ? "unknown" : "unexpected") 1896 << " ways. Will not attempt to rewrite the state machine."; 1897 }; 1898 emitRemark<OptimizationRemarkAnalysis>(F, "OMP101", Remark); 1899 continue; 1900 } 1901 1902 // Even if we have __kmpc_parallel_51 calls, we (for now) give 1903 // up if the function is not called from a unique kernel. 1904 Kernel K = getUniqueKernelFor(*F); 1905 if (!K) { 1906 auto Remark = [&](OptimizationRemarkAnalysis ORA) { 1907 return ORA << "Parallel region is not called from a unique kernel. " 1908 "Will not attempt to rewrite the state machine."; 1909 }; 1910 emitRemark<OptimizationRemarkAnalysis>(F, "OMP102", Remark); 1911 continue; 1912 } 1913 1914 // We now know F is a parallel body function called only from the kernel K. 1915 // We also identified the state machine uses in which we replace the 1916 // function pointer by a new global symbol for identification purposes. This 1917 // ensures only direct calls to the function are left. 1918 1919 Module &M = *F->getParent(); 1920 Type *Int8Ty = Type::getInt8Ty(M.getContext()); 1921 1922 auto *ID = new GlobalVariable( 1923 M, Int8Ty, /* isConstant */ true, GlobalValue::PrivateLinkage, 1924 UndefValue::get(Int8Ty), F->getName() + ".ID"); 1925 1926 for (Use *U : ToBeReplacedStateMachineUses) 1927 U->set(ConstantExpr::getBitCast(ID, U->get()->getType())); 1928 1929 ++NumOpenMPParallelRegionsReplacedInGPUStateMachine; 1930 1931 Changed = true; 1932 } 1933 1934 return Changed; 1935 } 1936 1937 /// Abstract Attribute for tracking ICV values. 1938 struct AAICVTracker : public StateWrapper<BooleanState, AbstractAttribute> { 1939 using Base = StateWrapper<BooleanState, AbstractAttribute>; 1940 AAICVTracker(const IRPosition &IRP, Attributor &A) : Base(IRP) {} 1941 1942 void initialize(Attributor &A) override { 1943 Function *F = getAnchorScope(); 1944 if (!F || !A.isFunctionIPOAmendable(*F)) 1945 indicatePessimisticFixpoint(); 1946 } 1947 1948 /// Returns true if value is assumed to be tracked. 1949 bool isAssumedTracked() const { return getAssumed(); } 1950 1951 /// Returns true if value is known to be tracked. 1952 bool isKnownTracked() const { return getAssumed(); } 1953 1954 /// Create an abstract attribute biew for the position \p IRP. 1955 static AAICVTracker &createForPosition(const IRPosition &IRP, Attributor &A); 1956 1957 /// Return the value with which \p I can be replaced for specific \p ICV. 1958 virtual Optional<Value *> getReplacementValue(InternalControlVar ICV, 1959 const Instruction *I, 1960 Attributor &A) const { 1961 return None; 1962 } 1963 1964 /// Return an assumed unique ICV value if a single candidate is found. If 1965 /// there cannot be one, return a nullptr. If it is not clear yet, return the 1966 /// Optional::NoneType. 1967 virtual Optional<Value *> 1968 getUniqueReplacementValue(InternalControlVar ICV) const = 0; 1969 1970 // Currently only nthreads is being tracked. 1971 // this array will only grow with time. 1972 InternalControlVar TrackableICVs[1] = {ICV_nthreads}; 1973 1974 /// See AbstractAttribute::getName() 1975 const std::string getName() const override { return "AAICVTracker"; } 1976 1977 /// See AbstractAttribute::getIdAddr() 1978 const char *getIdAddr() const override { return &ID; } 1979 1980 /// This function should return true if the type of the \p AA is AAICVTracker 1981 static bool classof(const AbstractAttribute *AA) { 1982 return (AA->getIdAddr() == &ID); 1983 } 1984 1985 static const char ID; 1986 }; 1987 1988 struct AAICVTrackerFunction : public AAICVTracker { 1989 AAICVTrackerFunction(const IRPosition &IRP, Attributor &A) 1990 : AAICVTracker(IRP, A) {} 1991 1992 // FIXME: come up with better string. 1993 const std::string getAsStr() const override { return "ICVTrackerFunction"; } 1994 1995 // FIXME: come up with some stats. 1996 void trackStatistics() const override {} 1997 1998 /// We don't manifest anything for this AA. 1999 ChangeStatus manifest(Attributor &A) override { 2000 return ChangeStatus::UNCHANGED; 2001 } 2002 2003 // Map of ICV to their values at specific program point. 2004 EnumeratedArray<DenseMap<Instruction *, Value *>, InternalControlVar, 2005 InternalControlVar::ICV___last> 2006 ICVReplacementValuesMap; 2007 2008 ChangeStatus updateImpl(Attributor &A) override { 2009 ChangeStatus HasChanged = ChangeStatus::UNCHANGED; 2010 2011 Function *F = getAnchorScope(); 2012 2013 auto &OMPInfoCache = static_cast<OMPInformationCache &>(A.getInfoCache()); 2014 2015 for (InternalControlVar ICV : TrackableICVs) { 2016 auto &SetterRFI = OMPInfoCache.RFIs[OMPInfoCache.ICVs[ICV].Setter]; 2017 2018 auto &ValuesMap = ICVReplacementValuesMap[ICV]; 2019 auto TrackValues = [&](Use &U, Function &) { 2020 CallInst *CI = OpenMPOpt::getCallIfRegularCall(U); 2021 if (!CI) 2022 return false; 2023 2024 // FIXME: handle setters with more that 1 arguments. 2025 /// Track new value. 2026 if (ValuesMap.insert(std::make_pair(CI, CI->getArgOperand(0))).second) 2027 HasChanged = ChangeStatus::CHANGED; 2028 2029 return false; 2030 }; 2031 2032 auto CallCheck = [&](Instruction &I) { 2033 Optional<Value *> ReplVal = getValueForCall(A, &I, ICV); 2034 if (ReplVal.hasValue() && 2035 ValuesMap.insert(std::make_pair(&I, *ReplVal)).second) 2036 HasChanged = ChangeStatus::CHANGED; 2037 2038 return true; 2039 }; 2040 2041 // Track all changes of an ICV. 2042 SetterRFI.foreachUse(TrackValues, F); 2043 2044 bool UsedAssumedInformation = false; 2045 A.checkForAllInstructions(CallCheck, *this, {Instruction::Call}, 2046 UsedAssumedInformation, 2047 /* CheckBBLivenessOnly */ true); 2048 2049 /// TODO: Figure out a way to avoid adding entry in 2050 /// ICVReplacementValuesMap 2051 Instruction *Entry = &F->getEntryBlock().front(); 2052 if (HasChanged == ChangeStatus::CHANGED && !ValuesMap.count(Entry)) 2053 ValuesMap.insert(std::make_pair(Entry, nullptr)); 2054 } 2055 2056 return HasChanged; 2057 } 2058 2059 /// Hepler to check if \p I is a call and get the value for it if it is 2060 /// unique. 2061 Optional<Value *> getValueForCall(Attributor &A, const Instruction *I, 2062 InternalControlVar &ICV) const { 2063 2064 const auto *CB = dyn_cast<CallBase>(I); 2065 if (!CB || CB->hasFnAttr("no_openmp") || 2066 CB->hasFnAttr("no_openmp_routines")) 2067 return None; 2068 2069 auto &OMPInfoCache = static_cast<OMPInformationCache &>(A.getInfoCache()); 2070 auto &GetterRFI = OMPInfoCache.RFIs[OMPInfoCache.ICVs[ICV].Getter]; 2071 auto &SetterRFI = OMPInfoCache.RFIs[OMPInfoCache.ICVs[ICV].Setter]; 2072 Function *CalledFunction = CB->getCalledFunction(); 2073 2074 // Indirect call, assume ICV changes. 2075 if (CalledFunction == nullptr) 2076 return nullptr; 2077 if (CalledFunction == GetterRFI.Declaration) 2078 return None; 2079 if (CalledFunction == SetterRFI.Declaration) { 2080 if (ICVReplacementValuesMap[ICV].count(I)) 2081 return ICVReplacementValuesMap[ICV].lookup(I); 2082 2083 return nullptr; 2084 } 2085 2086 // Since we don't know, assume it changes the ICV. 2087 if (CalledFunction->isDeclaration()) 2088 return nullptr; 2089 2090 const auto &ICVTrackingAA = A.getAAFor<AAICVTracker>( 2091 *this, IRPosition::callsite_returned(*CB), DepClassTy::REQUIRED); 2092 2093 if (ICVTrackingAA.isAssumedTracked()) 2094 return ICVTrackingAA.getUniqueReplacementValue(ICV); 2095 2096 // If we don't know, assume it changes. 2097 return nullptr; 2098 } 2099 2100 // We don't check unique value for a function, so return None. 2101 Optional<Value *> 2102 getUniqueReplacementValue(InternalControlVar ICV) const override { 2103 return None; 2104 } 2105 2106 /// Return the value with which \p I can be replaced for specific \p ICV. 2107 Optional<Value *> getReplacementValue(InternalControlVar ICV, 2108 const Instruction *I, 2109 Attributor &A) const override { 2110 const auto &ValuesMap = ICVReplacementValuesMap[ICV]; 2111 if (ValuesMap.count(I)) 2112 return ValuesMap.lookup(I); 2113 2114 SmallVector<const Instruction *, 16> Worklist; 2115 SmallPtrSet<const Instruction *, 16> Visited; 2116 Worklist.push_back(I); 2117 2118 Optional<Value *> ReplVal; 2119 2120 while (!Worklist.empty()) { 2121 const Instruction *CurrInst = Worklist.pop_back_val(); 2122 if (!Visited.insert(CurrInst).second) 2123 continue; 2124 2125 const BasicBlock *CurrBB = CurrInst->getParent(); 2126 2127 // Go up and look for all potential setters/calls that might change the 2128 // ICV. 2129 while ((CurrInst = CurrInst->getPrevNode())) { 2130 if (ValuesMap.count(CurrInst)) { 2131 Optional<Value *> NewReplVal = ValuesMap.lookup(CurrInst); 2132 // Unknown value, track new. 2133 if (!ReplVal.hasValue()) { 2134 ReplVal = NewReplVal; 2135 break; 2136 } 2137 2138 // If we found a new value, we can't know the icv value anymore. 2139 if (NewReplVal.hasValue()) 2140 if (ReplVal != NewReplVal) 2141 return nullptr; 2142 2143 break; 2144 } 2145 2146 Optional<Value *> NewReplVal = getValueForCall(A, CurrInst, ICV); 2147 if (!NewReplVal.hasValue()) 2148 continue; 2149 2150 // Unknown value, track new. 2151 if (!ReplVal.hasValue()) { 2152 ReplVal = NewReplVal; 2153 break; 2154 } 2155 2156 // if (NewReplVal.hasValue()) 2157 // We found a new value, we can't know the icv value anymore. 2158 if (ReplVal != NewReplVal) 2159 return nullptr; 2160 } 2161 2162 // If we are in the same BB and we have a value, we are done. 2163 if (CurrBB == I->getParent() && ReplVal.hasValue()) 2164 return ReplVal; 2165 2166 // Go through all predecessors and add terminators for analysis. 2167 for (const BasicBlock *Pred : predecessors(CurrBB)) 2168 if (const Instruction *Terminator = Pred->getTerminator()) 2169 Worklist.push_back(Terminator); 2170 } 2171 2172 return ReplVal; 2173 } 2174 }; 2175 2176 struct AAICVTrackerFunctionReturned : AAICVTracker { 2177 AAICVTrackerFunctionReturned(const IRPosition &IRP, Attributor &A) 2178 : AAICVTracker(IRP, A) {} 2179 2180 // FIXME: come up with better string. 2181 const std::string getAsStr() const override { 2182 return "ICVTrackerFunctionReturned"; 2183 } 2184 2185 // FIXME: come up with some stats. 2186 void trackStatistics() const override {} 2187 2188 /// We don't manifest anything for this AA. 2189 ChangeStatus manifest(Attributor &A) override { 2190 return ChangeStatus::UNCHANGED; 2191 } 2192 2193 // Map of ICV to their values at specific program point. 2194 EnumeratedArray<Optional<Value *>, InternalControlVar, 2195 InternalControlVar::ICV___last> 2196 ICVReplacementValuesMap; 2197 2198 /// Return the value with which \p I can be replaced for specific \p ICV. 2199 Optional<Value *> 2200 getUniqueReplacementValue(InternalControlVar ICV) const override { 2201 return ICVReplacementValuesMap[ICV]; 2202 } 2203 2204 ChangeStatus updateImpl(Attributor &A) override { 2205 ChangeStatus Changed = ChangeStatus::UNCHANGED; 2206 const auto &ICVTrackingAA = A.getAAFor<AAICVTracker>( 2207 *this, IRPosition::function(*getAnchorScope()), DepClassTy::REQUIRED); 2208 2209 if (!ICVTrackingAA.isAssumedTracked()) 2210 return indicatePessimisticFixpoint(); 2211 2212 for (InternalControlVar ICV : TrackableICVs) { 2213 Optional<Value *> &ReplVal = ICVReplacementValuesMap[ICV]; 2214 Optional<Value *> UniqueICVValue; 2215 2216 auto CheckReturnInst = [&](Instruction &I) { 2217 Optional<Value *> NewReplVal = 2218 ICVTrackingAA.getReplacementValue(ICV, &I, A); 2219 2220 // If we found a second ICV value there is no unique returned value. 2221 if (UniqueICVValue.hasValue() && UniqueICVValue != NewReplVal) 2222 return false; 2223 2224 UniqueICVValue = NewReplVal; 2225 2226 return true; 2227 }; 2228 2229 bool UsedAssumedInformation = false; 2230 if (!A.checkForAllInstructions(CheckReturnInst, *this, {Instruction::Ret}, 2231 UsedAssumedInformation, 2232 /* CheckBBLivenessOnly */ true)) 2233 UniqueICVValue = nullptr; 2234 2235 if (UniqueICVValue == ReplVal) 2236 continue; 2237 2238 ReplVal = UniqueICVValue; 2239 Changed = ChangeStatus::CHANGED; 2240 } 2241 2242 return Changed; 2243 } 2244 }; 2245 2246 struct AAICVTrackerCallSite : AAICVTracker { 2247 AAICVTrackerCallSite(const IRPosition &IRP, Attributor &A) 2248 : AAICVTracker(IRP, A) {} 2249 2250 void initialize(Attributor &A) override { 2251 Function *F = getAnchorScope(); 2252 if (!F || !A.isFunctionIPOAmendable(*F)) 2253 indicatePessimisticFixpoint(); 2254 2255 // We only initialize this AA for getters, so we need to know which ICV it 2256 // gets. 2257 auto &OMPInfoCache = static_cast<OMPInformationCache &>(A.getInfoCache()); 2258 for (InternalControlVar ICV : TrackableICVs) { 2259 auto ICVInfo = OMPInfoCache.ICVs[ICV]; 2260 auto &Getter = OMPInfoCache.RFIs[ICVInfo.Getter]; 2261 if (Getter.Declaration == getAssociatedFunction()) { 2262 AssociatedICV = ICVInfo.Kind; 2263 return; 2264 } 2265 } 2266 2267 /// Unknown ICV. 2268 indicatePessimisticFixpoint(); 2269 } 2270 2271 ChangeStatus manifest(Attributor &A) override { 2272 if (!ReplVal.hasValue() || !ReplVal.getValue()) 2273 return ChangeStatus::UNCHANGED; 2274 2275 A.changeValueAfterManifest(*getCtxI(), **ReplVal); 2276 A.deleteAfterManifest(*getCtxI()); 2277 2278 return ChangeStatus::CHANGED; 2279 } 2280 2281 // FIXME: come up with better string. 2282 const std::string getAsStr() const override { return "ICVTrackerCallSite"; } 2283 2284 // FIXME: come up with some stats. 2285 void trackStatistics() const override {} 2286 2287 InternalControlVar AssociatedICV; 2288 Optional<Value *> ReplVal; 2289 2290 ChangeStatus updateImpl(Attributor &A) override { 2291 const auto &ICVTrackingAA = A.getAAFor<AAICVTracker>( 2292 *this, IRPosition::function(*getAnchorScope()), DepClassTy::REQUIRED); 2293 2294 // We don't have any information, so we assume it changes the ICV. 2295 if (!ICVTrackingAA.isAssumedTracked()) 2296 return indicatePessimisticFixpoint(); 2297 2298 Optional<Value *> NewReplVal = 2299 ICVTrackingAA.getReplacementValue(AssociatedICV, getCtxI(), A); 2300 2301 if (ReplVal == NewReplVal) 2302 return ChangeStatus::UNCHANGED; 2303 2304 ReplVal = NewReplVal; 2305 return ChangeStatus::CHANGED; 2306 } 2307 2308 // Return the value with which associated value can be replaced for specific 2309 // \p ICV. 2310 Optional<Value *> 2311 getUniqueReplacementValue(InternalControlVar ICV) const override { 2312 return ReplVal; 2313 } 2314 }; 2315 2316 struct AAICVTrackerCallSiteReturned : AAICVTracker { 2317 AAICVTrackerCallSiteReturned(const IRPosition &IRP, Attributor &A) 2318 : AAICVTracker(IRP, A) {} 2319 2320 // FIXME: come up with better string. 2321 const std::string getAsStr() const override { 2322 return "ICVTrackerCallSiteReturned"; 2323 } 2324 2325 // FIXME: come up with some stats. 2326 void trackStatistics() const override {} 2327 2328 /// We don't manifest anything for this AA. 2329 ChangeStatus manifest(Attributor &A) override { 2330 return ChangeStatus::UNCHANGED; 2331 } 2332 2333 // Map of ICV to their values at specific program point. 2334 EnumeratedArray<Optional<Value *>, InternalControlVar, 2335 InternalControlVar::ICV___last> 2336 ICVReplacementValuesMap; 2337 2338 /// Return the value with which associated value can be replaced for specific 2339 /// \p ICV. 2340 Optional<Value *> 2341 getUniqueReplacementValue(InternalControlVar ICV) const override { 2342 return ICVReplacementValuesMap[ICV]; 2343 } 2344 2345 ChangeStatus updateImpl(Attributor &A) override { 2346 ChangeStatus Changed = ChangeStatus::UNCHANGED; 2347 const auto &ICVTrackingAA = A.getAAFor<AAICVTracker>( 2348 *this, IRPosition::returned(*getAssociatedFunction()), 2349 DepClassTy::REQUIRED); 2350 2351 // We don't have any information, so we assume it changes the ICV. 2352 if (!ICVTrackingAA.isAssumedTracked()) 2353 return indicatePessimisticFixpoint(); 2354 2355 for (InternalControlVar ICV : TrackableICVs) { 2356 Optional<Value *> &ReplVal = ICVReplacementValuesMap[ICV]; 2357 Optional<Value *> NewReplVal = 2358 ICVTrackingAA.getUniqueReplacementValue(ICV); 2359 2360 if (ReplVal == NewReplVal) 2361 continue; 2362 2363 ReplVal = NewReplVal; 2364 Changed = ChangeStatus::CHANGED; 2365 } 2366 return Changed; 2367 } 2368 }; 2369 2370 struct AAExecutionDomainFunction : public AAExecutionDomain { 2371 AAExecutionDomainFunction(const IRPosition &IRP, Attributor &A) 2372 : AAExecutionDomain(IRP, A) {} 2373 2374 const std::string getAsStr() const override { 2375 return "[AAExecutionDomain] " + std::to_string(SingleThreadedBBs.size()) + 2376 "/" + std::to_string(NumBBs) + " BBs thread 0 only."; 2377 } 2378 2379 /// See AbstractAttribute::trackStatistics(). 2380 void trackStatistics() const override {} 2381 2382 void initialize(Attributor &A) override { 2383 Function *F = getAnchorScope(); 2384 for (const auto &BB : *F) 2385 SingleThreadedBBs.insert(&BB); 2386 NumBBs = SingleThreadedBBs.size(); 2387 } 2388 2389 ChangeStatus manifest(Attributor &A) override { 2390 LLVM_DEBUG({ 2391 for (const BasicBlock *BB : SingleThreadedBBs) 2392 dbgs() << TAG << " Basic block @" << getAnchorScope()->getName() << " " 2393 << BB->getName() << " is executed by a single thread.\n"; 2394 }); 2395 return ChangeStatus::UNCHANGED; 2396 } 2397 2398 ChangeStatus updateImpl(Attributor &A) override; 2399 2400 /// Check if an instruction is executed by a single thread. 2401 bool isExecutedByInitialThreadOnly(const Instruction &I) const override { 2402 return isExecutedByInitialThreadOnly(*I.getParent()); 2403 } 2404 2405 bool isExecutedByInitialThreadOnly(const BasicBlock &BB) const override { 2406 return isValidState() && SingleThreadedBBs.contains(&BB); 2407 } 2408 2409 /// Set of basic blocks that are executed by a single thread. 2410 DenseSet<const BasicBlock *> SingleThreadedBBs; 2411 2412 /// Total number of basic blocks in this function. 2413 long unsigned NumBBs; 2414 }; 2415 2416 ChangeStatus AAExecutionDomainFunction::updateImpl(Attributor &A) { 2417 Function *F = getAnchorScope(); 2418 ReversePostOrderTraversal<Function *> RPOT(F); 2419 auto NumSingleThreadedBBs = SingleThreadedBBs.size(); 2420 2421 bool AllCallSitesKnown; 2422 auto PredForCallSite = [&](AbstractCallSite ACS) { 2423 const auto &ExecutionDomainAA = A.getAAFor<AAExecutionDomain>( 2424 *this, IRPosition::function(*ACS.getInstruction()->getFunction()), 2425 DepClassTy::REQUIRED); 2426 return ACS.isDirectCall() && 2427 ExecutionDomainAA.isExecutedByInitialThreadOnly( 2428 *ACS.getInstruction()); 2429 }; 2430 2431 if (!A.checkForAllCallSites(PredForCallSite, *this, 2432 /* RequiresAllCallSites */ true, 2433 AllCallSitesKnown)) 2434 SingleThreadedBBs.erase(&F->getEntryBlock()); 2435 2436 auto &OMPInfoCache = static_cast<OMPInformationCache &>(A.getInfoCache()); 2437 auto &RFI = OMPInfoCache.RFIs[OMPRTL___kmpc_target_init]; 2438 2439 // Check if the edge into the successor block compares the __kmpc_target_init 2440 // result with -1. If we are in non-SPMD-mode that signals only the main 2441 // thread will execute the edge. 2442 auto IsInitialThreadOnly = [&](BranchInst *Edge, BasicBlock *SuccessorBB) { 2443 if (!Edge || !Edge->isConditional()) 2444 return false; 2445 if (Edge->getSuccessor(0) != SuccessorBB) 2446 return false; 2447 2448 auto *Cmp = dyn_cast<CmpInst>(Edge->getCondition()); 2449 if (!Cmp || !Cmp->isTrueWhenEqual() || !Cmp->isEquality()) 2450 return false; 2451 2452 ConstantInt *C = dyn_cast<ConstantInt>(Cmp->getOperand(1)); 2453 if (!C) 2454 return false; 2455 2456 // Match: -1 == __kmpc_target_init (for non-SPMD kernels only!) 2457 if (C->isAllOnesValue()) { 2458 auto *CB = dyn_cast<CallBase>(Cmp->getOperand(0)); 2459 if (!CB || CB->getCalledFunction() != RFI.Declaration) 2460 return false; 2461 const int InitIsSPMDArgNo = 1; 2462 auto *IsSPMDModeCI = 2463 dyn_cast<ConstantInt>(CB->getOperand(InitIsSPMDArgNo)); 2464 return IsSPMDModeCI && IsSPMDModeCI->isZero(); 2465 } 2466 2467 return false; 2468 }; 2469 2470 // Merge all the predecessor states into the current basic block. A basic 2471 // block is executed by a single thread if all of its predecessors are. 2472 auto MergePredecessorStates = [&](BasicBlock *BB) { 2473 if (pred_begin(BB) == pred_end(BB)) 2474 return SingleThreadedBBs.contains(BB); 2475 2476 bool IsInitialThread = true; 2477 for (auto PredBB = pred_begin(BB), PredEndBB = pred_end(BB); 2478 PredBB != PredEndBB; ++PredBB) { 2479 if (!IsInitialThreadOnly(dyn_cast<BranchInst>((*PredBB)->getTerminator()), 2480 BB)) 2481 IsInitialThread &= SingleThreadedBBs.contains(*PredBB); 2482 } 2483 2484 return IsInitialThread; 2485 }; 2486 2487 for (auto *BB : RPOT) { 2488 if (!MergePredecessorStates(BB)) 2489 SingleThreadedBBs.erase(BB); 2490 } 2491 2492 return (NumSingleThreadedBBs == SingleThreadedBBs.size()) 2493 ? ChangeStatus::UNCHANGED 2494 : ChangeStatus::CHANGED; 2495 } 2496 2497 /// Try to replace memory allocation calls called by a single thread with a 2498 /// static buffer of shared memory. 2499 struct AAHeapToShared : public StateWrapper<BooleanState, AbstractAttribute> { 2500 using Base = StateWrapper<BooleanState, AbstractAttribute>; 2501 AAHeapToShared(const IRPosition &IRP, Attributor &A) : Base(IRP) {} 2502 2503 /// Create an abstract attribute view for the position \p IRP. 2504 static AAHeapToShared &createForPosition(const IRPosition &IRP, 2505 Attributor &A); 2506 2507 /// See AbstractAttribute::getName(). 2508 const std::string getName() const override { return "AAHeapToShared"; } 2509 2510 /// See AbstractAttribute::getIdAddr(). 2511 const char *getIdAddr() const override { return &ID; } 2512 2513 /// This function should return true if the type of the \p AA is 2514 /// AAHeapToShared. 2515 static bool classof(const AbstractAttribute *AA) { 2516 return (AA->getIdAddr() == &ID); 2517 } 2518 2519 /// Unique ID (due to the unique address) 2520 static const char ID; 2521 }; 2522 2523 struct AAHeapToSharedFunction : public AAHeapToShared { 2524 AAHeapToSharedFunction(const IRPosition &IRP, Attributor &A) 2525 : AAHeapToShared(IRP, A) {} 2526 2527 const std::string getAsStr() const override { 2528 return "[AAHeapToShared] " + std::to_string(MallocCalls.size()) + 2529 " malloc calls eligible."; 2530 } 2531 2532 /// See AbstractAttribute::trackStatistics(). 2533 void trackStatistics() const override {} 2534 2535 void initialize(Attributor &A) override { 2536 auto &OMPInfoCache = static_cast<OMPInformationCache &>(A.getInfoCache()); 2537 auto &RFI = OMPInfoCache.RFIs[OMPRTL___kmpc_alloc_shared]; 2538 2539 for (User *U : RFI.Declaration->users()) 2540 if (CallBase *CB = dyn_cast<CallBase>(U)) 2541 MallocCalls.insert(CB); 2542 } 2543 2544 ChangeStatus manifest(Attributor &A) override { 2545 if (MallocCalls.empty()) 2546 return ChangeStatus::UNCHANGED; 2547 2548 auto &OMPInfoCache = static_cast<OMPInformationCache &>(A.getInfoCache()); 2549 auto &FreeCall = OMPInfoCache.RFIs[OMPRTL___kmpc_free_shared]; 2550 2551 Function *F = getAnchorScope(); 2552 auto *HS = A.lookupAAFor<AAHeapToStack>(IRPosition::function(*F), this, 2553 DepClassTy::OPTIONAL); 2554 2555 ChangeStatus Changed = ChangeStatus::UNCHANGED; 2556 for (CallBase *CB : MallocCalls) { 2557 // Skip replacing this if HeapToStack has already claimed it. 2558 if (HS && HS->isAssumedHeapToStack(*CB)) 2559 continue; 2560 2561 // Find the unique free call to remove it. 2562 SmallVector<CallBase *, 4> FreeCalls; 2563 for (auto *U : CB->users()) { 2564 CallBase *C = dyn_cast<CallBase>(U); 2565 if (C && C->getCalledFunction() == FreeCall.Declaration) 2566 FreeCalls.push_back(C); 2567 } 2568 if (FreeCalls.size() != 1) 2569 continue; 2570 2571 ConstantInt *AllocSize = dyn_cast<ConstantInt>(CB->getArgOperand(0)); 2572 2573 LLVM_DEBUG(dbgs() << TAG << "Replace globalization call in " 2574 << CB->getCaller()->getName() << " with " 2575 << AllocSize->getZExtValue() 2576 << " bytes of shared memory\n"); 2577 2578 // Create a new shared memory buffer of the same size as the allocation 2579 // and replace all the uses of the original allocation with it. 2580 Module *M = CB->getModule(); 2581 Type *Int8Ty = Type::getInt8Ty(M->getContext()); 2582 Type *Int8ArrTy = ArrayType::get(Int8Ty, AllocSize->getZExtValue()); 2583 auto *SharedMem = new GlobalVariable( 2584 *M, Int8ArrTy, /* IsConstant */ false, GlobalValue::InternalLinkage, 2585 UndefValue::get(Int8ArrTy), CB->getName(), nullptr, 2586 GlobalValue::NotThreadLocal, 2587 static_cast<unsigned>(AddressSpace::Shared)); 2588 auto *NewBuffer = 2589 ConstantExpr::getPointerCast(SharedMem, Int8Ty->getPointerTo()); 2590 2591 auto Remark = [&](OptimizationRemark OR) { 2592 return OR << "Replaced globalized variable with " 2593 << ore::NV("SharedMemory", AllocSize->getZExtValue()) 2594 << ((AllocSize->getZExtValue() != 1) ? " bytes " : " byte ") 2595 << "of shared memory."; 2596 }; 2597 A.emitRemark<OptimizationRemark>(CB, "OMP111", Remark); 2598 2599 SharedMem->setAlignment(MaybeAlign(32)); 2600 2601 A.changeValueAfterManifest(*CB, *NewBuffer); 2602 A.deleteAfterManifest(*CB); 2603 A.deleteAfterManifest(*FreeCalls.front()); 2604 2605 NumBytesMovedToSharedMemory += AllocSize->getZExtValue(); 2606 Changed = ChangeStatus::CHANGED; 2607 } 2608 2609 return Changed; 2610 } 2611 2612 ChangeStatus updateImpl(Attributor &A) override { 2613 auto &OMPInfoCache = static_cast<OMPInformationCache &>(A.getInfoCache()); 2614 auto &RFI = OMPInfoCache.RFIs[OMPRTL___kmpc_alloc_shared]; 2615 Function *F = getAnchorScope(); 2616 2617 auto NumMallocCalls = MallocCalls.size(); 2618 2619 // Only consider malloc calls executed by a single thread with a constant. 2620 for (User *U : RFI.Declaration->users()) { 2621 const auto &ED = A.getAAFor<AAExecutionDomain>( 2622 *this, IRPosition::function(*F), DepClassTy::REQUIRED); 2623 if (CallBase *CB = dyn_cast<CallBase>(U)) 2624 if (!dyn_cast<ConstantInt>(CB->getArgOperand(0)) || 2625 !ED.isExecutedByInitialThreadOnly(*CB)) 2626 MallocCalls.erase(CB); 2627 } 2628 2629 if (NumMallocCalls != MallocCalls.size()) 2630 return ChangeStatus::CHANGED; 2631 2632 return ChangeStatus::UNCHANGED; 2633 } 2634 2635 /// Collection of all malloc calls in a function. 2636 SmallPtrSet<CallBase *, 4> MallocCalls; 2637 }; 2638 2639 struct AAKernelInfo : public StateWrapper<KernelInfoState, AbstractAttribute> { 2640 using Base = StateWrapper<KernelInfoState, AbstractAttribute>; 2641 AAKernelInfo(const IRPosition &IRP, Attributor &A) : Base(IRP) {} 2642 2643 /// Statistics are tracked as part of manifest for now. 2644 void trackStatistics() const override {} 2645 2646 /// See AbstractAttribute::getAsStr() 2647 const std::string getAsStr() const override { 2648 if (!isValidState()) 2649 return "<invalid>"; 2650 return std::string(SPMDCompatibilityTracker.isAssumed() ? "SPMD" 2651 : "generic") + 2652 std::string(SPMDCompatibilityTracker.isAtFixpoint() ? " [FIX]" 2653 : "") + 2654 std::string(" #PRs: ") + 2655 std::to_string(ReachedKnownParallelRegions.size()) + 2656 ", #Unknown PRs: " + 2657 std::to_string(ReachedUnknownParallelRegions.size()); 2658 } 2659 2660 /// Create an abstract attribute biew for the position \p IRP. 2661 static AAKernelInfo &createForPosition(const IRPosition &IRP, Attributor &A); 2662 2663 /// See AbstractAttribute::getName() 2664 const std::string getName() const override { return "AAKernelInfo"; } 2665 2666 /// See AbstractAttribute::getIdAddr() 2667 const char *getIdAddr() const override { return &ID; } 2668 2669 /// This function should return true if the type of the \p AA is AAKernelInfo 2670 static bool classof(const AbstractAttribute *AA) { 2671 return (AA->getIdAddr() == &ID); 2672 } 2673 2674 static const char ID; 2675 }; 2676 2677 /// The function kernel info abstract attribute, basically, what can we say 2678 /// about a function with regards to the KernelInfoState. 2679 struct AAKernelInfoFunction : AAKernelInfo { 2680 AAKernelInfoFunction(const IRPosition &IRP, Attributor &A) 2681 : AAKernelInfo(IRP, A) {} 2682 2683 /// See AbstractAttribute::initialize(...). 2684 void initialize(Attributor &A) override { 2685 // This is a high-level transform that might change the constant arguments 2686 // of the init and dinit calls. We need to tell the Attributor about this 2687 // to avoid other parts using the current constant value for simpliication. 2688 auto &OMPInfoCache = static_cast<OMPInformationCache &>(A.getInfoCache()); 2689 2690 Function *Fn = getAnchorScope(); 2691 if (!OMPInfoCache.Kernels.count(Fn)) 2692 return; 2693 2694 // Add itself to the reaching kernel and set IsKernelEntry. 2695 ReachingKernelEntries.insert(Fn); 2696 IsKernelEntry = true; 2697 2698 OMPInformationCache::RuntimeFunctionInfo &InitRFI = 2699 OMPInfoCache.RFIs[OMPRTL___kmpc_target_init]; 2700 OMPInformationCache::RuntimeFunctionInfo &DeinitRFI = 2701 OMPInfoCache.RFIs[OMPRTL___kmpc_target_deinit]; 2702 2703 // For kernels we perform more initialization work, first we find the init 2704 // and deinit calls. 2705 auto StoreCallBase = [](Use &U, 2706 OMPInformationCache::RuntimeFunctionInfo &RFI, 2707 CallBase *&Storage) { 2708 CallBase *CB = OpenMPOpt::getCallIfRegularCall(U, &RFI); 2709 assert(CB && 2710 "Unexpected use of __kmpc_target_init or __kmpc_target_deinit!"); 2711 assert(!Storage && 2712 "Multiple uses of __kmpc_target_init or __kmpc_target_deinit!"); 2713 Storage = CB; 2714 return false; 2715 }; 2716 InitRFI.foreachUse( 2717 [&](Use &U, Function &) { 2718 StoreCallBase(U, InitRFI, KernelInitCB); 2719 return false; 2720 }, 2721 Fn); 2722 DeinitRFI.foreachUse( 2723 [&](Use &U, Function &) { 2724 StoreCallBase(U, DeinitRFI, KernelDeinitCB); 2725 return false; 2726 }, 2727 Fn); 2728 2729 assert((KernelInitCB && KernelDeinitCB) && 2730 "Kernel without __kmpc_target_init or __kmpc_target_deinit!"); 2731 2732 // For kernels we might need to initialize/finalize the IsSPMD state and 2733 // we need to register a simplification callback so that the Attributor 2734 // knows the constant arguments to __kmpc_target_init and 2735 // __kmpc_target_deinit might actually change. 2736 2737 Attributor::SimplifictionCallbackTy StateMachineSimplifyCB = 2738 [&](const IRPosition &IRP, const AbstractAttribute *AA, 2739 bool &UsedAssumedInformation) -> Optional<Value *> { 2740 // IRP represents the "use generic state machine" argument of an 2741 // __kmpc_target_init call. We will answer this one with the internal 2742 // state. As long as we are not in an invalid state, we will create a 2743 // custom state machine so the value should be a `i1 false`. If we are 2744 // in an invalid state, we won't change the value that is in the IR. 2745 if (!isValidState()) 2746 return nullptr; 2747 if (AA) 2748 A.recordDependence(*this, *AA, DepClassTy::OPTIONAL); 2749 UsedAssumedInformation = !isAtFixpoint(); 2750 auto *FalseVal = 2751 ConstantInt::getBool(IRP.getAnchorValue().getContext(), 0); 2752 return FalseVal; 2753 }; 2754 2755 Attributor::SimplifictionCallbackTy IsSPMDModeSimplifyCB = 2756 [&](const IRPosition &IRP, const AbstractAttribute *AA, 2757 bool &UsedAssumedInformation) -> Optional<Value *> { 2758 // IRP represents the "SPMDCompatibilityTracker" argument of an 2759 // __kmpc_target_init or 2760 // __kmpc_target_deinit call. We will answer this one with the internal 2761 // state. 2762 if (!isValidState()) 2763 return nullptr; 2764 if (!SPMDCompatibilityTracker.isAtFixpoint()) { 2765 if (AA) 2766 A.recordDependence(*this, *AA, DepClassTy::OPTIONAL); 2767 UsedAssumedInformation = true; 2768 } else { 2769 UsedAssumedInformation = false; 2770 } 2771 auto *Val = ConstantInt::getBool(IRP.getAnchorValue().getContext(), 2772 SPMDCompatibilityTracker.isAssumed()); 2773 return Val; 2774 }; 2775 2776 constexpr const int InitIsSPMDArgNo = 1; 2777 constexpr const int DeinitIsSPMDArgNo = 1; 2778 constexpr const int InitUseStateMachineArgNo = 2; 2779 A.registerSimplificationCallback( 2780 IRPosition::callsite_argument(*KernelInitCB, InitUseStateMachineArgNo), 2781 StateMachineSimplifyCB); 2782 A.registerSimplificationCallback( 2783 IRPosition::callsite_argument(*KernelInitCB, InitIsSPMDArgNo), 2784 IsSPMDModeSimplifyCB); 2785 A.registerSimplificationCallback( 2786 IRPosition::callsite_argument(*KernelDeinitCB, DeinitIsSPMDArgNo), 2787 IsSPMDModeSimplifyCB); 2788 2789 // Check if we know we are in SPMD-mode already. 2790 ConstantInt *IsSPMDArg = 2791 dyn_cast<ConstantInt>(KernelInitCB->getArgOperand(InitIsSPMDArgNo)); 2792 if (IsSPMDArg && !IsSPMDArg->isZero()) 2793 SPMDCompatibilityTracker.indicateOptimisticFixpoint(); 2794 } 2795 2796 /// Modify the IR based on the KernelInfoState as the fixpoint iteration is 2797 /// finished now. 2798 ChangeStatus manifest(Attributor &A) override { 2799 // If we are not looking at a kernel with __kmpc_target_init and 2800 // __kmpc_target_deinit call we cannot actually manifest the information. 2801 if (!KernelInitCB || !KernelDeinitCB) 2802 return ChangeStatus::UNCHANGED; 2803 2804 // Known SPMD-mode kernels need no manifest changes. 2805 if (SPMDCompatibilityTracker.isKnown()) 2806 return ChangeStatus::UNCHANGED; 2807 2808 // If we can we change the execution mode to SPMD-mode otherwise we build a 2809 // custom state machine. 2810 if (!changeToSPMDMode(A)) 2811 buildCustomStateMachine(A); 2812 2813 return ChangeStatus::CHANGED; 2814 } 2815 2816 bool changeToSPMDMode(Attributor &A) { 2817 auto &OMPInfoCache = static_cast<OMPInformationCache &>(A.getInfoCache()); 2818 2819 if (!SPMDCompatibilityTracker.isAssumed()) { 2820 for (Instruction *NonCompatibleI : SPMDCompatibilityTracker) { 2821 if (!NonCompatibleI) 2822 continue; 2823 2824 // Skip diagnostics on calls to known OpenMP runtime functions for now. 2825 if (auto *CB = dyn_cast<CallBase>(NonCompatibleI)) 2826 if (OMPInfoCache.RTLFunctions.contains(CB->getCalledFunction())) 2827 continue; 2828 2829 auto Remark = [&](OptimizationRemarkAnalysis ORA) { 2830 ORA << "Value has potential side effects preventing SPMD-mode " 2831 "execution"; 2832 if (isa<CallBase>(NonCompatibleI)) { 2833 ORA << ". Add `__attribute__((assume(\"ompx_spmd_amenable\")))` to " 2834 "the called function to override"; 2835 } 2836 return ORA << "."; 2837 }; 2838 A.emitRemark<OptimizationRemarkAnalysis>(NonCompatibleI, "OMP121", 2839 Remark); 2840 2841 LLVM_DEBUG(dbgs() << TAG << "SPMD-incompatible side-effect: " 2842 << *NonCompatibleI << "\n"); 2843 } 2844 2845 return false; 2846 } 2847 2848 // Adjust the global exec mode flag that tells the runtime what mode this 2849 // kernel is executed in. 2850 Function *Kernel = getAnchorScope(); 2851 GlobalVariable *ExecMode = Kernel->getParent()->getGlobalVariable( 2852 (Kernel->getName() + "_exec_mode").str()); 2853 assert(ExecMode && "Kernel without exec mode?"); 2854 assert(ExecMode->getInitializer() && 2855 ExecMode->getInitializer()->isOneValue() && 2856 "Initially non-SPMD kernel has SPMD exec mode!"); 2857 ExecMode->setInitializer( 2858 ConstantInt::get(ExecMode->getInitializer()->getType(), 0)); 2859 2860 // Next rewrite the init and deinit calls to indicate we use SPMD-mode now. 2861 const int InitIsSPMDArgNo = 1; 2862 const int DeinitIsSPMDArgNo = 1; 2863 const int InitUseStateMachineArgNo = 2; 2864 2865 auto &Ctx = getAnchorValue().getContext(); 2866 A.changeUseAfterManifest(KernelInitCB->getArgOperandUse(InitIsSPMDArgNo), 2867 *ConstantInt::getBool(Ctx, 1)); 2868 A.changeUseAfterManifest( 2869 KernelInitCB->getArgOperandUse(InitUseStateMachineArgNo), 2870 *ConstantInt::getBool(Ctx, 0)); 2871 A.changeUseAfterManifest( 2872 KernelDeinitCB->getArgOperandUse(DeinitIsSPMDArgNo), 2873 *ConstantInt::getBool(Ctx, 1)); 2874 ++NumOpenMPTargetRegionKernelsSPMD; 2875 2876 auto Remark = [&](OptimizationRemark OR) { 2877 return OR << "Transformed generic-mode kernel to SPMD-mode."; 2878 }; 2879 A.emitRemark<OptimizationRemark>(KernelInitCB, "OMP120", Remark); 2880 return true; 2881 }; 2882 2883 ChangeStatus buildCustomStateMachine(Attributor &A) { 2884 assert(ReachedKnownParallelRegions.isValidState() && 2885 "Custom state machine with invalid parallel region states?"); 2886 2887 const int InitIsSPMDArgNo = 1; 2888 const int InitUseStateMachineArgNo = 2; 2889 2890 // Check if the current configuration is non-SPMD and generic state machine. 2891 // If we already have SPMD mode or a custom state machine we do not need to 2892 // go any further. If it is anything but a constant something is weird and 2893 // we give up. 2894 ConstantInt *UseStateMachine = dyn_cast<ConstantInt>( 2895 KernelInitCB->getArgOperand(InitUseStateMachineArgNo)); 2896 ConstantInt *IsSPMD = 2897 dyn_cast<ConstantInt>(KernelInitCB->getArgOperand(InitIsSPMDArgNo)); 2898 2899 // If we are stuck with generic mode, try to create a custom device (=GPU) 2900 // state machine which is specialized for the parallel regions that are 2901 // reachable by the kernel. 2902 if (!UseStateMachine || UseStateMachine->isZero() || !IsSPMD || 2903 !IsSPMD->isZero()) 2904 return ChangeStatus::UNCHANGED; 2905 2906 // If not SPMD mode, indicate we use a custom state machine now. 2907 auto &Ctx = getAnchorValue().getContext(); 2908 auto *FalseVal = ConstantInt::getBool(Ctx, 0); 2909 A.changeUseAfterManifest( 2910 KernelInitCB->getArgOperandUse(InitUseStateMachineArgNo), *FalseVal); 2911 2912 // If we don't actually need a state machine we are done here. This can 2913 // happen if there simply are no parallel regions. In the resulting kernel 2914 // all worker threads will simply exit right away, leaving the main thread 2915 // to do the work alone. 2916 if (ReachedKnownParallelRegions.empty() && 2917 ReachedUnknownParallelRegions.empty()) { 2918 ++NumOpenMPTargetRegionKernelsWithoutStateMachine; 2919 2920 auto Remark = [&](OptimizationRemark OR) { 2921 return OR << "Removing unused state machine from generic-mode kernel."; 2922 }; 2923 A.emitRemark<OptimizationRemark>(KernelInitCB, "OMP130", Remark); 2924 2925 return ChangeStatus::CHANGED; 2926 } 2927 2928 // Keep track in the statistics of our new shiny custom state machine. 2929 if (ReachedUnknownParallelRegions.empty()) { 2930 ++NumOpenMPTargetRegionKernelsCustomStateMachineWithoutFallback; 2931 2932 auto Remark = [&](OptimizationRemark OR) { 2933 return OR << "Rewriting generic-mode kernel with a customized state " 2934 "machine."; 2935 }; 2936 A.emitRemark<OptimizationRemark>(KernelInitCB, "OMP131", Remark); 2937 } else { 2938 ++NumOpenMPTargetRegionKernelsCustomStateMachineWithFallback; 2939 2940 auto Remark = [&](OptimizationRemarkAnalysis OR) { 2941 return OR << "Generic-mode kernel is executed with a customized state " 2942 "machine that requires a fallback."; 2943 }; 2944 A.emitRemark<OptimizationRemarkAnalysis>(KernelInitCB, "OMP132", Remark); 2945 2946 // Tell the user why we ended up with a fallback. 2947 for (CallBase *UnknownParallelRegionCB : ReachedUnknownParallelRegions) { 2948 if (!UnknownParallelRegionCB) 2949 continue; 2950 auto Remark = [&](OptimizationRemarkAnalysis ORA) { 2951 return ORA << "Call may contain unknown parallel regions. Use " 2952 << "`__attribute__((assume(\"omp_no_parallelism\")))` to " 2953 "override."; 2954 }; 2955 A.emitRemark<OptimizationRemarkAnalysis>(UnknownParallelRegionCB, 2956 "OMP133", Remark); 2957 } 2958 } 2959 2960 // Create all the blocks: 2961 // 2962 // InitCB = __kmpc_target_init(...) 2963 // bool IsWorker = InitCB >= 0; 2964 // if (IsWorker) { 2965 // SMBeginBB: __kmpc_barrier_simple_spmd(...); 2966 // void *WorkFn; 2967 // bool Active = __kmpc_kernel_parallel(&WorkFn); 2968 // if (!WorkFn) return; 2969 // SMIsActiveCheckBB: if (Active) { 2970 // SMIfCascadeCurrentBB: if (WorkFn == <ParFn0>) 2971 // ParFn0(...); 2972 // SMIfCascadeCurrentBB: else if (WorkFn == <ParFn1>) 2973 // ParFn1(...); 2974 // ... 2975 // SMIfCascadeCurrentBB: else 2976 // ((WorkFnTy*)WorkFn)(...); 2977 // SMEndParallelBB: __kmpc_kernel_end_parallel(...); 2978 // } 2979 // SMDoneBB: __kmpc_barrier_simple_spmd(...); 2980 // goto SMBeginBB; 2981 // } 2982 // UserCodeEntryBB: // user code 2983 // __kmpc_target_deinit(...) 2984 // 2985 Function *Kernel = getAssociatedFunction(); 2986 assert(Kernel && "Expected an associated function!"); 2987 2988 BasicBlock *InitBB = KernelInitCB->getParent(); 2989 BasicBlock *UserCodeEntryBB = InitBB->splitBasicBlock( 2990 KernelInitCB->getNextNode(), "thread.user_code.check"); 2991 BasicBlock *StateMachineBeginBB = BasicBlock::Create( 2992 Ctx, "worker_state_machine.begin", Kernel, UserCodeEntryBB); 2993 BasicBlock *StateMachineFinishedBB = BasicBlock::Create( 2994 Ctx, "worker_state_machine.finished", Kernel, UserCodeEntryBB); 2995 BasicBlock *StateMachineIsActiveCheckBB = BasicBlock::Create( 2996 Ctx, "worker_state_machine.is_active.check", Kernel, UserCodeEntryBB); 2997 BasicBlock *StateMachineIfCascadeCurrentBB = 2998 BasicBlock::Create(Ctx, "worker_state_machine.parallel_region.check", 2999 Kernel, UserCodeEntryBB); 3000 BasicBlock *StateMachineEndParallelBB = 3001 BasicBlock::Create(Ctx, "worker_state_machine.parallel_region.end", 3002 Kernel, UserCodeEntryBB); 3003 BasicBlock *StateMachineDoneBarrierBB = BasicBlock::Create( 3004 Ctx, "worker_state_machine.done.barrier", Kernel, UserCodeEntryBB); 3005 3006 const DebugLoc &DLoc = KernelInitCB->getDebugLoc(); 3007 ReturnInst::Create(Ctx, StateMachineFinishedBB)->setDebugLoc(DLoc); 3008 3009 InitBB->getTerminator()->eraseFromParent(); 3010 Instruction *IsWorker = 3011 ICmpInst::Create(ICmpInst::ICmp, llvm::CmpInst::ICMP_NE, KernelInitCB, 3012 ConstantInt::get(KernelInitCB->getType(), -1), 3013 "thread.is_worker", InitBB); 3014 IsWorker->setDebugLoc(DLoc); 3015 BranchInst::Create(StateMachineBeginBB, UserCodeEntryBB, IsWorker, InitBB); 3016 3017 // Create local storage for the work function pointer. 3018 Type *VoidPtrTy = Type::getInt8PtrTy(Ctx); 3019 AllocaInst *WorkFnAI = new AllocaInst(VoidPtrTy, 0, "worker.work_fn.addr", 3020 &Kernel->getEntryBlock().front()); 3021 WorkFnAI->setDebugLoc(DLoc); 3022 3023 auto &OMPInfoCache = static_cast<OMPInformationCache &>(A.getInfoCache()); 3024 OMPInfoCache.OMPBuilder.updateToLocation( 3025 OpenMPIRBuilder::LocationDescription( 3026 IRBuilder<>::InsertPoint(StateMachineBeginBB, 3027 StateMachineBeginBB->end()), 3028 DLoc)); 3029 3030 Value *Ident = KernelInitCB->getArgOperand(0); 3031 Value *GTid = KernelInitCB; 3032 3033 Module &M = *Kernel->getParent(); 3034 FunctionCallee BarrierFn = 3035 OMPInfoCache.OMPBuilder.getOrCreateRuntimeFunction( 3036 M, OMPRTL___kmpc_barrier_simple_spmd); 3037 CallInst::Create(BarrierFn, {Ident, GTid}, "", StateMachineBeginBB) 3038 ->setDebugLoc(DLoc); 3039 3040 FunctionCallee KernelParallelFn = 3041 OMPInfoCache.OMPBuilder.getOrCreateRuntimeFunction( 3042 M, OMPRTL___kmpc_kernel_parallel); 3043 Instruction *IsActiveWorker = CallInst::Create( 3044 KernelParallelFn, {WorkFnAI}, "worker.is_active", StateMachineBeginBB); 3045 IsActiveWorker->setDebugLoc(DLoc); 3046 Instruction *WorkFn = new LoadInst(VoidPtrTy, WorkFnAI, "worker.work_fn", 3047 StateMachineBeginBB); 3048 WorkFn->setDebugLoc(DLoc); 3049 3050 FunctionType *ParallelRegionFnTy = FunctionType::get( 3051 Type::getVoidTy(Ctx), {Type::getInt16Ty(Ctx), Type::getInt32Ty(Ctx)}, 3052 false); 3053 Value *WorkFnCast = BitCastInst::CreatePointerBitCastOrAddrSpaceCast( 3054 WorkFn, ParallelRegionFnTy->getPointerTo(), "worker.work_fn.addr_cast", 3055 StateMachineBeginBB); 3056 3057 Instruction *IsDone = 3058 ICmpInst::Create(ICmpInst::ICmp, llvm::CmpInst::ICMP_EQ, WorkFn, 3059 Constant::getNullValue(VoidPtrTy), "worker.is_done", 3060 StateMachineBeginBB); 3061 IsDone->setDebugLoc(DLoc); 3062 BranchInst::Create(StateMachineFinishedBB, StateMachineIsActiveCheckBB, 3063 IsDone, StateMachineBeginBB) 3064 ->setDebugLoc(DLoc); 3065 3066 BranchInst::Create(StateMachineIfCascadeCurrentBB, 3067 StateMachineDoneBarrierBB, IsActiveWorker, 3068 StateMachineIsActiveCheckBB) 3069 ->setDebugLoc(DLoc); 3070 3071 Value *ZeroArg = 3072 Constant::getNullValue(ParallelRegionFnTy->getParamType(0)); 3073 3074 // Now that we have most of the CFG skeleton it is time for the if-cascade 3075 // that checks the function pointer we got from the runtime against the 3076 // parallel regions we expect, if there are any. 3077 for (int i = 0, e = ReachedKnownParallelRegions.size(); i < e; ++i) { 3078 auto *ParallelRegion = ReachedKnownParallelRegions[i]; 3079 BasicBlock *PRExecuteBB = BasicBlock::Create( 3080 Ctx, "worker_state_machine.parallel_region.execute", Kernel, 3081 StateMachineEndParallelBB); 3082 CallInst::Create(ParallelRegion, {ZeroArg, GTid}, "", PRExecuteBB) 3083 ->setDebugLoc(DLoc); 3084 BranchInst::Create(StateMachineEndParallelBB, PRExecuteBB) 3085 ->setDebugLoc(DLoc); 3086 3087 BasicBlock *PRNextBB = 3088 BasicBlock::Create(Ctx, "worker_state_machine.parallel_region.check", 3089 Kernel, StateMachineEndParallelBB); 3090 3091 // Check if we need to compare the pointer at all or if we can just 3092 // call the parallel region function. 3093 Value *IsPR; 3094 if (i + 1 < e || !ReachedUnknownParallelRegions.empty()) { 3095 Instruction *CmpI = ICmpInst::Create( 3096 ICmpInst::ICmp, llvm::CmpInst::ICMP_EQ, WorkFnCast, ParallelRegion, 3097 "worker.check_parallel_region", StateMachineIfCascadeCurrentBB); 3098 CmpI->setDebugLoc(DLoc); 3099 IsPR = CmpI; 3100 } else { 3101 IsPR = ConstantInt::getTrue(Ctx); 3102 } 3103 3104 BranchInst::Create(PRExecuteBB, PRNextBB, IsPR, 3105 StateMachineIfCascadeCurrentBB) 3106 ->setDebugLoc(DLoc); 3107 StateMachineIfCascadeCurrentBB = PRNextBB; 3108 } 3109 3110 // At the end of the if-cascade we place the indirect function pointer call 3111 // in case we might need it, that is if there can be parallel regions we 3112 // have not handled in the if-cascade above. 3113 if (!ReachedUnknownParallelRegions.empty()) { 3114 StateMachineIfCascadeCurrentBB->setName( 3115 "worker_state_machine.parallel_region.fallback.execute"); 3116 CallInst::Create(ParallelRegionFnTy, WorkFnCast, {ZeroArg, GTid}, "", 3117 StateMachineIfCascadeCurrentBB) 3118 ->setDebugLoc(DLoc); 3119 } 3120 BranchInst::Create(StateMachineEndParallelBB, 3121 StateMachineIfCascadeCurrentBB) 3122 ->setDebugLoc(DLoc); 3123 3124 CallInst::Create(OMPInfoCache.OMPBuilder.getOrCreateRuntimeFunction( 3125 M, OMPRTL___kmpc_kernel_end_parallel), 3126 {}, "", StateMachineEndParallelBB) 3127 ->setDebugLoc(DLoc); 3128 BranchInst::Create(StateMachineDoneBarrierBB, StateMachineEndParallelBB) 3129 ->setDebugLoc(DLoc); 3130 3131 CallInst::Create(BarrierFn, {Ident, GTid}, "", StateMachineDoneBarrierBB) 3132 ->setDebugLoc(DLoc); 3133 BranchInst::Create(StateMachineBeginBB, StateMachineDoneBarrierBB) 3134 ->setDebugLoc(DLoc); 3135 3136 return ChangeStatus::CHANGED; 3137 } 3138 3139 /// Fixpoint iteration update function. Will be called every time a dependence 3140 /// changed its state (and in the beginning). 3141 ChangeStatus updateImpl(Attributor &A) override { 3142 KernelInfoState StateBefore = getState(); 3143 3144 // Callback to check a read/write instruction. 3145 auto CheckRWInst = [&](Instruction &I) { 3146 // We handle calls later. 3147 if (isa<CallBase>(I)) 3148 return true; 3149 // We only care about write effects. 3150 if (!I.mayWriteToMemory()) 3151 return true; 3152 if (auto *SI = dyn_cast<StoreInst>(&I)) { 3153 SmallVector<const Value *> Objects; 3154 getUnderlyingObjects(SI->getPointerOperand(), Objects); 3155 if (llvm::all_of(Objects, 3156 [](const Value *Obj) { return isa<AllocaInst>(Obj); })) 3157 return true; 3158 } 3159 // For now we give up on everything but stores. 3160 SPMDCompatibilityTracker.insert(&I); 3161 return true; 3162 }; 3163 3164 bool UsedAssumedInformationInCheckRWInst = false; 3165 if (!A.checkForAllReadWriteInstructions( 3166 CheckRWInst, *this, UsedAssumedInformationInCheckRWInst)) 3167 SPMDCompatibilityTracker.indicatePessimisticFixpoint(); 3168 3169 if (!IsKernelEntry) 3170 updateReachingKernelEntries(A); 3171 3172 // Callback to check a call instruction. 3173 auto CheckCallInst = [&](Instruction &I) { 3174 auto &CB = cast<CallBase>(I); 3175 auto &CBAA = A.getAAFor<AAKernelInfo>( 3176 *this, IRPosition::callsite_function(CB), DepClassTy::OPTIONAL); 3177 if (CBAA.getState().isValidState()) 3178 getState() ^= CBAA.getState(); 3179 return true; 3180 }; 3181 3182 bool UsedAssumedInformationInCheckCallInst = false; 3183 if (!A.checkForAllCallLikeInstructions( 3184 CheckCallInst, *this, UsedAssumedInformationInCheckCallInst)) 3185 return indicatePessimisticFixpoint(); 3186 3187 return StateBefore == getState() ? ChangeStatus::UNCHANGED 3188 : ChangeStatus::CHANGED; 3189 } 3190 3191 private: 3192 /// Update info regarding reaching kernels. 3193 void updateReachingKernelEntries(Attributor &A) { 3194 auto PredCallSite = [&](AbstractCallSite ACS) { 3195 Function *Caller = ACS.getInstruction()->getFunction(); 3196 3197 assert(Caller && "Caller is nullptr"); 3198 3199 auto &CAA = 3200 A.getOrCreateAAFor<AAKernelInfo>(IRPosition::function(*Caller)); 3201 if (CAA.ReachingKernelEntries.isValidState()) { 3202 ReachingKernelEntries ^= CAA.ReachingKernelEntries; 3203 return true; 3204 } 3205 3206 // We lost track of the caller of the associated function, any kernel 3207 // could reach now. 3208 ReachingKernelEntries.indicatePessimisticFixpoint(); 3209 3210 return true; 3211 }; 3212 3213 bool AllCallSitesKnown; 3214 if (!A.checkForAllCallSites(PredCallSite, *this, 3215 true /* RequireAllCallSites */, 3216 AllCallSitesKnown)) 3217 ReachingKernelEntries.indicatePessimisticFixpoint(); 3218 } 3219 }; 3220 3221 /// The call site kernel info abstract attribute, basically, what can we say 3222 /// about a call site with regards to the KernelInfoState. For now this simply 3223 /// forwards the information from the callee. 3224 struct AAKernelInfoCallSite : AAKernelInfo { 3225 AAKernelInfoCallSite(const IRPosition &IRP, Attributor &A) 3226 : AAKernelInfo(IRP, A) {} 3227 3228 /// See AbstractAttribute::initialize(...). 3229 void initialize(Attributor &A) override { 3230 AAKernelInfo::initialize(A); 3231 3232 CallBase &CB = cast<CallBase>(getAssociatedValue()); 3233 Function *Callee = getAssociatedFunction(); 3234 3235 // Helper to lookup an assumption string. 3236 auto HasAssumption = [](Function *Fn, StringRef AssumptionStr) { 3237 return Fn && hasAssumption(*Fn, AssumptionStr); 3238 }; 3239 3240 // Check for SPMD-mode assumptions. 3241 if (HasAssumption(Callee, "ompx_spmd_amenable")) 3242 SPMDCompatibilityTracker.indicateOptimisticFixpoint(); 3243 3244 // First weed out calls we do not care about, that is readonly/readnone 3245 // calls, intrinsics, and "no_openmp" calls. Neither of these can reach a 3246 // parallel region or anything else we are looking for. 3247 if (!CB.mayWriteToMemory() || isa<IntrinsicInst>(CB)) { 3248 indicateOptimisticFixpoint(); 3249 return; 3250 } 3251 3252 // Next we check if we know the callee. If it is a known OpenMP function 3253 // we will handle them explicitly in the switch below. If it is not, we 3254 // will use an AAKernelInfo object on the callee to gather information and 3255 // merge that into the current state. The latter happens in the updateImpl. 3256 auto &OMPInfoCache = static_cast<OMPInformationCache &>(A.getInfoCache()); 3257 const auto &It = OMPInfoCache.RuntimeFunctionIDMap.find(Callee); 3258 if (It == OMPInfoCache.RuntimeFunctionIDMap.end()) { 3259 // Unknown caller or declarations are not analyzable, we give up. 3260 if (!Callee || !A.isFunctionIPOAmendable(*Callee)) { 3261 3262 // Unknown callees might contain parallel regions, except if they have 3263 // an appropriate assumption attached. 3264 if (!(HasAssumption(Callee, "omp_no_openmp") || 3265 HasAssumption(Callee, "omp_no_parallelism"))) 3266 ReachedUnknownParallelRegions.insert(&CB); 3267 3268 // If SPMDCompatibilityTracker is not fixed, we need to give up on the 3269 // idea we can run something unknown in SPMD-mode. 3270 if (!SPMDCompatibilityTracker.isAtFixpoint()) 3271 SPMDCompatibilityTracker.insert(&CB); 3272 3273 // We have updated the state for this unknown call properly, there won't 3274 // be any change so we indicate a fixpoint. 3275 indicateOptimisticFixpoint(); 3276 } 3277 // If the callee is known and can be used in IPO, we will update the state 3278 // based on the callee state in updateImpl. 3279 return; 3280 } 3281 3282 const unsigned int WrapperFunctionArgNo = 6; 3283 RuntimeFunction RF = It->getSecond(); 3284 switch (RF) { 3285 // All the functions we know are compatible with SPMD mode. 3286 case OMPRTL___kmpc_is_spmd_exec_mode: 3287 case OMPRTL___kmpc_for_static_fini: 3288 case OMPRTL___kmpc_global_thread_num: 3289 case OMPRTL___kmpc_single: 3290 case OMPRTL___kmpc_end_single: 3291 case OMPRTL___kmpc_master: 3292 case OMPRTL___kmpc_end_master: 3293 case OMPRTL___kmpc_barrier: 3294 break; 3295 case OMPRTL___kmpc_for_static_init_4: 3296 case OMPRTL___kmpc_for_static_init_4u: 3297 case OMPRTL___kmpc_for_static_init_8: 3298 case OMPRTL___kmpc_for_static_init_8u: { 3299 // Check the schedule and allow static schedule in SPMD mode. 3300 unsigned ScheduleArgOpNo = 2; 3301 auto *ScheduleTypeCI = 3302 dyn_cast<ConstantInt>(CB.getArgOperand(ScheduleArgOpNo)); 3303 unsigned ScheduleTypeVal = 3304 ScheduleTypeCI ? ScheduleTypeCI->getZExtValue() : 0; 3305 switch (OMPScheduleType(ScheduleTypeVal)) { 3306 case OMPScheduleType::Static: 3307 case OMPScheduleType::StaticChunked: 3308 case OMPScheduleType::Distribute: 3309 case OMPScheduleType::DistributeChunked: 3310 break; 3311 default: 3312 SPMDCompatibilityTracker.insert(&CB); 3313 break; 3314 }; 3315 } break; 3316 case OMPRTL___kmpc_target_init: 3317 KernelInitCB = &CB; 3318 break; 3319 case OMPRTL___kmpc_target_deinit: 3320 KernelDeinitCB = &CB; 3321 break; 3322 case OMPRTL___kmpc_parallel_51: 3323 if (auto *ParallelRegion = dyn_cast<Function>( 3324 CB.getArgOperand(WrapperFunctionArgNo)->stripPointerCasts())) { 3325 ReachedKnownParallelRegions.insert(ParallelRegion); 3326 break; 3327 } 3328 // The condition above should usually get the parallel region function 3329 // pointer and record it. In the off chance it doesn't we assume the 3330 // worst. 3331 ReachedUnknownParallelRegions.insert(&CB); 3332 break; 3333 case OMPRTL___kmpc_omp_task: 3334 // We do not look into tasks right now, just give up. 3335 SPMDCompatibilityTracker.insert(&CB); 3336 ReachedUnknownParallelRegions.insert(&CB); 3337 break; 3338 default: 3339 // Unknown OpenMP runtime calls cannot be executed in SPMD-mode, 3340 // generally. 3341 SPMDCompatibilityTracker.insert(&CB); 3342 break; 3343 } 3344 // All other OpenMP runtime calls will not reach parallel regions so they 3345 // can be safely ignored for now. Since it is a known OpenMP runtime call we 3346 // have now modeled all effects and there is no need for any update. 3347 indicateOptimisticFixpoint(); 3348 } 3349 3350 ChangeStatus updateImpl(Attributor &A) override { 3351 // TODO: Once we have call site specific value information we can provide 3352 // call site specific liveness information and then it makes 3353 // sense to specialize attributes for call sites arguments instead of 3354 // redirecting requests to the callee argument. 3355 Function *F = getAssociatedFunction(); 3356 const IRPosition &FnPos = IRPosition::function(*F); 3357 auto &FnAA = A.getAAFor<AAKernelInfo>(*this, FnPos, DepClassTy::REQUIRED); 3358 if (getState() == FnAA.getState()) 3359 return ChangeStatus::UNCHANGED; 3360 getState() = FnAA.getState(); 3361 return ChangeStatus::CHANGED; 3362 } 3363 }; 3364 3365 struct AAFoldRuntimeCall 3366 : public StateWrapper<BooleanState, AbstractAttribute> { 3367 using Base = StateWrapper<BooleanState, AbstractAttribute>; 3368 3369 AAFoldRuntimeCall(const IRPosition &IRP, Attributor &A) : Base(IRP) {} 3370 3371 /// Statistics are tracked as part of manifest for now. 3372 void trackStatistics() const override {} 3373 3374 /// Create an abstract attribute biew for the position \p IRP. 3375 static AAFoldRuntimeCall &createForPosition(const IRPosition &IRP, 3376 Attributor &A); 3377 3378 /// See AbstractAttribute::getName() 3379 const std::string getName() const override { return "AAFoldRuntimeCall"; } 3380 3381 /// See AbstractAttribute::getIdAddr() 3382 const char *getIdAddr() const override { return &ID; } 3383 3384 /// This function should return true if the type of the \p AA is 3385 /// AAFoldRuntimeCall 3386 static bool classof(const AbstractAttribute *AA) { 3387 return (AA->getIdAddr() == &ID); 3388 } 3389 3390 static const char ID; 3391 }; 3392 3393 struct AAFoldRuntimeCallCallSiteReturned : AAFoldRuntimeCall { 3394 AAFoldRuntimeCallCallSiteReturned(const IRPosition &IRP, Attributor &A) 3395 : AAFoldRuntimeCall(IRP, A) {} 3396 3397 /// See AbstractAttribute::getAsStr() 3398 const std::string getAsStr() const override { 3399 if (!isValidState()) 3400 return "<invalid>"; 3401 3402 std::string Str("simplified value: "); 3403 3404 if (!SimplifiedValue.hasValue()) 3405 return Str + std::string("none"); 3406 3407 if (!SimplifiedValue.getValue()) 3408 return Str + std::string("nullptr"); 3409 3410 if (ConstantInt *CI = dyn_cast<ConstantInt>(SimplifiedValue.getValue())) 3411 return Str + std::to_string(CI->getSExtValue()); 3412 3413 return Str + std::string("unknown"); 3414 } 3415 3416 void initialize(Attributor &A) override { 3417 Function *Callee = getAssociatedFunction(); 3418 3419 auto &OMPInfoCache = static_cast<OMPInformationCache &>(A.getInfoCache()); 3420 const auto &It = OMPInfoCache.RuntimeFunctionIDMap.find(Callee); 3421 assert(It != OMPInfoCache.RuntimeFunctionIDMap.end() && 3422 "Expected a known OpenMP runtime function"); 3423 3424 RFKind = It->getSecond(); 3425 3426 CallBase &CB = cast<CallBase>(getAssociatedValue()); 3427 A.registerSimplificationCallback( 3428 IRPosition::callsite_returned(CB), 3429 [&](const IRPosition &IRP, const AbstractAttribute *AA, 3430 bool &UsedAssumedInformation) -> Optional<Value *> { 3431 assert((isValidState() || (SimplifiedValue.hasValue() && 3432 SimplifiedValue.getValue() == nullptr)) && 3433 "Unexpected invalid state!"); 3434 3435 if (!isAtFixpoint()) { 3436 UsedAssumedInformation = true; 3437 if (AA) 3438 A.recordDependence(*this, *AA, DepClassTy::OPTIONAL); 3439 } 3440 return SimplifiedValue; 3441 }); 3442 } 3443 3444 ChangeStatus updateImpl(Attributor &A) override { 3445 ChangeStatus Changed = ChangeStatus::UNCHANGED; 3446 3447 switch (RFKind) { 3448 case OMPRTL___kmpc_is_spmd_exec_mode: 3449 Changed |= foldIsSPMDExecMode(A); 3450 break; 3451 default: 3452 llvm_unreachable("Unhandled OpenMP runtime function!"); 3453 } 3454 3455 return Changed; 3456 } 3457 3458 ChangeStatus manifest(Attributor &A) override { 3459 ChangeStatus Changed = ChangeStatus::UNCHANGED; 3460 3461 if (SimplifiedValue.hasValue() && SimplifiedValue.getValue()) { 3462 Instruction &CB = *getCtxI(); 3463 A.changeValueAfterManifest(CB, **SimplifiedValue); 3464 A.deleteAfterManifest(CB); 3465 Changed = ChangeStatus::CHANGED; 3466 } 3467 3468 return Changed; 3469 } 3470 3471 ChangeStatus indicatePessimisticFixpoint() override { 3472 SimplifiedValue = nullptr; 3473 return AAFoldRuntimeCall::indicatePessimisticFixpoint(); 3474 } 3475 3476 private: 3477 /// Fold __kmpc_is_spmd_exec_mode into a constant if possible. 3478 ChangeStatus foldIsSPMDExecMode(Attributor &A) { 3479 Optional<Value *> SimplifiedValueBefore = SimplifiedValue; 3480 3481 unsigned AssumedSPMDCount = 0, KnownSPMDCount = 0; 3482 unsigned AssumedNonSPMDCount = 0, KnownNonSPMDCount = 0; 3483 auto &CallerKernelInfoAA = A.getAAFor<AAKernelInfo>( 3484 *this, IRPosition::function(*getAnchorScope()), DepClassTy::REQUIRED); 3485 3486 if (!CallerKernelInfoAA.ReachingKernelEntries.isValidState()) 3487 return indicatePessimisticFixpoint(); 3488 3489 for (Kernel K : CallerKernelInfoAA.ReachingKernelEntries) { 3490 auto &AA = A.getAAFor<AAKernelInfo>(*this, IRPosition::function(*K), 3491 DepClassTy::REQUIRED); 3492 3493 if (!AA.isValidState()) { 3494 SimplifiedValue = nullptr; 3495 return indicatePessimisticFixpoint(); 3496 } 3497 3498 if (AA.SPMDCompatibilityTracker.isAssumed()) { 3499 if (AA.SPMDCompatibilityTracker.isAtFixpoint()) 3500 ++KnownSPMDCount; 3501 else 3502 ++AssumedSPMDCount; 3503 } else { 3504 if (AA.SPMDCompatibilityTracker.isAtFixpoint()) 3505 ++KnownNonSPMDCount; 3506 else 3507 ++AssumedNonSPMDCount; 3508 } 3509 } 3510 3511 if (KnownSPMDCount && KnownNonSPMDCount) 3512 return indicatePessimisticFixpoint(); 3513 3514 if (AssumedSPMDCount && AssumedNonSPMDCount) 3515 return indicatePessimisticFixpoint(); 3516 3517 auto &Ctx = getAnchorValue().getContext(); 3518 if (KnownSPMDCount || AssumedSPMDCount) { 3519 assert(KnownNonSPMDCount == 0 && AssumedNonSPMDCount == 0 && 3520 "Expected only SPMD kernels!"); 3521 // All reaching kernels are in SPMD mode. Update all function calls to 3522 // __kmpc_is_spmd_exec_mode to 1. 3523 SimplifiedValue = ConstantInt::get(Type::getInt8Ty(Ctx), true); 3524 } else { 3525 assert(KnownSPMDCount == 0 && AssumedSPMDCount == 0 && 3526 "Expected only non-SPMD kernels!"); 3527 // All reaching kernels are in non-SPMD mode. Update all function 3528 // calls to __kmpc_is_spmd_exec_mode to 0. 3529 SimplifiedValue = ConstantInt::get(Type::getInt8Ty(Ctx), false); 3530 } 3531 3532 return SimplifiedValue == SimplifiedValueBefore ? ChangeStatus::UNCHANGED 3533 : ChangeStatus::CHANGED; 3534 } 3535 3536 /// An optional value the associated value is assumed to fold to. That is, we 3537 /// assume the associated value (which is a call) can be replaced by this 3538 /// simplified value. 3539 Optional<Value *> SimplifiedValue; 3540 3541 /// The runtime function kind of the callee of the associated call site. 3542 RuntimeFunction RFKind; 3543 }; 3544 3545 } // namespace 3546 3547 void OpenMPOpt::registerAAs(bool IsModulePass) { 3548 if (SCC.empty()) 3549 3550 return; 3551 if (IsModulePass) { 3552 // Ensure we create the AAKernelInfo AAs first and without triggering an 3553 // update. This will make sure we register all value simplification 3554 // callbacks before any other AA has the chance to create an AAValueSimplify 3555 // or similar. 3556 for (Function *Kernel : OMPInfoCache.Kernels) 3557 A.getOrCreateAAFor<AAKernelInfo>( 3558 IRPosition::function(*Kernel), /* QueryingAA */ nullptr, 3559 DepClassTy::NONE, /* ForceUpdate */ false, 3560 /* UpdateAfterInit */ false); 3561 3562 auto &IsSPMDRFI = OMPInfoCache.RFIs[OMPRTL___kmpc_is_spmd_exec_mode]; 3563 IsSPMDRFI.foreachUse(SCC, [&](Use &U, Function &) { 3564 CallInst *CI = OpenMPOpt::getCallIfRegularCall(U, &IsSPMDRFI); 3565 if (!CI) 3566 return false; 3567 A.getOrCreateAAFor<AAFoldRuntimeCall>( 3568 IRPosition::callsite_returned(*CI), /* QueryingAA */ nullptr, 3569 DepClassTy::NONE, /* ForceUpdate */ false, 3570 /* UpdateAfterInit */ false); 3571 return false; 3572 }); 3573 } 3574 3575 // Create CallSite AA for all Getters. 3576 for (int Idx = 0; Idx < OMPInfoCache.ICVs.size() - 1; ++Idx) { 3577 auto ICVInfo = OMPInfoCache.ICVs[static_cast<InternalControlVar>(Idx)]; 3578 3579 auto &GetterRFI = OMPInfoCache.RFIs[ICVInfo.Getter]; 3580 3581 auto CreateAA = [&](Use &U, Function &Caller) { 3582 CallInst *CI = OpenMPOpt::getCallIfRegularCall(U, &GetterRFI); 3583 if (!CI) 3584 return false; 3585 3586 auto &CB = cast<CallBase>(*CI); 3587 3588 IRPosition CBPos = IRPosition::callsite_function(CB); 3589 A.getOrCreateAAFor<AAICVTracker>(CBPos); 3590 return false; 3591 }; 3592 3593 GetterRFI.foreachUse(SCC, CreateAA); 3594 } 3595 auto &GlobalizationRFI = OMPInfoCache.RFIs[OMPRTL___kmpc_alloc_shared]; 3596 auto CreateAA = [&](Use &U, Function &F) { 3597 A.getOrCreateAAFor<AAHeapToShared>(IRPosition::function(F)); 3598 return false; 3599 }; 3600 GlobalizationRFI.foreachUse(SCC, CreateAA); 3601 3602 // Create an ExecutionDomain AA for every function and a HeapToStack AA for 3603 // every function if there is a device kernel. 3604 for (auto *F : SCC) { 3605 if (!F->isDeclaration()) 3606 A.getOrCreateAAFor<AAExecutionDomain>(IRPosition::function(*F)); 3607 if (isOpenMPDevice(M)) 3608 A.getOrCreateAAFor<AAHeapToStack>(IRPosition::function(*F)); 3609 } 3610 } 3611 3612 const char AAICVTracker::ID = 0; 3613 const char AAKernelInfo::ID = 0; 3614 const char AAExecutionDomain::ID = 0; 3615 const char AAHeapToShared::ID = 0; 3616 const char AAFoldRuntimeCall::ID = 0; 3617 3618 AAICVTracker &AAICVTracker::createForPosition(const IRPosition &IRP, 3619 Attributor &A) { 3620 AAICVTracker *AA = nullptr; 3621 switch (IRP.getPositionKind()) { 3622 case IRPosition::IRP_INVALID: 3623 case IRPosition::IRP_FLOAT: 3624 case IRPosition::IRP_ARGUMENT: 3625 case IRPosition::IRP_CALL_SITE_ARGUMENT: 3626 llvm_unreachable("ICVTracker can only be created for function position!"); 3627 case IRPosition::IRP_RETURNED: 3628 AA = new (A.Allocator) AAICVTrackerFunctionReturned(IRP, A); 3629 break; 3630 case IRPosition::IRP_CALL_SITE_RETURNED: 3631 AA = new (A.Allocator) AAICVTrackerCallSiteReturned(IRP, A); 3632 break; 3633 case IRPosition::IRP_CALL_SITE: 3634 AA = new (A.Allocator) AAICVTrackerCallSite(IRP, A); 3635 break; 3636 case IRPosition::IRP_FUNCTION: 3637 AA = new (A.Allocator) AAICVTrackerFunction(IRP, A); 3638 break; 3639 } 3640 3641 return *AA; 3642 } 3643 3644 AAExecutionDomain &AAExecutionDomain::createForPosition(const IRPosition &IRP, 3645 Attributor &A) { 3646 AAExecutionDomainFunction *AA = nullptr; 3647 switch (IRP.getPositionKind()) { 3648 case IRPosition::IRP_INVALID: 3649 case IRPosition::IRP_FLOAT: 3650 case IRPosition::IRP_ARGUMENT: 3651 case IRPosition::IRP_CALL_SITE_ARGUMENT: 3652 case IRPosition::IRP_RETURNED: 3653 case IRPosition::IRP_CALL_SITE_RETURNED: 3654 case IRPosition::IRP_CALL_SITE: 3655 llvm_unreachable( 3656 "AAExecutionDomain can only be created for function position!"); 3657 case IRPosition::IRP_FUNCTION: 3658 AA = new (A.Allocator) AAExecutionDomainFunction(IRP, A); 3659 break; 3660 } 3661 3662 return *AA; 3663 } 3664 3665 AAHeapToShared &AAHeapToShared::createForPosition(const IRPosition &IRP, 3666 Attributor &A) { 3667 AAHeapToSharedFunction *AA = nullptr; 3668 switch (IRP.getPositionKind()) { 3669 case IRPosition::IRP_INVALID: 3670 case IRPosition::IRP_FLOAT: 3671 case IRPosition::IRP_ARGUMENT: 3672 case IRPosition::IRP_CALL_SITE_ARGUMENT: 3673 case IRPosition::IRP_RETURNED: 3674 case IRPosition::IRP_CALL_SITE_RETURNED: 3675 case IRPosition::IRP_CALL_SITE: 3676 llvm_unreachable( 3677 "AAHeapToShared can only be created for function position!"); 3678 case IRPosition::IRP_FUNCTION: 3679 AA = new (A.Allocator) AAHeapToSharedFunction(IRP, A); 3680 break; 3681 } 3682 3683 return *AA; 3684 } 3685 3686 AAKernelInfo &AAKernelInfo::createForPosition(const IRPosition &IRP, 3687 Attributor &A) { 3688 AAKernelInfo *AA = nullptr; 3689 switch (IRP.getPositionKind()) { 3690 case IRPosition::IRP_INVALID: 3691 case IRPosition::IRP_FLOAT: 3692 case IRPosition::IRP_ARGUMENT: 3693 case IRPosition::IRP_RETURNED: 3694 case IRPosition::IRP_CALL_SITE_RETURNED: 3695 case IRPosition::IRP_CALL_SITE_ARGUMENT: 3696 llvm_unreachable("KernelInfo can only be created for function position!"); 3697 case IRPosition::IRP_CALL_SITE: 3698 AA = new (A.Allocator) AAKernelInfoCallSite(IRP, A); 3699 break; 3700 case IRPosition::IRP_FUNCTION: 3701 AA = new (A.Allocator) AAKernelInfoFunction(IRP, A); 3702 break; 3703 } 3704 3705 return *AA; 3706 } 3707 3708 AAFoldRuntimeCall &AAFoldRuntimeCall::createForPosition(const IRPosition &IRP, 3709 Attributor &A) { 3710 AAFoldRuntimeCall *AA = nullptr; 3711 switch (IRP.getPositionKind()) { 3712 case IRPosition::IRP_INVALID: 3713 case IRPosition::IRP_FLOAT: 3714 case IRPosition::IRP_ARGUMENT: 3715 case IRPosition::IRP_RETURNED: 3716 case IRPosition::IRP_FUNCTION: 3717 case IRPosition::IRP_CALL_SITE: 3718 case IRPosition::IRP_CALL_SITE_ARGUMENT: 3719 llvm_unreachable("KernelInfo can only be created for call site position!"); 3720 case IRPosition::IRP_CALL_SITE_RETURNED: 3721 AA = new (A.Allocator) AAFoldRuntimeCallCallSiteReturned(IRP, A); 3722 break; 3723 } 3724 3725 return *AA; 3726 } 3727 3728 PreservedAnalyses OpenMPOptPass::run(Module &M, ModuleAnalysisManager &AM) { 3729 if (!containsOpenMP(M)) 3730 return PreservedAnalyses::all(); 3731 if (DisableOpenMPOptimizations) 3732 return PreservedAnalyses::all(); 3733 3734 FunctionAnalysisManager &FAM = 3735 AM.getResult<FunctionAnalysisManagerModuleProxy>(M).getManager(); 3736 KernelSet Kernels = getDeviceKernels(M); 3737 3738 auto IsCalled = [&](Function &F) { 3739 if (Kernels.contains(&F)) 3740 return true; 3741 for (const User *U : F.users()) 3742 if (!isa<BlockAddress>(U)) 3743 return true; 3744 return false; 3745 }; 3746 3747 auto EmitRemark = [&](Function &F) { 3748 auto &ORE = FAM.getResult<OptimizationRemarkEmitterAnalysis>(F); 3749 ORE.emit([&]() { 3750 OptimizationRemarkAnalysis ORA(DEBUG_TYPE, "OMP140", &F); 3751 return ORA << "Could not internalize function. " 3752 << "Some optimizations may not be possible."; 3753 }); 3754 }; 3755 3756 // Create internal copies of each function if this is a kernel Module. This 3757 // allows iterprocedural passes to see every call edge. 3758 DenseSet<const Function *> InternalizedFuncs; 3759 if (isOpenMPDevice(M)) 3760 for (Function &F : M) 3761 if (!F.isDeclaration() && !Kernels.contains(&F) && IsCalled(F)) { 3762 if (Attributor::internalizeFunction(F, /* Force */ true)) { 3763 InternalizedFuncs.insert(&F); 3764 } else if (!F.hasLocalLinkage() && !F.hasFnAttribute(Attribute::Cold)) { 3765 EmitRemark(F); 3766 } 3767 } 3768 3769 // Look at every function in the Module unless it was internalized. 3770 SmallVector<Function *, 16> SCC; 3771 for (Function &F : M) 3772 if (!F.isDeclaration() && !InternalizedFuncs.contains(&F)) 3773 SCC.push_back(&F); 3774 3775 if (SCC.empty()) 3776 return PreservedAnalyses::all(); 3777 3778 AnalysisGetter AG(FAM); 3779 3780 auto OREGetter = [&FAM](Function *F) -> OptimizationRemarkEmitter & { 3781 return FAM.getResult<OptimizationRemarkEmitterAnalysis>(*F); 3782 }; 3783 3784 BumpPtrAllocator Allocator; 3785 CallGraphUpdater CGUpdater; 3786 3787 SetVector<Function *> Functions(SCC.begin(), SCC.end()); 3788 OMPInformationCache InfoCache(M, AG, Allocator, /*CGSCC*/ Functions, Kernels); 3789 3790 unsigned MaxFixpointIterations = (isOpenMPDevice(M)) ? 128 : 32; 3791 Attributor A(Functions, InfoCache, CGUpdater, nullptr, true, false, 3792 MaxFixpointIterations, OREGetter, DEBUG_TYPE); 3793 3794 OpenMPOpt OMPOpt(SCC, CGUpdater, OREGetter, InfoCache, A); 3795 bool Changed = OMPOpt.run(true); 3796 if (Changed) 3797 return PreservedAnalyses::none(); 3798 3799 return PreservedAnalyses::all(); 3800 } 3801 3802 PreservedAnalyses OpenMPOptCGSCCPass::run(LazyCallGraph::SCC &C, 3803 CGSCCAnalysisManager &AM, 3804 LazyCallGraph &CG, 3805 CGSCCUpdateResult &UR) { 3806 if (!containsOpenMP(*C.begin()->getFunction().getParent())) 3807 return PreservedAnalyses::all(); 3808 if (DisableOpenMPOptimizations) 3809 return PreservedAnalyses::all(); 3810 3811 SmallVector<Function *, 16> SCC; 3812 // If there are kernels in the module, we have to run on all SCC's. 3813 for (LazyCallGraph::Node &N : C) { 3814 Function *Fn = &N.getFunction(); 3815 SCC.push_back(Fn); 3816 } 3817 3818 if (SCC.empty()) 3819 return PreservedAnalyses::all(); 3820 3821 Module &M = *C.begin()->getFunction().getParent(); 3822 3823 KernelSet Kernels = getDeviceKernels(M); 3824 3825 FunctionAnalysisManager &FAM = 3826 AM.getResult<FunctionAnalysisManagerCGSCCProxy>(C, CG).getManager(); 3827 3828 AnalysisGetter AG(FAM); 3829 3830 auto OREGetter = [&FAM](Function *F) -> OptimizationRemarkEmitter & { 3831 return FAM.getResult<OptimizationRemarkEmitterAnalysis>(*F); 3832 }; 3833 3834 BumpPtrAllocator Allocator; 3835 CallGraphUpdater CGUpdater; 3836 CGUpdater.initialize(CG, C, AM, UR); 3837 3838 SetVector<Function *> Functions(SCC.begin(), SCC.end()); 3839 OMPInformationCache InfoCache(*(Functions.back()->getParent()), AG, Allocator, 3840 /*CGSCC*/ Functions, Kernels); 3841 3842 unsigned MaxFixpointIterations = (isOpenMPDevice(M)) ? 128 : 32; 3843 Attributor A(Functions, InfoCache, CGUpdater, nullptr, false, true, 3844 MaxFixpointIterations, OREGetter, DEBUG_TYPE); 3845 3846 OpenMPOpt OMPOpt(SCC, CGUpdater, OREGetter, InfoCache, A); 3847 bool Changed = OMPOpt.run(false); 3848 if (Changed) 3849 return PreservedAnalyses::none(); 3850 3851 return PreservedAnalyses::all(); 3852 } 3853 3854 namespace { 3855 3856 struct OpenMPOptCGSCCLegacyPass : public CallGraphSCCPass { 3857 CallGraphUpdater CGUpdater; 3858 static char ID; 3859 3860 OpenMPOptCGSCCLegacyPass() : CallGraphSCCPass(ID) { 3861 initializeOpenMPOptCGSCCLegacyPassPass(*PassRegistry::getPassRegistry()); 3862 } 3863 3864 void getAnalysisUsage(AnalysisUsage &AU) const override { 3865 CallGraphSCCPass::getAnalysisUsage(AU); 3866 } 3867 3868 bool runOnSCC(CallGraphSCC &CGSCC) override { 3869 if (!containsOpenMP(CGSCC.getCallGraph().getModule())) 3870 return false; 3871 if (DisableOpenMPOptimizations || skipSCC(CGSCC)) 3872 return false; 3873 3874 SmallVector<Function *, 16> SCC; 3875 // If there are kernels in the module, we have to run on all SCC's. 3876 for (CallGraphNode *CGN : CGSCC) { 3877 Function *Fn = CGN->getFunction(); 3878 if (!Fn || Fn->isDeclaration()) 3879 continue; 3880 SCC.push_back(Fn); 3881 } 3882 3883 if (SCC.empty()) 3884 return false; 3885 3886 Module &M = CGSCC.getCallGraph().getModule(); 3887 KernelSet Kernels = getDeviceKernels(M); 3888 3889 CallGraph &CG = getAnalysis<CallGraphWrapperPass>().getCallGraph(); 3890 CGUpdater.initialize(CG, CGSCC); 3891 3892 // Maintain a map of functions to avoid rebuilding the ORE 3893 DenseMap<Function *, std::unique_ptr<OptimizationRemarkEmitter>> OREMap; 3894 auto OREGetter = [&OREMap](Function *F) -> OptimizationRemarkEmitter & { 3895 std::unique_ptr<OptimizationRemarkEmitter> &ORE = OREMap[F]; 3896 if (!ORE) 3897 ORE = std::make_unique<OptimizationRemarkEmitter>(F); 3898 return *ORE; 3899 }; 3900 3901 AnalysisGetter AG; 3902 SetVector<Function *> Functions(SCC.begin(), SCC.end()); 3903 BumpPtrAllocator Allocator; 3904 OMPInformationCache InfoCache(*(Functions.back()->getParent()), AG, 3905 Allocator, 3906 /*CGSCC*/ Functions, Kernels); 3907 3908 unsigned MaxFixpointIterations = (isOpenMPDevice(M)) ? 128 : 32; 3909 Attributor A(Functions, InfoCache, CGUpdater, nullptr, false, true, 3910 MaxFixpointIterations, OREGetter, DEBUG_TYPE); 3911 3912 OpenMPOpt OMPOpt(SCC, CGUpdater, OREGetter, InfoCache, A); 3913 return OMPOpt.run(false); 3914 } 3915 3916 bool doFinalization(CallGraph &CG) override { return CGUpdater.finalize(); } 3917 }; 3918 3919 } // end anonymous namespace 3920 3921 KernelSet llvm::omp::getDeviceKernels(Module &M) { 3922 // TODO: Create a more cross-platform way of determining device kernels. 3923 NamedMDNode *MD = M.getOrInsertNamedMetadata("nvvm.annotations"); 3924 KernelSet Kernels; 3925 3926 if (!MD) 3927 return Kernels; 3928 3929 for (auto *Op : MD->operands()) { 3930 if (Op->getNumOperands() < 2) 3931 continue; 3932 MDString *KindID = dyn_cast<MDString>(Op->getOperand(1)); 3933 if (!KindID || KindID->getString() != "kernel") 3934 continue; 3935 3936 Function *KernelFn = 3937 mdconst::dyn_extract_or_null<Function>(Op->getOperand(0)); 3938 if (!KernelFn) 3939 continue; 3940 3941 ++NumOpenMPTargetRegionKernels; 3942 3943 Kernels.insert(KernelFn); 3944 } 3945 3946 return Kernels; 3947 } 3948 3949 bool llvm::omp::containsOpenMP(Module &M) { 3950 Metadata *MD = M.getModuleFlag("openmp"); 3951 if (!MD) 3952 return false; 3953 3954 return true; 3955 } 3956 3957 bool llvm::omp::isOpenMPDevice(Module &M) { 3958 Metadata *MD = M.getModuleFlag("openmp-device"); 3959 if (!MD) 3960 return false; 3961 3962 return true; 3963 } 3964 3965 char OpenMPOptCGSCCLegacyPass::ID = 0; 3966 3967 INITIALIZE_PASS_BEGIN(OpenMPOptCGSCCLegacyPass, "openmp-opt-cgscc", 3968 "OpenMP specific optimizations", false, false) 3969 INITIALIZE_PASS_DEPENDENCY(CallGraphWrapperPass) 3970 INITIALIZE_PASS_END(OpenMPOptCGSCCLegacyPass, "openmp-opt-cgscc", 3971 "OpenMP specific optimizations", false, false) 3972 3973 Pass *llvm::createOpenMPOptCGSCCLegacyPass() { 3974 return new OpenMPOptCGSCCLegacyPass(); 3975 } 3976