//===-- IPO/OpenMPOpt.cpp - Collection of OpenMP specific optimizations ---===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// OpenMP specific optimizations:
//
// - Deduplication of runtime calls, e.g., omp_get_thread_num.
// - Replacing globalized device memory with stack memory.
// - Replacing globalized device memory with shared memory.
// - Parallel region merging.
// - Transforming generic-mode device kernels to SPMD mode.
// - Specializing the state machine for generic-mode device kernels.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/IPO/OpenMPOpt.h"

#include "llvm/ADT/EnumeratedArray.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Analysis/CallGraph.h"
#include "llvm/Analysis/CallGraphSCCPass.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Frontend/OpenMP/OMPConstants.h"
#include "llvm/Frontend/OpenMP/OMPIRBuilder.h"
#include "llvm/IR/Assumptions.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/IntrinsicsAMDGPU.h"
#include "llvm/IR/IntrinsicsNVPTX.h"
#include "llvm/InitializePasses.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Transforms/IPO.h"
#include "llvm/Transforms/IPO/Attributor.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/CallGraphUpdater.h"
#include "llvm/Transforms/Utils/CodeExtractor.h"

#include <algorithm>

using namespace llvm;
using namespace omp;

#define DEBUG_TYPE "openmp-opt"

static cl::opt<bool> DisableOpenMPOptimizations(
    "openmp-opt-disable", cl::ZeroOrMore,
    cl::desc("Disable OpenMP specific optimizations."), cl::Hidden,
    cl::init(false));

static cl::opt<bool> EnableParallelRegionMerging(
    "openmp-opt-enable-merging", cl::ZeroOrMore,
    cl::desc("Enable the OpenMP region merging optimization."), cl::Hidden,
    cl::init(false));

static cl::opt<bool>
    DisableInternalization("openmp-opt-disable-internalization", cl::ZeroOrMore,
                           cl::desc("Disable function internalization."),
                           cl::Hidden, cl::init(false));

static cl::opt<bool> PrintICVValues("openmp-print-icv-values", cl::init(false),
                                    cl::Hidden);
static cl::opt<bool> PrintOpenMPKernels("openmp-print-gpu-kernels",
                                        cl::init(false), cl::Hidden);

static cl::opt<bool> HideMemoryTransferLatency(
    "openmp-hide-memory-transfer-latency",
    cl::desc("[WIP] Tries to hide the latency of host to device memory"
             " transfers"),
    cl::Hidden, cl::init(false));

static cl::opt<bool> DisableOpenMPOptDeglobalization(
    "openmp-opt-disable-deglobalization", cl::ZeroOrMore,
    cl::desc("Disable OpenMP optimizations involving deglobalization."),
    cl::Hidden, cl::init(false));

static cl::opt<bool> DisableOpenMPOptSPMDization(
    "openmp-opt-disable-spmdization", cl::ZeroOrMore,
    cl::desc("Disable OpenMP optimizations involving SPMD-ization."),
    cl::Hidden, cl::init(false));
static cl::opt<bool> DisableOpenMPOptFolding(
    "openmp-opt-disable-folding", cl::ZeroOrMore,
    cl::desc("Disable OpenMP optimizations involving folding."), cl::Hidden,
    cl::init(false));

static cl::opt<bool> DisableOpenMPOptStateMachineRewrite(
    "openmp-opt-disable-state-machine-rewrite", cl::ZeroOrMore,
    cl::desc("Disable OpenMP optimizations that replace the state machine."),
    cl::Hidden, cl::init(false));

static cl::opt<bool> PrintModuleAfterOptimizations(
    "openmp-opt-print-module", cl::ZeroOrMore,
    cl::desc("Print the current module after OpenMP optimizations."),
    cl::Hidden, cl::init(false));

static cl::opt<bool> AlwaysInlineDeviceFunctions(
    "openmp-opt-inline-device", cl::ZeroOrMore,
    cl::desc("Inline all applicable functions on the device."), cl::Hidden,
    cl::init(false));

static cl::opt<bool>
    EnableVerboseRemarks("openmp-opt-verbose-remarks", cl::ZeroOrMore,
                         cl::desc("Enables more verbose remarks."), cl::Hidden,
                         cl::init(false));

static cl::opt<unsigned>
    SetFixpointIterations("openmp-opt-max-iterations", cl::Hidden,
                          cl::desc("Maximal number of attributor iterations."),
                          cl::init(256));

STATISTIC(NumOpenMPRuntimeCallsDeduplicated,
          "Number of OpenMP runtime calls deduplicated");
STATISTIC(NumOpenMPParallelRegionsDeleted,
          "Number of OpenMP parallel regions deleted");
STATISTIC(NumOpenMPRuntimeFunctionsIdentified,
          "Number of OpenMP runtime functions identified");
STATISTIC(NumOpenMPRuntimeFunctionUsesIdentified,
          "Number of OpenMP runtime function uses identified");
STATISTIC(NumOpenMPTargetRegionKernels,
          "Number of OpenMP target region entry points (=kernels) identified");
STATISTIC(NumOpenMPTargetRegionKernelsSPMD,
          "Number of OpenMP target region entry points (=kernels) executed in "
          "SPMD-mode instead of generic-mode");
STATISTIC(NumOpenMPTargetRegionKernelsWithoutStateMachine,
          "Number of OpenMP target region entry points (=kernels) executed in "
          "generic-mode without a state machine");
STATISTIC(NumOpenMPTargetRegionKernelsCustomStateMachineWithFallback,
          "Number of OpenMP target region entry points (=kernels) executed in "
          "generic-mode with customized state machines with fallback");
STATISTIC(NumOpenMPTargetRegionKernelsCustomStateMachineWithoutFallback,
          "Number of OpenMP target region entry points (=kernels) executed in "
          "generic-mode with customized state machines without fallback");
STATISTIC(
    NumOpenMPParallelRegionsReplacedInGPUStateMachine,
    "Number of OpenMP parallel regions replaced with ID in GPU state machines");
STATISTIC(NumOpenMPParallelRegionsMerged,
          "Number of OpenMP parallel regions merged");
STATISTIC(NumBytesMovedToSharedMemory,
          "Amount of memory pushed to shared memory");

#if !defined(NDEBUG)
static constexpr auto TAG = "[" DEBUG_TYPE "]";
#endif

namespace {

enum class AddressSpace : unsigned {
  Generic = 0,
  Global = 1,
  Shared = 3,
  Constant = 4,
  Local = 5,
};

struct AAHeapToShared;

struct AAICVTracker;

/// OpenMP specific information. For now, stores RFIs and ICVs also needed for
/// Attributor runs.
struct OMPInformationCache : public InformationCache {
  OMPInformationCache(Module &M, AnalysisGetter &AG,
                      BumpPtrAllocator &Allocator, SetVector<Function *> &CGSCC,
                      SmallPtrSetImpl<Kernel> &Kernels)
      : InformationCache(M, AG, Allocator, &CGSCC), OMPBuilder(M),
        Kernels(Kernels) {

    OMPBuilder.initialize();
    initializeRuntimeFunctions();
    initializeInternalControlVars();
  }

  /// Generic information that describes an internal control variable.
  struct InternalControlVarInfo {
    /// The kind, as described by the InternalControlVar enum.
    InternalControlVar Kind;

    /// The name of the ICV.
    StringRef Name;

    /// Environment variable associated with this ICV.
    StringRef EnvVarName;

    /// Initial value kind.
    ICVInitValue InitKind;

    /// Initial value.
    ConstantInt *InitValue;

    /// Setter RTL function associated with this ICV.
    RuntimeFunction Setter;

    /// Getter RTL function associated with this ICV.
    RuntimeFunction Getter;

    /// RTL function corresponding to the override clause of this ICV.
    RuntimeFunction Clause;
  };

  /// Generic information that describes a runtime function.
  struct RuntimeFunctionInfo {

    /// The kind, as described by the RuntimeFunction enum.
    RuntimeFunction Kind;

    /// The name of the function.
    StringRef Name;

    /// Flag to indicate a variadic function.
    bool IsVarArg;

    /// The return type of the function.
    Type *ReturnType;

    /// The argument types of the function.
    SmallVector<Type *, 8> ArgumentTypes;

    /// The declaration if available.
    Function *Declaration = nullptr;

    /// Uses of this runtime function per function containing the use.
    using UseVector = SmallVector<Use *, 16>;

    /// Clear UsesMap for runtime function.
    void clearUsesMap() { UsesMap.clear(); }

    /// Boolean conversion that is true if the runtime function was found.
    operator bool() const { return Declaration; }

    /// Return the vector of uses in function \p F.
    UseVector &getOrCreateUseVector(Function *F) {
      std::shared_ptr<UseVector> &UV = UsesMap[F];
      if (!UV)
        UV = std::make_shared<UseVector>();
      return *UV;
    }

    /// Return the vector of uses in function \p F or `nullptr` if there are
    /// none.
    const UseVector *getUseVector(Function &F) const {
      auto I = UsesMap.find(&F);
      if (I != UsesMap.end())
        return I->second.get();
      return nullptr;
    }

    /// Return how many functions contain uses of this runtime function.
    size_t getNumFunctionsWithUses() const { return UsesMap.size(); }

    /// Return the number of arguments (or the minimal number for variadic
    /// functions).
    size_t getNumArgs() const { return ArgumentTypes.size(); }

    /// Run the callback \p CB on each use and forget the use if the result is
    /// true. The callback will be fed the function in which the use was
    /// encountered as second argument.
    void foreachUse(SmallVectorImpl<Function *> &SCC,
                    function_ref<bool(Use &, Function &)> CB) {
      for (Function *F : SCC)
        foreachUse(CB, F);
    }

    /// Run the callback \p CB on each use within the function \p F and forget
    /// the use if the result is true.
    void foreachUse(function_ref<bool(Use &, Function &)> CB, Function *F) {
      SmallVector<unsigned, 8> ToBeDeleted;
      ToBeDeleted.clear();

      unsigned Idx = 0;
      UseVector &UV = getOrCreateUseVector(F);

      for (Use *U : UV) {
        if (CB(*U, *F))
          ToBeDeleted.push_back(Idx);
        ++Idx;
      }

      // Remove the to-be-deleted indices in reverse order as prior
      // modifications will not modify the smaller indices.
      while (!ToBeDeleted.empty()) {
        unsigned Idx = ToBeDeleted.pop_back_val();
        UV[Idx] = UV.back();
        UV.pop_back();
      }
    }
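    // For illustration, a worked example of the swap-with-back removal above
    // (hypothetical use list): with UV = [u0, u1, u2, u3, u4] and
    // ToBeDeleted = [1, 3], index 3 is processed first (the back element u4
    // is swapped in and the vector shrinks), then index 1, yielding
    // [u0, u4, u2]. Handling larger indices first guarantees no pending
    // smaller index is ever displaced; element order is not preserved.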
  private:
    /// Map from functions to all uses of this runtime function contained in
    /// them.
    DenseMap<Function *, std::shared_ptr<UseVector>> UsesMap;

  public:
    /// Iterators for the uses of this runtime function.
    decltype(UsesMap)::iterator begin() { return UsesMap.begin(); }
    decltype(UsesMap)::iterator end() { return UsesMap.end(); }
  };

  /// An OpenMP-IR-Builder instance
  OpenMPIRBuilder OMPBuilder;

  /// Map from runtime function kind to the runtime function description.
  EnumeratedArray<RuntimeFunctionInfo, RuntimeFunction,
                  RuntimeFunction::OMPRTL___last>
      RFIs;

  /// Map from function declarations/definitions to their runtime enum type.
  DenseMap<Function *, RuntimeFunction> RuntimeFunctionIDMap;

  /// Map from ICV kind to the ICV description.
  EnumeratedArray<InternalControlVarInfo, InternalControlVar,
                  InternalControlVar::ICV___last>
      ICVs;

  /// Helper to initialize all internal control variable information for those
  /// defined in OMPKinds.def.
  void initializeInternalControlVars() {
#define ICV_RT_SET(_Name, RTL)                                                 \
  {                                                                            \
    auto &ICV = ICVs[_Name];                                                   \
    ICV.Setter = RTL;                                                          \
  }
#define ICV_RT_GET(Name, RTL)                                                  \
  {                                                                            \
    auto &ICV = ICVs[Name];                                                    \
    ICV.Getter = RTL;                                                          \
  }
#define ICV_DATA_ENV(Enum, _Name, _EnvVarName, Init)                           \
  {                                                                            \
    auto &ICV = ICVs[Enum];                                                    \
    ICV.Name = _Name;                                                          \
    ICV.Kind = Enum;                                                           \
    ICV.InitKind = Init;                                                       \
    ICV.EnvVarName = _EnvVarName;                                              \
    switch (ICV.InitKind) {                                                    \
    case ICV_IMPLEMENTATION_DEFINED:                                           \
      ICV.InitValue = nullptr;                                                 \
      break;                                                                   \
    case ICV_ZERO:                                                             \
      ICV.InitValue = ConstantInt::get(                                        \
          Type::getInt32Ty(OMPBuilder.Int32->getContext()), 0);                \
      break;                                                                   \
    case ICV_FALSE:                                                            \
      ICV.InitValue = ConstantInt::getFalse(OMPBuilder.Int1->getContext());    \
      break;                                                                   \
    case ICV_LAST:                                                             \
      break;                                                                   \
    }                                                                          \
  }
#include "llvm/Frontend/OpenMP/OMPKinds.def"
  }
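  // For illustration, a hypothetical OMPKinds.def entry of the form
  //   ICV_DATA_ENV(ICV_nthreads, "nthreads", "OMP_NUM_THREADS",
  //                ICV_IMPLEMENTATION_DEFINED)
  // would fill ICVs[ICV_nthreads] with its name and environment variable and
  // a nullptr InitValue, while matching ICV_RT_SET/ICV_RT_GET entries wire up
  // the setter and getter RTL functions (e.g., omp_set_num_threads and
  // omp_get_max_threads).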
  /// Returns true if the function declaration \p F matches the runtime
  /// function types, that is, return type \p RTFRetType, and argument types
  /// \p RTFArgTypes.
  static bool declMatchesRTFTypes(Function *F, Type *RTFRetType,
                                  SmallVector<Type *, 8> &RTFArgTypes) {
    // TODO: We should output information to the user (under debug output
    //       and via remarks).

    if (!F)
      return false;
    if (F->getReturnType() != RTFRetType)
      return false;
    if (F->arg_size() != RTFArgTypes.size())
      return false;

    auto *RTFTyIt = RTFArgTypes.begin();
    for (Argument &Arg : F->args()) {
      if (Arg.getType() != *RTFTyIt)
        return false;

      ++RTFTyIt;
    }

    return true;
  }

  // Helper to collect all uses of the declaration in the UsesMap.
  unsigned collectUses(RuntimeFunctionInfo &RFI, bool CollectStats = true) {
    unsigned NumUses = 0;
    if (!RFI.Declaration)
      return NumUses;
    OMPBuilder.addAttributes(RFI.Kind, *RFI.Declaration);

    if (CollectStats) {
      NumOpenMPRuntimeFunctionsIdentified += 1;
      NumOpenMPRuntimeFunctionUsesIdentified += RFI.Declaration->getNumUses();
    }

    // TODO: We directly convert uses into proper calls and unknown uses.
    for (Use &U : RFI.Declaration->uses()) {
      if (Instruction *UserI = dyn_cast<Instruction>(U.getUser())) {
        if (ModuleSlice.count(UserI->getFunction())) {
          RFI.getOrCreateUseVector(UserI->getFunction()).push_back(&U);
          ++NumUses;
        }
      } else {
        RFI.getOrCreateUseVector(nullptr).push_back(&U);
        ++NumUses;
      }
    }
    return NumUses;
  }

  // Helper function to recollect uses of a runtime function.
  void recollectUsesForFunction(RuntimeFunction RTF) {
    auto &RFI = RFIs[RTF];
    RFI.clearUsesMap();
    collectUses(RFI, /*CollectStats*/ false);
  }

  // Helper function to recollect uses of all runtime functions.
  void recollectUses() {
    for (int Idx = 0; Idx < RFIs.size(); ++Idx)
      recollectUsesForFunction(static_cast<RuntimeFunction>(Idx));
  }

  /// Helper to initialize all runtime function information for those defined
  /// in OpenMPKinds.def.
  void initializeRuntimeFunctions() {
    Module &M = *((*ModuleSlice.begin())->getParent());

    // Helper macros for handling __VA_ARGS__ in OMP_RTL
#define OMP_TYPE(VarName, ...)                                                 \
  Type *VarName = OMPBuilder.VarName;                                          \
  (void)VarName;

#define OMP_ARRAY_TYPE(VarName, ...)                                           \
  ArrayType *VarName##Ty = OMPBuilder.VarName##Ty;                             \
  (void)VarName##Ty;                                                           \
  PointerType *VarName##PtrTy = OMPBuilder.VarName##PtrTy;                     \
  (void)VarName##PtrTy;

#define OMP_FUNCTION_TYPE(VarName, ...)                                        \
  FunctionType *VarName = OMPBuilder.VarName;                                  \
  (void)VarName;                                                               \
  PointerType *VarName##Ptr = OMPBuilder.VarName##Ptr;                         \
  (void)VarName##Ptr;

#define OMP_STRUCT_TYPE(VarName, ...)                                          \
  StructType *VarName = OMPBuilder.VarName;                                    \
  (void)VarName;                                                               \
  PointerType *VarName##Ptr = OMPBuilder.VarName##Ptr;                         \
  (void)VarName##Ptr;

#define OMP_RTL(_Enum, _Name, _IsVarArg, _ReturnType, ...)                     \
  {                                                                            \
    SmallVector<Type *, 8> ArgsTypes({__VA_ARGS__});                           \
    Function *F = M.getFunction(_Name);                                        \
    RTLFunctions.insert(F);                                                    \
    if (declMatchesRTFTypes(F, OMPBuilder._ReturnType, ArgsTypes)) {           \
      RuntimeFunctionIDMap[F] = _Enum;                                         \
      F->removeFnAttr(Attribute::NoInline);                                    \
      auto &RFI = RFIs[_Enum];                                                 \
      RFI.Kind = _Enum;                                                        \
      RFI.Name = _Name;                                                        \
      RFI.IsVarArg = _IsVarArg;                                                \
      RFI.ReturnType = OMPBuilder._ReturnType;                                 \
      RFI.ArgumentTypes = std::move(ArgsTypes);                                \
      RFI.Declaration = F;                                                     \
      unsigned NumUses = collectUses(RFI);                                     \
      (void)NumUses;                                                           \
      LLVM_DEBUG({                                                             \
        dbgs() << TAG << RFI.Name << (RFI.Declaration ? "" : " not")           \
               << " found\n";                                                  \
        if (RFI.Declaration)                                                   \
          dbgs() << TAG << "-> got " << NumUses << " uses in "                 \
                 << RFI.getNumFunctionsWithUses()                              \
                 << " different functions.\n";                                 \
      });                                                                      \
    }                                                                          \
  }
#include "llvm/Frontend/OpenMP/OMPKinds.def"

    // TODO: We should attach the attributes defined in OMPKinds.def.
  }
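  // For illustration, a hypothetical OMPKinds.def entry of the form
  //   OMP_RTL(OMPRTL___kmpc_barrier, "__kmpc_barrier", false, Void, IdentPtr,
  //           Int32)
  // would register a runtime function returning void and taking
  // (ident_t *, i32); if a declaration with exactly these types exists in the
  // module, its uses are collected into the per-function UsesMap.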
  /// Collection of known kernels (\see Kernel) in the module.
  SmallPtrSetImpl<Kernel> &Kernels;

  /// Collection of known OpenMP runtime functions.
  DenseSet<const Function *> RTLFunctions;
};

template <typename Ty, bool InsertInvalidates = true>
struct BooleanStateWithSetVector : public BooleanState {
  bool contains(const Ty &Elem) const { return Set.contains(Elem); }
  bool insert(const Ty &Elem) {
    if (InsertInvalidates)
      BooleanState::indicatePessimisticFixpoint();
    return Set.insert(Elem);
  }

  const Ty &operator[](int Idx) const { return Set[Idx]; }
  bool operator==(const BooleanStateWithSetVector &RHS) const {
    return BooleanState::operator==(RHS) && Set == RHS.Set;
  }
  bool operator!=(const BooleanStateWithSetVector &RHS) const {
    return !(*this == RHS);
  }

  bool empty() const { return Set.empty(); }
  size_t size() const { return Set.size(); }

  /// "Clamp" this state with \p RHS.
  BooleanStateWithSetVector &operator^=(const BooleanStateWithSetVector &RHS) {
    BooleanState::operator^=(RHS);
    Set.insert(RHS.Set.begin(), RHS.Set.end());
    return *this;
  }

private:
  /// A set to keep track of elements.
  SetVector<Ty> Set;

public:
  typename decltype(Set)::iterator begin() { return Set.begin(); }
  typename decltype(Set)::iterator end() { return Set.end(); }
  typename decltype(Set)::const_iterator begin() const { return Set.begin(); }
  typename decltype(Set)::const_iterator end() const { return Set.end(); }
};

template <typename Ty, bool InsertInvalidates = true>
using BooleanStateWithPtrSetVector =
    BooleanStateWithSetVector<Ty *, InsertInvalidates>;

struct KernelInfoState : AbstractState {
  /// Flag to track if we reached a fixpoint.
  bool IsAtFixpoint = false;

  /// The parallel regions (identified by the outlined parallel functions) that
  /// can be reached from the associated function.
  BooleanStateWithPtrSetVector<Function, /* InsertInvalidates */ false>
      ReachedKnownParallelRegions;

  /// State to track what parallel region we might reach.
  BooleanStateWithPtrSetVector<CallBase> ReachedUnknownParallelRegions;

  /// State to track if we are in SPMD-mode, assumed or known, and why we
  /// decided we cannot be. If it is assumed, then RequiresFullRuntime should
  /// also be false.
  BooleanStateWithPtrSetVector<Instruction, false> SPMDCompatibilityTracker;

  /// The __kmpc_target_init call in this kernel, if any. If we find more than
  /// one we abort as the kernel is malformed.
  CallBase *KernelInitCB = nullptr;

  /// The __kmpc_target_deinit call in this kernel, if any. If we find more
  /// than one we abort as the kernel is malformed.
  CallBase *KernelDeinitCB = nullptr;

  /// Flag to indicate if the associated function is a kernel entry.
  bool IsKernelEntry = false;

  /// State to track what kernel entries can reach the associated function.
  BooleanStateWithPtrSetVector<Function, false> ReachingKernelEntries;

  /// State to indicate if we can track the parallel level of the associated
  /// function. We will give up tracking if we encounter an unknown caller or
  /// the caller is __kmpc_parallel_51.
  BooleanStateWithSetVector<uint8_t> ParallelLevels;

  /// Abstract State interface
  ///{

  KernelInfoState() {}
  KernelInfoState(bool BestState) {
    if (!BestState)
      indicatePessimisticFixpoint();
  }

  /// See AbstractState::isValidState(...)
  bool isValidState() const override { return true; }

  /// See AbstractState::isAtFixpoint(...)
  bool isAtFixpoint() const override { return IsAtFixpoint; }

  /// See AbstractState::indicatePessimisticFixpoint(...)
  ChangeStatus indicatePessimisticFixpoint() override {
    IsAtFixpoint = true;
    ReachingKernelEntries.indicatePessimisticFixpoint();
    SPMDCompatibilityTracker.indicatePessimisticFixpoint();
    ReachedKnownParallelRegions.indicatePessimisticFixpoint();
    ReachedUnknownParallelRegions.indicatePessimisticFixpoint();
    return ChangeStatus::CHANGED;
  }

  /// See AbstractState::indicateOptimisticFixpoint(...)
  ChangeStatus indicateOptimisticFixpoint() override {
    IsAtFixpoint = true;
    ReachingKernelEntries.indicateOptimisticFixpoint();
    SPMDCompatibilityTracker.indicateOptimisticFixpoint();
    ReachedKnownParallelRegions.indicateOptimisticFixpoint();
    ReachedUnknownParallelRegions.indicateOptimisticFixpoint();
    return ChangeStatus::UNCHANGED;
  }

  /// Return the assumed state
  KernelInfoState &getAssumed() { return *this; }
  const KernelInfoState &getAssumed() const { return *this; }

  bool operator==(const KernelInfoState &RHS) const {
    if (SPMDCompatibilityTracker != RHS.SPMDCompatibilityTracker)
      return false;
    if (ReachedKnownParallelRegions != RHS.ReachedKnownParallelRegions)
      return false;
    if (ReachedUnknownParallelRegions != RHS.ReachedUnknownParallelRegions)
      return false;
    if (ReachingKernelEntries != RHS.ReachingKernelEntries)
      return false;
    return true;
  }

  /// Returns true if this kernel contains any OpenMP parallel regions.
  bool mayContainParallelRegion() {
    return !ReachedKnownParallelRegions.empty() ||
           !ReachedUnknownParallelRegions.empty();
  }

  /// Return empty set as the best state of potential values.
  static KernelInfoState getBestState() { return KernelInfoState(true); }

  static KernelInfoState getBestState(KernelInfoState &KIS) {
    return getBestState();
  }

  /// Return full set as the worst state of potential values.
  static KernelInfoState getWorstState() { return KernelInfoState(false); }

  /// "Clamp" this state with \p KIS.
  KernelInfoState operator^=(const KernelInfoState &KIS) {
    // Do not merge two different _init and _deinit call sites.
    if (KIS.KernelInitCB) {
      if (KernelInitCB && KernelInitCB != KIS.KernelInitCB)
        llvm_unreachable("Kernel that calls another kernel violates OpenMP-Opt "
                         "assumptions.");
      KernelInitCB = KIS.KernelInitCB;
    }
    if (KIS.KernelDeinitCB) {
      if (KernelDeinitCB && KernelDeinitCB != KIS.KernelDeinitCB)
        llvm_unreachable("Kernel that calls another kernel violates OpenMP-Opt "
                         "assumptions.");
      KernelDeinitCB = KIS.KernelDeinitCB;
    }
    SPMDCompatibilityTracker ^= KIS.SPMDCompatibilityTracker;
    ReachedKnownParallelRegions ^= KIS.ReachedKnownParallelRegions;
    ReachedUnknownParallelRegions ^= KIS.ReachedUnknownParallelRegions;
    return *this;
  }

  KernelInfoState operator&=(const KernelInfoState &KIS) {
    return (*this ^= KIS);
  }

  ///}
};
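// For illustration, a hypothetical offload array as it appears in the IR:
//   %offload_baseptrs = alloca [2 x i8*]
//   %gep0 = getelementptr inbounds [2 x i8*], [2 x i8*]* %offload_baseptrs,
//           i64 0, i64 0
//   store i8* %base0, i8** %gep0
//   ; ... store to slot 1, then the runtime call ...
//   call void @__tgt_target_data_begin_mapper(..., %offload_baseptrs, ...)
// The helper below recovers {%base0, ...} from the stores that precede the
// runtime call.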
/// Used to map the values physically (in the IR) stored in an offload
/// array, to a vector in memory.
struct OffloadArray {
  /// Physical array (in the IR).
  AllocaInst *Array = nullptr;
  /// Mapped values.
  SmallVector<Value *, 8> StoredValues;
  /// Last stores made in the offload array.
  SmallVector<StoreInst *, 8> LastAccesses;

  OffloadArray() = default;

  /// Initializes the OffloadArray with the values stored in \p Array before
  /// instruction \p Before is reached. Returns false if the initialization
  /// fails.
  /// This MUST be used immediately after the construction of the object.
  bool initialize(AllocaInst &Array, Instruction &Before) {
    if (!Array.getAllocatedType()->isArrayTy())
      return false;

    if (!getValues(Array, Before))
      return false;

    this->Array = &Array;
    return true;
  }

  static const unsigned DeviceIDArgNum = 1;
  static const unsigned BasePtrsArgNum = 3;
  static const unsigned PtrsArgNum = 4;
  static const unsigned SizesArgNum = 5;

private:
  /// Traverses the BasicBlock where \p Array is, collecting the stores made to
  /// \p Array, leaving StoredValues with the values stored before the
  /// instruction \p Before is reached.
  bool getValues(AllocaInst &Array, Instruction &Before) {
    // Initialize container.
    const uint64_t NumValues = Array.getAllocatedType()->getArrayNumElements();
    StoredValues.assign(NumValues, nullptr);
    LastAccesses.assign(NumValues, nullptr);

    // TODO: This assumes the instruction \p Before is in the same
    //  BasicBlock as Array. Make it general, for any control flow graph.
    BasicBlock *BB = Array.getParent();
    if (BB != Before.getParent())
      return false;

    const DataLayout &DL = Array.getModule()->getDataLayout();
    const unsigned int PointerSize = DL.getPointerSize();

    for (Instruction &I : *BB) {
      if (&I == &Before)
        break;

      if (!isa<StoreInst>(&I))
        continue;

      auto *S = cast<StoreInst>(&I);
      int64_t Offset = -1;
      auto *Dst =
          GetPointerBaseWithConstantOffset(S->getPointerOperand(), Offset, DL);
      if (Dst == &Array) {
        int64_t Idx = Offset / PointerSize;
        StoredValues[Idx] = getUnderlyingObject(S->getValueOperand());
        LastAccesses[Idx] = S;
      }
    }

    return isFilled();
  }

  /// Returns true if all values in StoredValues and
  /// LastAccesses are not nullptrs.
  bool isFilled() {
    const unsigned NumValues = StoredValues.size();
    for (unsigned I = 0; I < NumValues; ++I) {
      if (!StoredValues[I] || !LastAccesses[I])
        return false;
    }

    return true;
  }
};

struct OpenMPOpt {

  using OptimizationRemarkGetter =
      function_ref<OptimizationRemarkEmitter &(Function *)>;

  OpenMPOpt(SmallVectorImpl<Function *> &SCC, CallGraphUpdater &CGUpdater,
            OptimizationRemarkGetter OREGetter,
            OMPInformationCache &OMPInfoCache, Attributor &A)
      : M(*(*SCC.begin())->getParent()), SCC(SCC), CGUpdater(CGUpdater),
        OREGetter(OREGetter), OMPInfoCache(OMPInfoCache), A(A) {}

  /// Check if any remarks are enabled for openmp-opt
  bool remarksEnabled() {
    auto &Ctx = M.getContext();
    return Ctx.getDiagHandlerPtr()->isAnyRemarkEnabled(DEBUG_TYPE);
  }

  /// Run all OpenMP optimizations on the underlying SCC/ModuleSlice.
  bool run(bool IsModulePass) {
    if (SCC.empty())
      return false;

    bool Changed = false;

    LLVM_DEBUG(dbgs() << TAG << "Run on SCC with " << SCC.size()
                      << " functions in a slice with "
                      << OMPInfoCache.ModuleSlice.size() << " functions\n");

    if (IsModulePass) {
      Changed |= runAttributor(IsModulePass);

      // Recollect uses, in case Attributor deleted any.
      OMPInfoCache.recollectUses();

      // TODO: This should be folded into buildCustomStateMachine.
      Changed |= rewriteDeviceCodeStateMachine();

      if (remarksEnabled())
        analysisGlobalization();
    } else {
      if (PrintICVValues)
        printICVs();
      if (PrintOpenMPKernels)
        printKernels();

      Changed |= runAttributor(IsModulePass);

      // Recollect uses, in case Attributor deleted any.
      OMPInfoCache.recollectUses();

      Changed |= deleteParallelRegions();

      if (HideMemoryTransferLatency)
        Changed |= hideMemTransfersLatency();
      Changed |= deduplicateRuntimeCalls();
      if (EnableParallelRegionMerging) {
        if (mergeParallelRegions()) {
          deduplicateRuntimeCalls();
          Changed = true;
        }
      }
    }

    return Changed;
  }

  /// Print initial ICV values for testing.
  /// FIXME: This should be done from the Attributor once it is added.
  void printICVs() const {
    InternalControlVar ICVs[] = {ICV_nthreads, ICV_active_levels, ICV_cancel,
                                 ICV_proc_bind};

    for (Function *F : OMPInfoCache.ModuleSlice) {
      for (auto ICV : ICVs) {
        auto ICVInfo = OMPInfoCache.ICVs[ICV];
        auto Remark = [&](OptimizationRemarkAnalysis ORA) {
          return ORA << "OpenMP ICV " << ore::NV("OpenMPICV", ICVInfo.Name)
                     << " Value: "
                     << (ICVInfo.InitValue
                             ? toString(ICVInfo.InitValue->getValue(), 10, true)
                             : "IMPLEMENTATION_DEFINED");
        };

        emitRemark<OptimizationRemarkAnalysis>(F, "OpenMPICVTracker", Remark);
      }
    }
  }

  /// Print OpenMP GPU kernels for testing.
  void printKernels() const {
    for (Function *F : SCC) {
      if (!OMPInfoCache.Kernels.count(F))
        continue;

      auto Remark = [&](OptimizationRemarkAnalysis ORA) {
        return ORA << "OpenMP GPU kernel "
                   << ore::NV("OpenMPGPUKernel", F->getName()) << "\n";
      };

      emitRemark<OptimizationRemarkAnalysis>(F, "OpenMPGPU", Remark);
    }
  }

  /// Return the call if \p U is a callee use in a regular call. If \p RFI is
  /// given, it has to be the callee; otherwise nullptr is returned.
  static CallInst *getCallIfRegularCall(
      Use &U, OMPInformationCache::RuntimeFunctionInfo *RFI = nullptr) {
    CallInst *CI = dyn_cast<CallInst>(U.getUser());
    if (CI && CI->isCallee(&U) && !CI->hasOperandBundles() &&
        (!RFI ||
         (RFI->Declaration && CI->getCalledFunction() == RFI->Declaration)))
      return CI;
    return nullptr;
  }

  /// Return the call if \p V is a regular call. If \p RFI is given, it has to
  /// be the callee; otherwise nullptr is returned.
  static CallInst *getCallIfRegularCall(
      Value &V, OMPInformationCache::RuntimeFunctionInfo *RFI = nullptr) {
    CallInst *CI = dyn_cast<CallInst>(&V);
    if (CI && !CI->hasOperandBundles() &&
        (!RFI ||
         (RFI->Declaration && CI->getCalledFunction() == RFI->Declaration)))
      return CI;
    return nullptr;
  }

private:
  /// Merge parallel regions when it is safe.
  bool mergeParallelRegions() {
    const unsigned CallbackCalleeOperand = 2;
    const unsigned CallbackFirstArgOperand = 3;
    using InsertPointTy = OpenMPIRBuilder::InsertPointTy;

    // Check if there are any __kmpc_fork_call calls to merge.
    OMPInformationCache::RuntimeFunctionInfo &RFI =
        OMPInfoCache.RFIs[OMPRTL___kmpc_fork_call];

    if (!RFI.Declaration)
      return false;

    // Unmergable calls that prevent merging a parallel region.
    OMPInformationCache::RuntimeFunctionInfo UnmergableCallsInfo[] = {
        OMPInfoCache.RFIs[OMPRTL___kmpc_push_proc_bind],
        OMPInfoCache.RFIs[OMPRTL___kmpc_push_num_threads],
    };

    bool Changed = false;
    LoopInfo *LI = nullptr;
    DominatorTree *DT = nullptr;

    SmallDenseMap<BasicBlock *, SmallPtrSet<Instruction *, 4>> BB2PRMap;

    BasicBlock *StartBB = nullptr, *EndBB = nullptr;
    auto BodyGenCB = [&](InsertPointTy AllocaIP, InsertPointTy CodeGenIP,
                         BasicBlock &ContinuationIP) {
      BasicBlock *CGStartBB = CodeGenIP.getBlock();
      BasicBlock *CGEndBB =
          SplitBlock(CGStartBB, &*CodeGenIP.getPoint(), DT, LI);
      assert(StartBB != nullptr && "StartBB should not be null");
      CGStartBB->getTerminator()->setSuccessor(0, StartBB);
      assert(EndBB != nullptr && "EndBB should not be null");
      EndBB->getTerminator()->setSuccessor(0, CGEndBB);
    };

    auto PrivCB = [&](InsertPointTy AllocaIP, InsertPointTy CodeGenIP, Value &,
                      Value &Inner, Value *&ReplacementValue) -> InsertPointTy {
      ReplacementValue = &Inner;
      return CodeGenIP;
    };

    auto FiniCB = [&](InsertPointTy CodeGenIP) {};

    /// Create a sequential execution region within a merged parallel region,
    /// encapsulated in a master construct with a barrier for synchronization.
    auto CreateSequentialRegion = [&](Function *OuterFn,
                                      BasicBlock *OuterPredBB,
                                      Instruction *SeqStartI,
                                      Instruction *SeqEndI) {
      // Isolate the instructions of the sequential region to a separate
      // block.
      BasicBlock *ParentBB = SeqStartI->getParent();
      BasicBlock *SeqEndBB =
          SplitBlock(ParentBB, SeqEndI->getNextNode(), DT, LI);
      BasicBlock *SeqAfterBB =
          SplitBlock(SeqEndBB, &*SeqEndBB->getFirstInsertionPt(), DT, LI);
      BasicBlock *SeqStartBB =
          SplitBlock(ParentBB, SeqStartI, DT, LI, nullptr, "seq.par.merged");

      assert(ParentBB->getUniqueSuccessor() == SeqStartBB &&
             "Expected a different CFG");
      const DebugLoc DL = ParentBB->getTerminator()->getDebugLoc();
      ParentBB->getTerminator()->eraseFromParent();

      auto BodyGenCB = [&](InsertPointTy AllocaIP, InsertPointTy CodeGenIP,
                           BasicBlock &ContinuationIP) {
        BasicBlock *CGStartBB = CodeGenIP.getBlock();
        BasicBlock *CGEndBB =
            SplitBlock(CGStartBB, &*CodeGenIP.getPoint(), DT, LI);
        assert(SeqStartBB != nullptr && "SeqStartBB should not be null");
        CGStartBB->getTerminator()->setSuccessor(0, SeqStartBB);
        assert(SeqEndBB != nullptr && "SeqEndBB should not be null");
        SeqEndBB->getTerminator()->setSuccessor(0, CGEndBB);
      };
      auto FiniCB = [&](InsertPointTy CodeGenIP) {};

      // Find outputs from the sequential region to outside users and
      // broadcast their values to them.
      for (Instruction &I : *SeqStartBB) {
        SmallPtrSet<Instruction *, 4> OutsideUsers;
        for (User *Usr : I.users()) {
          Instruction &UsrI = *cast<Instruction>(Usr);
          // Ignore outputs to lifetime intrinsics; code extraction for the
          // merged parallel region will fix them.
          if (UsrI.isLifetimeStartOrEnd())
            continue;

          if (UsrI.getParent() != SeqStartBB)
            OutsideUsers.insert(&UsrI);
        }

        if (OutsideUsers.empty())
          continue;

        // Emit an alloca in the outer region to store the broadcasted
        // value.
        const DataLayout &DL = M.getDataLayout();
        AllocaInst *AllocaI = new AllocaInst(
            I.getType(), DL.getAllocaAddrSpace(), nullptr,
            I.getName() + ".seq.output.alloc", &OuterFn->front().front());

        // Emit a store instruction in the sequential BB to update the
        // value.
        new StoreInst(&I, AllocaI, SeqStartBB->getTerminator());

        // Emit a load instruction and replace the use of the output value
        // with it.
        for (Instruction *UsrI : OutsideUsers) {
          LoadInst *LoadI = new LoadInst(
              I.getType(), AllocaI, I.getName() + ".seq.output.load", UsrI);
          UsrI->replaceUsesOfWith(&I, LoadI);
        }
      }

      OpenMPIRBuilder::LocationDescription Loc(
          InsertPointTy(ParentBB, ParentBB->end()), DL);
      InsertPointTy SeqAfterIP =
          OMPInfoCache.OMPBuilder.createMaster(Loc, BodyGenCB, FiniCB);

      OMPInfoCache.OMPBuilder.createBarrier(SeqAfterIP, OMPD_parallel);

      BranchInst::Create(SeqAfterBB, SeqAfterIP.getBlock());

      LLVM_DEBUG(dbgs() << TAG << "After sequential inlining " << *OuterFn
                        << "\n");
    };
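    // For illustration (hypothetical source), merging turns
    //   #pragma omp parallel
    //   { A(); }
    //   seq();
    //   #pragma omp parallel
    //   { B(); }
    // into a single parallel region in which the sequential part is guarded
    // by a master construct followed by a barrier:
    //   #pragma omp parallel
    //   { A(); /* master: seq(); then barrier */ B(); }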
    // Helper to merge the __kmpc_fork_call calls in MergableCIs. They are all
    // contained in BB and only separated by instructions that can be
    // redundantly executed in parallel. The block BB is split before the first
    // call (in MergableCIs) and after the last so the entire region we merge
    // into a single parallel region is contained in a single basic block
    // without any other instructions. We use the OpenMPIRBuilder to outline
    // that block and call the resulting function via __kmpc_fork_call.
    auto Merge = [&](SmallVectorImpl<CallInst *> &MergableCIs, BasicBlock *BB) {
      // TODO: Change the interface to allow single CIs expanded, e.g, to
      // include an outer loop.
      assert(MergableCIs.size() > 1 && "Assumed multiple mergable CIs");

      auto Remark = [&](OptimizationRemark OR) {
        OR << "Parallel region merged with parallel region"
           << (MergableCIs.size() > 2 ? "s" : "") << " at ";
        for (auto *CI : llvm::drop_begin(MergableCIs)) {
          OR << ore::NV("OpenMPParallelMerge", CI->getDebugLoc());
          if (CI != MergableCIs.back())
            OR << ", ";
        }
        return OR << ".";
      };

      emitRemark<OptimizationRemark>(MergableCIs.front(), "OMP150", Remark);

      Function *OriginalFn = BB->getParent();
      LLVM_DEBUG(dbgs() << TAG << "Merge " << MergableCIs.size()
                        << " parallel regions in " << OriginalFn->getName()
                        << "\n");

      // Isolate the calls to merge in a separate block.
      EndBB = SplitBlock(BB, MergableCIs.back()->getNextNode(), DT, LI);
      BasicBlock *AfterBB =
          SplitBlock(EndBB, &*EndBB->getFirstInsertionPt(), DT, LI);
      StartBB = SplitBlock(BB, MergableCIs.front(), DT, LI, nullptr,
                           "omp.par.merged");

      assert(BB->getUniqueSuccessor() == StartBB && "Expected a different CFG");
      const DebugLoc DL = BB->getTerminator()->getDebugLoc();
      BB->getTerminator()->eraseFromParent();

      // Create sequential regions for sequential instructions that are
      // in-between mergable parallel regions.
      for (auto *It = MergableCIs.begin(), *End = MergableCIs.end() - 1;
           It != End; ++It) {
        Instruction *ForkCI = *It;
        Instruction *NextForkCI = *(It + 1);

        // Continue if there are no in-between instructions.
        if (ForkCI->getNextNode() == NextForkCI)
          continue;

        CreateSequentialRegion(OriginalFn, BB, ForkCI->getNextNode(),
                               NextForkCI->getPrevNode());
      }

      OpenMPIRBuilder::LocationDescription Loc(InsertPointTy(BB, BB->end()),
                                               DL);
      IRBuilder<>::InsertPoint AllocaIP(
          &OriginalFn->getEntryBlock(),
          OriginalFn->getEntryBlock().getFirstInsertionPt());
      // Create the merged parallel region with default proc binding, to
      // avoid overriding binding settings, and without explicit cancellation.
      InsertPointTy AfterIP = OMPInfoCache.OMPBuilder.createParallel(
          Loc, AllocaIP, BodyGenCB, PrivCB, FiniCB, nullptr, nullptr,
          OMP_PROC_BIND_default, /* IsCancellable */ false);
      BranchInst::Create(AfterBB, AfterIP.getBlock());

      // Perform the actual outlining.
      OMPInfoCache.OMPBuilder.finalize(OriginalFn,
                                       /* AllowExtractorSinking */ true);

      Function *OutlinedFn = MergableCIs.front()->getCaller();

      // Replace the __kmpc_fork_call calls with direct calls to the outlined
      // callbacks.
      SmallVector<Value *, 8> Args;
      for (auto *CI : MergableCIs) {
        Value *Callee =
            CI->getArgOperand(CallbackCalleeOperand)->stripPointerCasts();
        FunctionType *FT =
            cast<FunctionType>(Callee->getType()->getPointerElementType());
        Args.clear();
        Args.push_back(OutlinedFn->getArg(0));
        Args.push_back(OutlinedFn->getArg(1));
        for (unsigned U = CallbackFirstArgOperand, E = CI->arg_size(); U < E;
             ++U)
          Args.push_back(CI->getArgOperand(U));

        CallInst *NewCI = CallInst::Create(FT, Callee, Args, "", CI);
        if (CI->getDebugLoc())
          NewCI->setDebugLoc(CI->getDebugLoc());

        // Forward parameter attributes from the callback to the callee.
        for (unsigned U = CallbackFirstArgOperand, E = CI->arg_size(); U < E;
             ++U)
          for (const Attribute &A : CI->getAttributes().getParamAttrs(U))
            NewCI->addParamAttr(
                U - (CallbackFirstArgOperand - CallbackCalleeOperand), A);

        // Emit an explicit barrier to replace the implicit fork-join barrier.
        if (CI != MergableCIs.back()) {
          // TODO: Remove barrier if the merged parallel region includes the
          // 'nowait' clause.
          OMPInfoCache.OMPBuilder.createBarrier(
              InsertPointTy(NewCI->getParent(),
                            NewCI->getNextNode()->getIterator()),
              OMPD_parallel);
        }

        CI->eraseFromParent();
      }

      assert(OutlinedFn != OriginalFn && "Outlining failed");
      CGUpdater.registerOutlinedFunction(*OriginalFn, *OutlinedFn);
      CGUpdater.reanalyzeFunction(*OriginalFn);

      NumOpenMPParallelRegionsMerged += MergableCIs.size();

      return true;
    };

    // Helper function that identifies sequences of
    // __kmpc_fork_call uses in a basic block.
    auto DetectPRsCB = [&](Use &U, Function &F) {
      CallInst *CI = getCallIfRegularCall(U, &RFI);
      BB2PRMap[CI->getParent()].insert(CI);

      return false;
    };

    BB2PRMap.clear();
    RFI.foreachUse(SCC, DetectPRsCB);
    SmallVector<SmallVector<CallInst *, 4>, 4> MergableCIsVector;
    // Find mergable parallel regions within a basic block that are
    // safe to merge, that is, any in-between instructions can safely
    // execute in parallel after merging.
    // TODO: support merging across basic-blocks.
    for (auto &It : BB2PRMap) {
      auto &CIs = It.getSecond();
      if (CIs.size() < 2)
        continue;

      BasicBlock *BB = It.getFirst();
      SmallVector<CallInst *, 4> MergableCIs;

      /// Returns true if the instruction is mergable, false otherwise.
      /// A terminator instruction is unmergable by definition since merging
      /// works within a BB. Instructions before the mergable region are
      /// mergable if they are not calls to OpenMP runtime functions that may
      /// set different execution parameters for subsequent parallel regions.
      /// Instructions in-between parallel regions are mergable if they are not
      /// calls to any non-intrinsic function since that may call a non-mergable
      /// OpenMP runtime function.
      auto IsMergable = [&](Instruction &I, bool IsBeforeMergableRegion) {
        // We do not merge across BBs, hence return false (unmergable) if the
        // instruction is a terminator.
        if (I.isTerminator())
          return false;

        if (!isa<CallInst>(&I))
          return true;

        CallInst *CI = cast<CallInst>(&I);
        if (IsBeforeMergableRegion) {
          Function *CalledFunction = CI->getCalledFunction();
          if (!CalledFunction)
            return false;
          // Return false (unmergable) if the call before the parallel
          // region calls an explicit affinity (proc_bind) or number of
          // threads (num_threads) compiler-generated function. Those settings
          // may be incompatible with following parallel regions.
          // TODO: ICV tracking to detect compatibility.
          for (const auto &RFI : UnmergableCallsInfo) {
            if (CalledFunction == RFI.Declaration)
              return false;
          }
        } else {
          // Return false (unmergable) if there is a call instruction
          // in-between parallel regions when it is not an intrinsic. It
          // may call an unmergable OpenMP runtime function in its callpath.
          // TODO: Keep track of possible OpenMP calls in the callpath.
          if (!isa<IntrinsicInst>(CI))
            return false;
        }

        return true;
      };
      // Find maximal number of parallel region CIs that are safe to merge.
      for (auto It = BB->begin(), End = BB->end(); It != End;) {
        Instruction &I = *It;
        ++It;

        if (CIs.count(&I)) {
          MergableCIs.push_back(cast<CallInst>(&I));
          continue;
        }

        // Continue expanding if the instruction is mergable.
        if (IsMergable(I, MergableCIs.empty()))
          continue;

        // Forward the instruction iterator to skip the next parallel region
        // since there is an unmergable instruction which can affect it.
        for (; It != End; ++It) {
          Instruction &SkipI = *It;
          if (CIs.count(&SkipI)) {
            LLVM_DEBUG(dbgs() << TAG << "Skip parallel region " << SkipI
                              << " due to " << I << "\n");
            ++It;
            break;
          }
        }

        // Store mergable regions found.
        if (MergableCIs.size() > 1) {
          MergableCIsVector.push_back(MergableCIs);
          LLVM_DEBUG(dbgs() << TAG << "Found " << MergableCIs.size()
                            << " parallel regions in block " << BB->getName()
                            << " of function " << BB->getParent()->getName()
                            << "\n";);
        }

        MergableCIs.clear();
      }

      if (!MergableCIsVector.empty()) {
        Changed = true;

        for (auto &MergableCIs : MergableCIsVector)
          Merge(MergableCIs, BB);
        MergableCIsVector.clear();
      }
    }

    if (Changed) {
      /// Re-collect uses for fork calls, emitted barrier calls, and
      /// any emitted master/end_master calls.
      OMPInfoCache.recollectUsesForFunction(OMPRTL___kmpc_fork_call);
      OMPInfoCache.recollectUsesForFunction(OMPRTL___kmpc_barrier);
      OMPInfoCache.recollectUsesForFunction(OMPRTL___kmpc_master);
      OMPInfoCache.recollectUsesForFunction(OMPRTL___kmpc_end_master);
    }

    return Changed;
  }

  /// Try to delete parallel regions if possible.
  bool deleteParallelRegions() {
    const unsigned CallbackCalleeOperand = 2;

    OMPInformationCache::RuntimeFunctionInfo &RFI =
        OMPInfoCache.RFIs[OMPRTL___kmpc_fork_call];

    if (!RFI.Declaration)
      return false;

    bool Changed = false;
    auto DeleteCallCB = [&](Use &U, Function &) {
      CallInst *CI = getCallIfRegularCall(U);
      if (!CI)
        return false;
      auto *Fn = dyn_cast<Function>(
          CI->getArgOperand(CallbackCalleeOperand)->stripPointerCasts());
      if (!Fn)
        return false;
      if (!Fn->onlyReadsMemory())
        return false;
      if (!Fn->hasFnAttribute(Attribute::WillReturn))
        return false;

      LLVM_DEBUG(dbgs() << TAG << "Delete read-only parallel region in "
                        << CI->getCaller()->getName() << "\n");

      auto Remark = [&](OptimizationRemark OR) {
        return OR << "Removing parallel region with no side-effects.";
      };
      emitRemark<OptimizationRemark>(CI, "OMP160", Remark);

      CGUpdater.removeCallSite(*CI);
      CI->eraseFromParent();
      Changed = true;
      ++NumOpenMPParallelRegionsDeleted;
      return true;
    };

    RFI.foreachUse(SCC, DeleteCallCB);

    return Changed;
  }
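  // For illustration (hypothetical source): a parallel region whose outlined
  // body only reads memory and is guaranteed to return has no observable
  // effect, so the __kmpc_fork_call that launches it can be erased:
  //   #pragma omp parallel
  //   { (void)shared_data[0]; } // no stores, no synchronization => deletable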
  /// Try to eliminate runtime calls by reusing existing ones.
  bool deduplicateRuntimeCalls() {
    bool Changed = false;

    RuntimeFunction DeduplicableRuntimeCallIDs[] = {
        OMPRTL_omp_get_num_threads,
        OMPRTL_omp_in_parallel,
        OMPRTL_omp_get_cancellation,
        OMPRTL_omp_get_thread_limit,
        OMPRTL_omp_get_supported_active_levels,
        OMPRTL_omp_get_level,
        OMPRTL_omp_get_ancestor_thread_num,
        OMPRTL_omp_get_team_size,
        OMPRTL_omp_get_active_level,
        OMPRTL_omp_in_final,
        OMPRTL_omp_get_proc_bind,
        OMPRTL_omp_get_num_places,
        OMPRTL_omp_get_num_procs,
        OMPRTL_omp_get_place_num,
        OMPRTL_omp_get_partition_num_places,
        OMPRTL_omp_get_partition_place_nums};

    // Global-tid is handled separately.
    SmallSetVector<Value *, 16> GTIdArgs;
    collectGlobalThreadIdArguments(GTIdArgs);
    LLVM_DEBUG(dbgs() << TAG << "Found " << GTIdArgs.size()
                      << " global thread ID arguments\n");

    for (Function *F : SCC) {
      for (auto DeduplicableRuntimeCallID : DeduplicableRuntimeCallIDs)
        Changed |= deduplicateRuntimeCalls(
            *F, OMPInfoCache.RFIs[DeduplicableRuntimeCallID]);

      // __kmpc_global_thread_num is special as we can replace it with an
      // argument in enough cases to make it worth trying.
      Value *GTIdArg = nullptr;
      for (Argument &Arg : F->args())
        if (GTIdArgs.count(&Arg)) {
          GTIdArg = &Arg;
          break;
        }
      Changed |= deduplicateRuntimeCalls(
          *F, OMPInfoCache.RFIs[OMPRTL___kmpc_global_thread_num], GTIdArg);
    }

    return Changed;
  }
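  // For illustration (hypothetical source): repeated side-effect-free runtime
  // queries within one function collapse into a single call whose result is
  // reused:
  //   int a = omp_get_num_threads();
  //   ...                            // nothing here changes the team size
  //   int b = omp_get_num_threads(); // replaced by the first call's result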
The "issue" issues the memory transfer 1352 /// asynchronously, returning a handle. The "wait" waits in the returned 1353 /// handle for the memory transfer to finish. 1354 bool hideMemTransfersLatency() { 1355 auto &RFI = OMPInfoCache.RFIs[OMPRTL___tgt_target_data_begin_mapper]; 1356 bool Changed = false; 1357 auto SplitMemTransfers = [&](Use &U, Function &Decl) { 1358 auto *RTCall = getCallIfRegularCall(U, &RFI); 1359 if (!RTCall) 1360 return false; 1361 1362 OffloadArray OffloadArrays[3]; 1363 if (!getValuesInOffloadArrays(*RTCall, OffloadArrays)) 1364 return false; 1365 1366 LLVM_DEBUG(dumpValuesInOffloadArrays(OffloadArrays)); 1367 1368 // TODO: Check if can be moved upwards. 1369 bool WasSplit = false; 1370 Instruction *WaitMovementPoint = canBeMovedDownwards(*RTCall); 1371 if (WaitMovementPoint) 1372 WasSplit = splitTargetDataBeginRTC(*RTCall, *WaitMovementPoint); 1373 1374 Changed |= WasSplit; 1375 return WasSplit; 1376 }; 1377 RFI.foreachUse(SCC, SplitMemTransfers); 1378 1379 return Changed; 1380 } 1381 1382 void analysisGlobalization() { 1383 auto &RFI = OMPInfoCache.RFIs[OMPRTL___kmpc_alloc_shared]; 1384 1385 auto CheckGlobalization = [&](Use &U, Function &Decl) { 1386 if (CallInst *CI = getCallIfRegularCall(U, &RFI)) { 1387 auto Remark = [&](OptimizationRemarkMissed ORM) { 1388 return ORM 1389 << "Found thread data sharing on the GPU. " 1390 << "Expect degraded performance due to data globalization."; 1391 }; 1392 emitRemark<OptimizationRemarkMissed>(CI, "OMP112", Remark); 1393 } 1394 1395 return false; 1396 }; 1397 1398 RFI.foreachUse(SCC, CheckGlobalization); 1399 } 1400 1401 /// Maps the values stored in the offload arrays passed as arguments to 1402 /// \p RuntimeCall into the offload arrays in \p OAs. 1403 bool getValuesInOffloadArrays(CallInst &RuntimeCall, 1404 MutableArrayRef<OffloadArray> OAs) { 1405 assert(OAs.size() == 3 && "Need space for three offload arrays!"); 1406 1407 // A runtime call that involves memory offloading looks something like: 1408 // call void @__tgt_target_data_begin_mapper(arg0, arg1, 1409 // i8** %offload_baseptrs, i8** %offload_ptrs, i64* %offload_sizes, 1410 // ...) 1411 // So, the idea is to access the allocas that allocate space for these 1412 // offload arrays, offload_baseptrs, offload_ptrs, offload_sizes. 1413 // Therefore: 1414 // i8** %offload_baseptrs. 1415 Value *BasePtrsArg = 1416 RuntimeCall.getArgOperand(OffloadArray::BasePtrsArgNum); 1417 // i8** %offload_ptrs. 1418 Value *PtrsArg = RuntimeCall.getArgOperand(OffloadArray::PtrsArgNum); 1419 // i8** %offload_sizes. 1420 Value *SizesArg = RuntimeCall.getArgOperand(OffloadArray::SizesArgNum); 1421 1422 // Get values stored in **offload_baseptrs. 1423 auto *V = getUnderlyingObject(BasePtrsArg); 1424 if (!isa<AllocaInst>(V)) 1425 return false; 1426 auto *BasePtrsArray = cast<AllocaInst>(V); 1427 if (!OAs[0].initialize(*BasePtrsArray, RuntimeCall)) 1428 return false; 1429 1430 // Get values stored in **offload_baseptrs. 1431 V = getUnderlyingObject(PtrsArg); 1432 if (!isa<AllocaInst>(V)) 1433 return false; 1434 auto *PtrsArray = cast<AllocaInst>(V); 1435 if (!OAs[1].initialize(*PtrsArray, RuntimeCall)) 1436 return false; 1437 1438 // Get values stored in **offload_sizes. 1439 V = getUnderlyingObject(SizesArg); 1440 // If it's a [constant] global array don't analyze it. 
    if (isa<GlobalValue>(V))
      return isa<Constant>(V);
    if (!isa<AllocaInst>(V))
      return false;

    auto *SizesArray = cast<AllocaInst>(V);
    if (!OAs[2].initialize(*SizesArray, RuntimeCall))
      return false;

    return true;
  }

  /// Prints the values in the OffloadArrays \p OAs using LLVM_DEBUG.
  /// For now this is a way to test that the function getValuesInOffloadArrays
  /// is working properly.
  /// TODO: Move this to a unittest when unittests are available for OpenMPOpt.
  void dumpValuesInOffloadArrays(ArrayRef<OffloadArray> OAs) {
    assert(OAs.size() == 3 && "There are three offload arrays to debug!");

    LLVM_DEBUG(dbgs() << TAG << " Successfully got offload values:\n");
    std::string ValuesStr;
    raw_string_ostream Printer(ValuesStr);
    std::string Separator = " --- ";

    for (auto *BP : OAs[0].StoredValues) {
      BP->print(Printer);
      Printer << Separator;
    }
    LLVM_DEBUG(dbgs() << "\t\toffload_baseptrs: " << Printer.str() << "\n");
    ValuesStr.clear();

    for (auto *P : OAs[1].StoredValues) {
      P->print(Printer);
      Printer << Separator;
    }
    LLVM_DEBUG(dbgs() << "\t\toffload_ptrs: " << Printer.str() << "\n");
    ValuesStr.clear();

    for (auto *S : OAs[2].StoredValues) {
      S->print(Printer);
      Printer << Separator;
    }
    LLVM_DEBUG(dbgs() << "\t\toffload_sizes: " << Printer.str() << "\n");
  }

  /// Returns the instruction where the "wait" counterpart of \p RuntimeCall
  /// can be moved. Returns nullptr if the movement is not possible, or not
  /// worth it.
  Instruction *canBeMovedDownwards(CallInst &RuntimeCall) {
    // FIXME: This traverses only the BasicBlock where RuntimeCall is.
    //  Make it traverse the CFG.

    Instruction *CurrentI = &RuntimeCall;
    bool IsWorthIt = false;
    while ((CurrentI = CurrentI->getNextNode())) {

      // TODO: Once we detect the regions to be offloaded we should use the
      //  alias analysis manager to check if CurrentI may modify one of
      //  the offloaded regions.
      if (CurrentI->mayHaveSideEffects() || CurrentI->mayReadFromMemory()) {
        if (IsWorthIt)
          return CurrentI;

        return nullptr;
      }

      // FIXME: For now, moving the call over anything without side effects
      //  is considered worth it.
      IsWorthIt = true;
    }

    // Return end of BasicBlock.
    return RuntimeCall.getParent()->getTerminator();
  }

  /// Splits \p RuntimeCall into its "issue" and "wait" counterparts.
  bool splitTargetDataBeginRTC(CallInst &RuntimeCall,
                               Instruction &WaitMovementPoint) {
    // Create stack allocated handle (__tgt_async_info) at the beginning of
    // the function. Used for storing information of the async transfer,
    // allowing us to wait on it later.
    auto &IRBuilder = OMPInfoCache.OMPBuilder;
    auto *F = RuntimeCall.getCaller();
    Instruction *FirstInst = &(F->getEntryBlock().front());
    AllocaInst *Handle = new AllocaInst(
        IRBuilder.AsyncInfo, F->getAddressSpace(), "handle", FirstInst);

    // Add "issue" runtime call declaration:
    // declare %struct.tgt_async_info @__tgt_target_data_begin_issue(i64, i32,
    //   i8**, i8**, i64*, i64*)
    FunctionCallee IssueDecl = IRBuilder.getOrCreateRuntimeFunction(
        M, OMPRTL___tgt_target_data_begin_mapper_issue);

    // Change the RuntimeCall call site to its asynchronous version.
    SmallVector<Value *, 16> Args;
    for (auto &Arg : RuntimeCall.args())
      Args.push_back(Arg.get());
    Args.push_back(Handle);

    CallInst *IssueCallsite =
        CallInst::Create(IssueDecl, Args, /*NameStr=*/"", &RuntimeCall);
    RuntimeCall.eraseFromParent();

    // Add "wait" runtime call declaration:
    // declare void @__tgt_target_data_begin_wait(i64,
    //   %struct.__tgt_async_info)
    FunctionCallee WaitDecl = IRBuilder.getOrCreateRuntimeFunction(
        M, OMPRTL___tgt_target_data_begin_mapper_wait);

    Value *WaitParams[2] = {
        IssueCallsite->getArgOperand(
            OffloadArray::DeviceIDArgNum), // device_id.
        Handle                             // handle to wait on.
    };
    CallInst::Create(WaitDecl, WaitParams, /*NameStr=*/"", &WaitMovementPoint);

    return true;
  }
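  // For illustration, a hypothetical before/after of the split:
  //   call void @__tgt_target_data_begin_mapper(...)
  // becomes
  //   %handle = alloca %struct.__tgt_async_info
  //   call void @__tgt_target_data_begin_mapper_issue(..., %handle)
  //   ...                          ; independent code the "wait" is moved over
  //   call void @__tgt_target_data_begin_mapper_wait(%device_id, %handle)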
  static Value *combinedIdentStruct(Value *CurrentIdent, Value *NextIdent,
                                    bool GlobalOnly, bool &SingleChoice) {
    if (CurrentIdent == NextIdent)
      return CurrentIdent;

    // TODO: Figure out how to actually combine multiple debug locations. For
    //       now we just keep an existing one if there is a single choice.
    if (!GlobalOnly || isa<GlobalValue>(NextIdent)) {
      SingleChoice = !CurrentIdent;
      return NextIdent;
    }
    return nullptr;
  }

  /// Return a `struct ident_t*` value that represents the ones used in the
  /// calls of \p RFI inside of \p F. If \p GlobalOnly is true, we will not
  /// return a local `struct ident_t*`. For now, if we cannot find a suitable
  /// return value we create one from scratch. We also do not yet combine
  /// information, e.g., the source locations, see combinedIdentStruct.
  Value *
  getCombinedIdentFromCallUsesIn(OMPInformationCache::RuntimeFunctionInfo &RFI,
                                 Function &F, bool GlobalOnly) {
    bool SingleChoice = true;
    Value *Ident = nullptr;
    auto CombineIdentStruct = [&](Use &U, Function &Caller) {
      CallInst *CI = getCallIfRegularCall(U, &RFI);
      if (!CI || &F != &Caller)
        return false;
      Ident = combinedIdentStruct(Ident, CI->getArgOperand(0),
                                  /* GlobalOnly */ true, SingleChoice);
      return false;
    };
    RFI.foreachUse(SCC, CombineIdentStruct);

    if (!Ident || !SingleChoice) {
      // The IRBuilder uses the insertion block to get to the module, this is
      // unfortunate but we work around it for now.
      if (!OMPInfoCache.OMPBuilder.getInsertionPoint().getBlock())
        OMPInfoCache.OMPBuilder.updateToLocation(OpenMPIRBuilder::InsertPointTy(
            &F.getEntryBlock(), F.getEntryBlock().begin()));
      // Create a fallback location if none was found.
      // TODO: Use the debug locations of the calls instead.
      Constant *Loc = OMPInfoCache.OMPBuilder.getOrCreateDefaultSrcLocStr();
      Ident = OMPInfoCache.OMPBuilder.getOrCreateIdent(Loc);
    }
    return Ident;
  }
 " with an existing value" : "") << "\n");

    assert((!ReplVal || (isa<Argument>(ReplVal) &&
                         cast<Argument>(ReplVal)->getParent() == &F)) &&
           "Unexpected replacement value!");

    // TODO: Use dominance to find a good position instead.
    auto CanBeMoved = [this](CallBase &CB) {
      unsigned NumArgs = CB.arg_size();
      if (NumArgs == 0)
        return true;
      if (CB.getArgOperand(0)->getType() != OMPInfoCache.OMPBuilder.IdentPtr)
        return false;
      for (unsigned U = 1; U < NumArgs; ++U)
        if (isa<Instruction>(CB.getArgOperand(U)))
          return false;
      return true;
    };

    if (!ReplVal) {
      for (Use *U : *UV)
        if (CallInst *CI = getCallIfRegularCall(*U, &RFI)) {
          if (!CanBeMoved(*CI))
            continue;

          // If the function is a kernel, deduplication will move the runtime
          // call right after the kernel init callsite. Otherwise, it will move
          // it to the beginning of the caller function.
          if (isKernel(F)) {
            auto &KernelInitRFI = OMPInfoCache.RFIs[OMPRTL___kmpc_target_init];
            auto *KernelInitUV = KernelInitRFI.getUseVector(F);

            if (KernelInitUV->empty())
              continue;

            assert(KernelInitUV->size() == 1 &&
                   "Expected a single __kmpc_target_init in kernel\n");

            CallInst *KernelInitCI =
                getCallIfRegularCall(*KernelInitUV->front(), &KernelInitRFI);
            assert(KernelInitCI &&
                   "Expected a call to __kmpc_target_init in kernel\n");

            CI->moveAfter(KernelInitCI);
          } else
            CI->moveBefore(&*F.getEntryBlock().getFirstInsertionPt());
          ReplVal = CI;
          break;
        }
      if (!ReplVal)
        return false;
    }

    // If we use a call as a replacement value we need to make sure the ident
    // is valid at the new location. For now we just pick a global one, either
    // existing and used by one of the calls, or created from scratch.
    if (CallBase *CI = dyn_cast<CallBase>(ReplVal)) {
      if (!CI->arg_empty() &&
          CI->getArgOperand(0)->getType() == OMPInfoCache.OMPBuilder.IdentPtr) {
        Value *Ident = getCombinedIdentFromCallUsesIn(RFI, F,
                                                      /* GlobalOnly */ true);
        CI->setArgOperand(0, Ident);
      }
    }

    bool Changed = false;
    auto ReplaceAndDeleteCB = [&](Use &U, Function &Caller) {
      CallInst *CI = getCallIfRegularCall(U, &RFI);
      if (!CI || CI == ReplVal || &F != &Caller)
        return false;
      assert(CI->getCaller() == &F && "Unexpected call!");

      auto Remark = [&](OptimizationRemark OR) {
        return OR << "OpenMP runtime call "
                  << ore::NV("OpenMPOptRuntime", RFI.Name) << " deduplicated.";
      };
      if (CI->getDebugLoc())
        emitRemark<OptimizationRemark>(CI, "OMP170", Remark);
      else
        emitRemark<OptimizationRemark>(&F, "OMP170", Remark);

      CGUpdater.removeCallSite(*CI);
      CI->replaceAllUsesWith(ReplVal);
      CI->eraseFromParent();
      ++NumOpenMPRuntimeCallsDeduplicated;
      Changed = true;
      return true;
    };
    RFI.foreachUse(SCC, ReplaceAndDeleteCB);

    return Changed;
  }

  /// Collect arguments that represent the global thread id in \p GTIdArgs.
  void collectGlobalThreadIdArguments(SmallSetVector<Value *, 16> &GTIdArgs) {
    // TODO: Below we basically perform a fixpoint iteration with a pessimistic
    //       initialization. We could define an AbstractAttribute instead and
    //       run the Attributor here once it can be run as an SCC pass.

    // Helper to check the argument \p ArgNo at all call sites of \p F for
    // a GTId.
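    // For example (a conceptual sketch; `helper` is a hypothetical internal
    // function, not something this pass looks for by name):
    //   %gtid = call i32 @__kmpc_global_thread_num(%struct.ident_t* @loc)
    //   call void @helper(i32 %gtid)
    // The check below succeeds for `helper`'s parameter if every call site
    // passes a known GTId argument or a __kmpc_global_thread_num result.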
    auto CallArgOpIsGTId = [&](Function &F, unsigned ArgNo, CallInst &RefCI) {
      if (!F.hasLocalLinkage())
        return false;
      for (Use &U : F.uses()) {
        if (CallInst *CI = getCallIfRegularCall(U)) {
          Value *ArgOp = CI->getArgOperand(ArgNo);
          if (CI == &RefCI || GTIdArgs.count(ArgOp) ||
              getCallIfRegularCall(
                  *ArgOp, &OMPInfoCache.RFIs[OMPRTL___kmpc_global_thread_num]))
            continue;
        }
        return false;
      }
      return true;
    };

    // Helper to identify uses of a GTId as GTId arguments.
    auto AddUserArgs = [&](Value &GTId) {
      for (Use &U : GTId.uses())
        if (CallInst *CI = dyn_cast<CallInst>(U.getUser()))
          if (CI->isArgOperand(&U))
            if (Function *Callee = CI->getCalledFunction())
              if (CallArgOpIsGTId(*Callee, U.getOperandNo(), *CI))
                GTIdArgs.insert(Callee->getArg(U.getOperandNo()));
    };

    // The argument users of __kmpc_global_thread_num calls are GTIds.
    OMPInformationCache::RuntimeFunctionInfo &GlobThreadNumRFI =
        OMPInfoCache.RFIs[OMPRTL___kmpc_global_thread_num];

    GlobThreadNumRFI.foreachUse(SCC, [&](Use &U, Function &F) {
      if (CallInst *CI = getCallIfRegularCall(U, &GlobThreadNumRFI))
        AddUserArgs(*CI);
      return false;
    });

    // Transitively search for more arguments by looking at the users of the
    // ones we know already. During the search the GTIdArgs vector is extended
    // so we cannot cache the size nor can we use a range based for.
    for (unsigned U = 0; U < GTIdArgs.size(); ++U)
      AddUserArgs(*GTIdArgs[U]);
  }

  /// Kernel (=GPU) optimizations and utility functions
  ///
  ///{{

  /// Check if \p F is a kernel, hence entry point for target offloading.
  bool isKernel(Function &F) { return OMPInfoCache.Kernels.count(&F); }

  /// Cache to remember the unique kernel for a function.
  DenseMap<Function *, Optional<Kernel>> UniqueKernelMap;

  /// Find the unique kernel that will execute \p F, if any.
  Kernel getUniqueKernelFor(Function &F);

  /// Find the unique kernel that will execute \p I, if any.
  Kernel getUniqueKernelFor(Instruction &I) {
    return getUniqueKernelFor(*I.getFunction());
  }

  /// Rewrite the device (=GPU) code state machine created in non-SPMD mode in
  /// cases where we can avoid taking the address of a function.
  bool rewriteDeviceCodeStateMachine();

  ///
  ///}}

  /// Emit a remark generically.
  ///
  /// This template function can be used to generically emit a remark. The
  /// RemarkKind should be one of the following:
  ///   - OptimizationRemark to indicate a successful optimization attempt
  ///   - OptimizationRemarkMissed to report a failed optimization attempt
  ///   - OptimizationRemarkAnalysis to provide additional information about an
  ///     optimization attempt
  ///
  /// The remark is built using a callback function provided by the caller that
  /// takes a RemarkKind as input and returns a RemarkKind.
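  ///
  /// A typical use, mirroring the deduplication remark emitted above (a
  /// sketch, not an additional call site):
  /// \code
  ///   auto Remark = [&](OptimizationRemark OR) {
  ///     return OR << "OpenMP runtime call deduplicated.";
  ///   };
  ///   emitRemark<OptimizationRemark>(&F, "OMP170", Remark);
  /// \endcode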
  template <typename RemarkKind, typename RemarkCallBack>
  void emitRemark(Instruction *I, StringRef RemarkName,
                  RemarkCallBack &&RemarkCB) const {
    Function *F = I->getParent()->getParent();
    auto &ORE = OREGetter(F);

    if (RemarkName.startswith("OMP"))
      ORE.emit([&]() {
        return RemarkCB(RemarkKind(DEBUG_TYPE, RemarkName, I))
               << " [" << RemarkName << "]";
      });
    else
      ORE.emit(
          [&]() { return RemarkCB(RemarkKind(DEBUG_TYPE, RemarkName, I)); });
  }

  /// Emit a remark on a function.
  template <typename RemarkKind, typename RemarkCallBack>
  void emitRemark(Function *F, StringRef RemarkName,
                  RemarkCallBack &&RemarkCB) const {
    auto &ORE = OREGetter(F);

    if (RemarkName.startswith("OMP"))
      ORE.emit([&]() {
        return RemarkCB(RemarkKind(DEBUG_TYPE, RemarkName, F))
               << " [" << RemarkName << "]";
      });
    else
      ORE.emit(
          [&]() { return RemarkCB(RemarkKind(DEBUG_TYPE, RemarkName, F)); });
  }

  /// RAII struct to temporarily change an RTL function's linkage to external.
  /// This prevents it from being mistakenly removed by other optimizations.
  struct ExternalizationRAII {
    ExternalizationRAII(OMPInformationCache &OMPInfoCache,
                        RuntimeFunction RFKind)
        : Declaration(OMPInfoCache.RFIs[RFKind].Declaration) {
      if (!Declaration)
        return;

      LinkageType = Declaration->getLinkage();
      Declaration->setLinkage(GlobalValue::ExternalLinkage);
    }

    ~ExternalizationRAII() {
      if (!Declaration)
        return;

      Declaration->setLinkage(LinkageType);
    }

    Function *Declaration;
    GlobalValue::LinkageTypes LinkageType;
  };

  /// The underlying module.
  Module &M;

  /// The SCC we are operating on.
  SmallVectorImpl<Function *> &SCC;

  /// Callback to update the call graph, the first argument is a removed call,
  /// the second an optional replacement call.
  CallGraphUpdater &CGUpdater;

  /// Callback to get an OptimizationRemarkEmitter from a Function *.
  OptimizationRemarkGetter OREGetter;

  /// OpenMP-specific information cache. Also used for Attributor runs.
  OMPInformationCache &OMPInfoCache;

  /// Attributor instance.
  Attributor &A;

  /// Helper function to run Attributor on SCC.
  bool runAttributor(bool IsModulePass) {
    if (SCC.empty())
      return false;

    // Temporarily make these functions have external linkage so the Attributor
    // doesn't remove them when we try to look them up later.
1879 ExternalizationRAII Parallel(OMPInfoCache, OMPRTL___kmpc_kernel_parallel); 1880 ExternalizationRAII EndParallel(OMPInfoCache, 1881 OMPRTL___kmpc_kernel_end_parallel); 1882 ExternalizationRAII BarrierSPMD(OMPInfoCache, 1883 OMPRTL___kmpc_barrier_simple_spmd); 1884 ExternalizationRAII BarrierGeneric(OMPInfoCache, 1885 OMPRTL___kmpc_barrier_simple_generic); 1886 ExternalizationRAII ThreadId(OMPInfoCache, 1887 OMPRTL___kmpc_get_hardware_thread_id_in_block); 1888 ExternalizationRAII WarpSize(OMPInfoCache, OMPRTL___kmpc_get_warp_size); 1889 1890 registerAAs(IsModulePass); 1891 1892 ChangeStatus Changed = A.run(); 1893 1894 LLVM_DEBUG(dbgs() << "[Attributor] Done with " << SCC.size() 1895 << " functions, result: " << Changed << ".\n"); 1896 1897 return Changed == ChangeStatus::CHANGED; 1898 } 1899 1900 void registerFoldRuntimeCall(RuntimeFunction RF); 1901 1902 /// Populate the Attributor with abstract attribute opportunities in the 1903 /// function. 1904 void registerAAs(bool IsModulePass); 1905 }; 1906 1907 Kernel OpenMPOpt::getUniqueKernelFor(Function &F) { 1908 if (!OMPInfoCache.ModuleSlice.count(&F)) 1909 return nullptr; 1910 1911 // Use a scope to keep the lifetime of the CachedKernel short. 1912 { 1913 Optional<Kernel> &CachedKernel = UniqueKernelMap[&F]; 1914 if (CachedKernel) 1915 return *CachedKernel; 1916 1917 // TODO: We should use an AA to create an (optimistic and callback 1918 // call-aware) call graph. For now we stick to simple patterns that 1919 // are less powerful, basically the worst fixpoint. 1920 if (isKernel(F)) { 1921 CachedKernel = Kernel(&F); 1922 return *CachedKernel; 1923 } 1924 1925 CachedKernel = nullptr; 1926 if (!F.hasLocalLinkage()) { 1927 1928 // See https://openmp.llvm.org/remarks/OptimizationRemarks.html 1929 auto Remark = [&](OptimizationRemarkAnalysis ORA) { 1930 return ORA << "Potentially unknown OpenMP target region caller."; 1931 }; 1932 emitRemark<OptimizationRemarkAnalysis>(&F, "OMP100", Remark); 1933 1934 return nullptr; 1935 } 1936 } 1937 1938 auto GetUniqueKernelForUse = [&](const Use &U) -> Kernel { 1939 if (auto *Cmp = dyn_cast<ICmpInst>(U.getUser())) { 1940 // Allow use in equality comparisons. 1941 if (Cmp->isEquality()) 1942 return getUniqueKernelFor(*Cmp); 1943 return nullptr; 1944 } 1945 if (auto *CB = dyn_cast<CallBase>(U.getUser())) { 1946 // Allow direct calls. 1947 if (CB->isCallee(&U)) 1948 return getUniqueKernelFor(*CB); 1949 1950 OMPInformationCache::RuntimeFunctionInfo &KernelParallelRFI = 1951 OMPInfoCache.RFIs[OMPRTL___kmpc_parallel_51]; 1952 // Allow the use in __kmpc_parallel_51 calls. 1953 if (OpenMPOpt::getCallIfRegularCall(*U.getUser(), &KernelParallelRFI)) 1954 return getUniqueKernelFor(*CB); 1955 return nullptr; 1956 } 1957 // Disallow every other use. 1958 return nullptr; 1959 }; 1960 1961 // TODO: In the future we want to track more than just a unique kernel. 1962 SmallPtrSet<Kernel, 2> PotentialKernels; 1963 OMPInformationCache::foreachUse(F, [&](const Use &U) { 1964 PotentialKernels.insert(GetUniqueKernelForUse(U)); 1965 }); 1966 1967 Kernel K = nullptr; 1968 if (PotentialKernels.size() == 1) 1969 K = *PotentialKernels.begin(); 1970 1971 // Cache the result. 
1972 UniqueKernelMap[&F] = K; 1973 1974 return K; 1975 } 1976 1977 bool OpenMPOpt::rewriteDeviceCodeStateMachine() { 1978 OMPInformationCache::RuntimeFunctionInfo &KernelParallelRFI = 1979 OMPInfoCache.RFIs[OMPRTL___kmpc_parallel_51]; 1980 1981 bool Changed = false; 1982 if (!KernelParallelRFI) 1983 return Changed; 1984 1985 // If we have disabled state machine changes, exit 1986 if (DisableOpenMPOptStateMachineRewrite) 1987 return Changed; 1988 1989 for (Function *F : SCC) { 1990 1991 // Check if the function is a use in a __kmpc_parallel_51 call at 1992 // all. 1993 bool UnknownUse = false; 1994 bool KernelParallelUse = false; 1995 unsigned NumDirectCalls = 0; 1996 1997 SmallVector<Use *, 2> ToBeReplacedStateMachineUses; 1998 OMPInformationCache::foreachUse(*F, [&](Use &U) { 1999 if (auto *CB = dyn_cast<CallBase>(U.getUser())) 2000 if (CB->isCallee(&U)) { 2001 ++NumDirectCalls; 2002 return; 2003 } 2004 2005 if (isa<ICmpInst>(U.getUser())) { 2006 ToBeReplacedStateMachineUses.push_back(&U); 2007 return; 2008 } 2009 2010 // Find wrapper functions that represent parallel kernels. 2011 CallInst *CI = 2012 OpenMPOpt::getCallIfRegularCall(*U.getUser(), &KernelParallelRFI); 2013 const unsigned int WrapperFunctionArgNo = 6; 2014 if (!KernelParallelUse && CI && 2015 CI->getArgOperandNo(&U) == WrapperFunctionArgNo) { 2016 KernelParallelUse = true; 2017 ToBeReplacedStateMachineUses.push_back(&U); 2018 return; 2019 } 2020 UnknownUse = true; 2021 }); 2022 2023 // Do not emit a remark if we haven't seen a __kmpc_parallel_51 2024 // use. 2025 if (!KernelParallelUse) 2026 continue; 2027 2028 // If this ever hits, we should investigate. 2029 // TODO: Checking the number of uses is not a necessary restriction and 2030 // should be lifted. 2031 if (UnknownUse || NumDirectCalls != 1 || 2032 ToBeReplacedStateMachineUses.size() > 2) { 2033 auto Remark = [&](OptimizationRemarkAnalysis ORA) { 2034 return ORA << "Parallel region is used in " 2035 << (UnknownUse ? "unknown" : "unexpected") 2036 << " ways. Will not attempt to rewrite the state machine."; 2037 }; 2038 emitRemark<OptimizationRemarkAnalysis>(F, "OMP101", Remark); 2039 continue; 2040 } 2041 2042 // Even if we have __kmpc_parallel_51 calls, we (for now) give 2043 // up if the function is not called from a unique kernel. 2044 Kernel K = getUniqueKernelFor(*F); 2045 if (!K) { 2046 auto Remark = [&](OptimizationRemarkAnalysis ORA) { 2047 return ORA << "Parallel region is not called from a unique kernel. " 2048 "Will not attempt to rewrite the state machine."; 2049 }; 2050 emitRemark<OptimizationRemarkAnalysis>(F, "OMP102", Remark); 2051 continue; 2052 } 2053 2054 // We now know F is a parallel body function called only from the kernel K. 2055 // We also identified the state machine uses in which we replace the 2056 // function pointer by a new global symbol for identification purposes. This 2057 // ensures only direct calls to the function are left. 2058 2059 Module &M = *F->getParent(); 2060 Type *Int8Ty = Type::getInt8Ty(M.getContext()); 2061 2062 auto *ID = new GlobalVariable( 2063 M, Int8Ty, /* isConstant */ true, GlobalValue::PrivateLinkage, 2064 UndefValue::get(Int8Ty), F->getName() + ".ID"); 2065 2066 for (Use *U : ToBeReplacedStateMachineUses) 2067 U->set(ConstantExpr::getPointerBitCastOrAddrSpaceCast( 2068 ID, U->get()->getType())); 2069 2070 ++NumOpenMPParallelRegionsReplacedInGPUStateMachine; 2071 2072 Changed = true; 2073 } 2074 2075 return Changed; 2076 } 2077 2078 /// Abstract Attribute for tracking ICV values. 
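/// An ICV (internal control variable) is, e.g., the nthreads-var that backs
/// omp_set_num_threads/omp_get_max_threads. As a sketch of what tracking
/// enables (hypothetical user code; the folding is done by the AAs below):
/// \code
///   omp_set_num_threads(4);     // setter call, tracked value becomes 4
///   n = omp_get_max_threads();  // getter call, may be folded to 4
/// \endcode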
struct AAICVTracker : public StateWrapper<BooleanState, AbstractAttribute> {
  using Base = StateWrapper<BooleanState, AbstractAttribute>;
  AAICVTracker(const IRPosition &IRP, Attributor &A) : Base(IRP) {}

  void initialize(Attributor &A) override {
    Function *F = getAnchorScope();
    if (!F || !A.isFunctionIPOAmendable(*F))
      indicatePessimisticFixpoint();
  }

  /// Returns true if value is assumed to be tracked.
  bool isAssumedTracked() const { return getAssumed(); }

  /// Returns true if value is known to be tracked.
  bool isKnownTracked() const { return getAssumed(); }

  /// Create an abstract attribute view for the position \p IRP.
  static AAICVTracker &createForPosition(const IRPosition &IRP, Attributor &A);

  /// Return the value with which \p I can be replaced for specific \p ICV.
  virtual Optional<Value *> getReplacementValue(InternalControlVar ICV,
                                                const Instruction *I,
                                                Attributor &A) const {
    return None;
  }

  /// Return an assumed unique ICV value if a single candidate is found. If
  /// there cannot be one, return nullptr. If it is not clear yet, return the
  /// Optional::NoneType.
  virtual Optional<Value *>
  getUniqueReplacementValue(InternalControlVar ICV) const = 0;

  // Currently only nthreads is being tracked.
  // This array will only grow over time.
  InternalControlVar TrackableICVs[1] = {ICV_nthreads};

  /// See AbstractAttribute::getName()
  const std::string getName() const override { return "AAICVTracker"; }

  /// See AbstractAttribute::getIdAddr()
  const char *getIdAddr() const override { return &ID; }

  /// This function should return true if the type of the \p AA is AAICVTracker
  static bool classof(const AbstractAttribute *AA) {
    return (AA->getIdAddr() == &ID);
  }

  static const char ID;
};

struct AAICVTrackerFunction : public AAICVTracker {
  AAICVTrackerFunction(const IRPosition &IRP, Attributor &A)
      : AAICVTracker(IRP, A) {}

  // FIXME: come up with better string.
  const std::string getAsStr() const override { return "ICVTrackerFunction"; }

  // FIXME: come up with some stats.
  void trackStatistics() const override {}

  /// We don't manifest anything for this AA.
  ChangeStatus manifest(Attributor &A) override {
    return ChangeStatus::UNCHANGED;
  }

  // Map of ICV to their values at specific program point.
  EnumeratedArray<DenseMap<Instruction *, Value *>, InternalControlVar,
                  InternalControlVar::ICV___last>
      ICVReplacementValuesMap;

  ChangeStatus updateImpl(Attributor &A) override {
    ChangeStatus HasChanged = ChangeStatus::UNCHANGED;

    Function *F = getAnchorScope();

    auto &OMPInfoCache = static_cast<OMPInformationCache &>(A.getInfoCache());

    for (InternalControlVar ICV : TrackableICVs) {
      auto &SetterRFI = OMPInfoCache.RFIs[OMPInfoCache.ICVs[ICV].Setter];

      auto &ValuesMap = ICVReplacementValuesMap[ICV];
      auto TrackValues = [&](Use &U, Function &) {
        CallInst *CI = OpenMPOpt::getCallIfRegularCall(U);
        if (!CI)
          return false;

        // FIXME: handle setters with more than one argument.
        /// Track new value.
        if (ValuesMap.insert(std::make_pair(CI, CI->getArgOperand(0))).second)
          HasChanged = ChangeStatus::CHANGED;

        return false;
      };

      auto CallCheck = [&](Instruction &I) {
        Optional<Value *> ReplVal = getValueForCall(A, &I, ICV);
        if (ReplVal.hasValue() &&
            ValuesMap.insert(std::make_pair(&I, *ReplVal)).second)
          HasChanged = ChangeStatus::CHANGED;

        return true;
      };

      // Track all changes of an ICV.
      SetterRFI.foreachUse(TrackValues, F);

      bool UsedAssumedInformation = false;
      A.checkForAllInstructions(CallCheck, *this, {Instruction::Call},
                                UsedAssumedInformation,
                                /* CheckBBLivenessOnly */ true);

      /// TODO: Figure out a way to avoid adding an entry in
      ///       ICVReplacementValuesMap.
      Instruction *Entry = &F->getEntryBlock().front();
      if (HasChanged == ChangeStatus::CHANGED && !ValuesMap.count(Entry))
        ValuesMap.insert(std::make_pair(Entry, nullptr));
    }

    return HasChanged;
  }

  /// Helper to check if \p I is a call and get the value for it if it is
  /// unique.
  Optional<Value *> getValueForCall(Attributor &A, const Instruction *I,
                                    InternalControlVar &ICV) const {

    const auto *CB = dyn_cast<CallBase>(I);
    if (!CB || CB->hasFnAttr("no_openmp") ||
        CB->hasFnAttr("no_openmp_routines"))
      return None;

    auto &OMPInfoCache = static_cast<OMPInformationCache &>(A.getInfoCache());
    auto &GetterRFI = OMPInfoCache.RFIs[OMPInfoCache.ICVs[ICV].Getter];
    auto &SetterRFI = OMPInfoCache.RFIs[OMPInfoCache.ICVs[ICV].Setter];
    Function *CalledFunction = CB->getCalledFunction();

    // Indirect call, assume ICV changes.
    if (CalledFunction == nullptr)
      return nullptr;
    if (CalledFunction == GetterRFI.Declaration)
      return None;
    if (CalledFunction == SetterRFI.Declaration) {
      if (ICVReplacementValuesMap[ICV].count(I))
        return ICVReplacementValuesMap[ICV].lookup(I);

      return nullptr;
    }

    // Since we don't know, assume it changes the ICV.
    if (CalledFunction->isDeclaration())
      return nullptr;

    const auto &ICVTrackingAA = A.getAAFor<AAICVTracker>(
        *this, IRPosition::callsite_returned(*CB), DepClassTy::REQUIRED);

    if (ICVTrackingAA.isAssumedTracked())
      return ICVTrackingAA.getUniqueReplacementValue(ICV);

    // If we don't know, assume it changes.
    return nullptr;
  }

  // We don't check the unique value for a function, so return None.
  Optional<Value *>
  getUniqueReplacementValue(InternalControlVar ICV) const override {
    return None;
  }

  /// Return the value with which \p I can be replaced for specific \p ICV.
  Optional<Value *> getReplacementValue(InternalControlVar ICV,
                                        const Instruction *I,
                                        Attributor &A) const override {
    const auto &ValuesMap = ICVReplacementValuesMap[ICV];
    if (ValuesMap.count(I))
      return ValuesMap.lookup(I);

    SmallVector<const Instruction *, 16> Worklist;
    SmallPtrSet<const Instruction *, 16> Visited;
    Worklist.push_back(I);

    Optional<Value *> ReplVal;

    while (!Worklist.empty()) {
      const Instruction *CurrInst = Worklist.pop_back_val();
      if (!Visited.insert(CurrInst).second)
        continue;

      const BasicBlock *CurrBB = CurrInst->getParent();

      // Go up and look for all potential setters/calls that might change the
      // ICV.
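      // E.g. (sketch): if the backwards walk first hits a tracked
      // omp_set_num_threads(4) call, the replacement value on this path is 4;
      // if it first hits a call that may change the ICV in unknown ways, the
      // value is unknown (nullptr) on this path.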
      while ((CurrInst = CurrInst->getPrevNode())) {
        if (ValuesMap.count(CurrInst)) {
          Optional<Value *> NewReplVal = ValuesMap.lookup(CurrInst);
          // Unknown value, track new.
          if (!ReplVal.hasValue()) {
            ReplVal = NewReplVal;
            break;
          }

          // If we found a new value, we can't know the ICV value anymore.
          if (NewReplVal.hasValue())
            if (ReplVal != NewReplVal)
              return nullptr;

          break;
        }

        Optional<Value *> NewReplVal = getValueForCall(A, CurrInst, ICV);
        if (!NewReplVal.hasValue())
          continue;

        // Unknown value, track new.
        if (!ReplVal.hasValue()) {
          ReplVal = NewReplVal;
          break;
        }

        // We found a new value (NewReplVal has a value here), so we can't
        // know the ICV value anymore.
        if (ReplVal != NewReplVal)
          return nullptr;
      }

      // If we are in the same BB and we have a value, we are done.
      if (CurrBB == I->getParent() && ReplVal.hasValue())
        return ReplVal;

      // Go through all predecessors and add terminators for analysis.
      for (const BasicBlock *Pred : predecessors(CurrBB))
        if (const Instruction *Terminator = Pred->getTerminator())
          Worklist.push_back(Terminator);
    }

    return ReplVal;
  }
};

struct AAICVTrackerFunctionReturned : AAICVTracker {
  AAICVTrackerFunctionReturned(const IRPosition &IRP, Attributor &A)
      : AAICVTracker(IRP, A) {}

  // FIXME: come up with better string.
  const std::string getAsStr() const override {
    return "ICVTrackerFunctionReturned";
  }

  // FIXME: come up with some stats.
  void trackStatistics() const override {}

  /// We don't manifest anything for this AA.
  ChangeStatus manifest(Attributor &A) override {
    return ChangeStatus::UNCHANGED;
  }

  // Map of ICV to their values at specific program point.
  EnumeratedArray<Optional<Value *>, InternalControlVar,
                  InternalControlVar::ICV___last>
      ICVReplacementValuesMap;

  /// Return the unique replacement value for the given \p ICV, if tracked.
  Optional<Value *>
  getUniqueReplacementValue(InternalControlVar ICV) const override {
    return ICVReplacementValuesMap[ICV];
  }

  ChangeStatus updateImpl(Attributor &A) override {
    ChangeStatus Changed = ChangeStatus::UNCHANGED;
    const auto &ICVTrackingAA = A.getAAFor<AAICVTracker>(
        *this, IRPosition::function(*getAnchorScope()), DepClassTy::REQUIRED);

    if (!ICVTrackingAA.isAssumedTracked())
      return indicatePessimisticFixpoint();

    for (InternalControlVar ICV : TrackableICVs) {
      Optional<Value *> &ReplVal = ICVReplacementValuesMap[ICV];
      Optional<Value *> UniqueICVValue;

      auto CheckReturnInst = [&](Instruction &I) {
        Optional<Value *> NewReplVal =
            ICVTrackingAA.getReplacementValue(ICV, &I, A);

        // If we found a second ICV value there is no unique returned value.
2362 if (UniqueICVValue.hasValue() && UniqueICVValue != NewReplVal) 2363 return false; 2364 2365 UniqueICVValue = NewReplVal; 2366 2367 return true; 2368 }; 2369 2370 bool UsedAssumedInformation = false; 2371 if (!A.checkForAllInstructions(CheckReturnInst, *this, {Instruction::Ret}, 2372 UsedAssumedInformation, 2373 /* CheckBBLivenessOnly */ true)) 2374 UniqueICVValue = nullptr; 2375 2376 if (UniqueICVValue == ReplVal) 2377 continue; 2378 2379 ReplVal = UniqueICVValue; 2380 Changed = ChangeStatus::CHANGED; 2381 } 2382 2383 return Changed; 2384 } 2385 }; 2386 2387 struct AAICVTrackerCallSite : AAICVTracker { 2388 AAICVTrackerCallSite(const IRPosition &IRP, Attributor &A) 2389 : AAICVTracker(IRP, A) {} 2390 2391 void initialize(Attributor &A) override { 2392 Function *F = getAnchorScope(); 2393 if (!F || !A.isFunctionIPOAmendable(*F)) 2394 indicatePessimisticFixpoint(); 2395 2396 // We only initialize this AA for getters, so we need to know which ICV it 2397 // gets. 2398 auto &OMPInfoCache = static_cast<OMPInformationCache &>(A.getInfoCache()); 2399 for (InternalControlVar ICV : TrackableICVs) { 2400 auto ICVInfo = OMPInfoCache.ICVs[ICV]; 2401 auto &Getter = OMPInfoCache.RFIs[ICVInfo.Getter]; 2402 if (Getter.Declaration == getAssociatedFunction()) { 2403 AssociatedICV = ICVInfo.Kind; 2404 return; 2405 } 2406 } 2407 2408 /// Unknown ICV. 2409 indicatePessimisticFixpoint(); 2410 } 2411 2412 ChangeStatus manifest(Attributor &A) override { 2413 if (!ReplVal.hasValue() || !ReplVal.getValue()) 2414 return ChangeStatus::UNCHANGED; 2415 2416 A.changeValueAfterManifest(*getCtxI(), **ReplVal); 2417 A.deleteAfterManifest(*getCtxI()); 2418 2419 return ChangeStatus::CHANGED; 2420 } 2421 2422 // FIXME: come up with better string. 2423 const std::string getAsStr() const override { return "ICVTrackerCallSite"; } 2424 2425 // FIXME: come up with some stats. 2426 void trackStatistics() const override {} 2427 2428 InternalControlVar AssociatedICV; 2429 Optional<Value *> ReplVal; 2430 2431 ChangeStatus updateImpl(Attributor &A) override { 2432 const auto &ICVTrackingAA = A.getAAFor<AAICVTracker>( 2433 *this, IRPosition::function(*getAnchorScope()), DepClassTy::REQUIRED); 2434 2435 // We don't have any information, so we assume it changes the ICV. 2436 if (!ICVTrackingAA.isAssumedTracked()) 2437 return indicatePessimisticFixpoint(); 2438 2439 Optional<Value *> NewReplVal = 2440 ICVTrackingAA.getReplacementValue(AssociatedICV, getCtxI(), A); 2441 2442 if (ReplVal == NewReplVal) 2443 return ChangeStatus::UNCHANGED; 2444 2445 ReplVal = NewReplVal; 2446 return ChangeStatus::CHANGED; 2447 } 2448 2449 // Return the value with which associated value can be replaced for specific 2450 // \p ICV. 2451 Optional<Value *> 2452 getUniqueReplacementValue(InternalControlVar ICV) const override { 2453 return ReplVal; 2454 } 2455 }; 2456 2457 struct AAICVTrackerCallSiteReturned : AAICVTracker { 2458 AAICVTrackerCallSiteReturned(const IRPosition &IRP, Attributor &A) 2459 : AAICVTracker(IRP, A) {} 2460 2461 // FIXME: come up with better string. 2462 const std::string getAsStr() const override { 2463 return "ICVTrackerCallSiteReturned"; 2464 } 2465 2466 // FIXME: come up with some stats. 2467 void trackStatistics() const override {} 2468 2469 /// We don't manifest anything for this AA. 2470 ChangeStatus manifest(Attributor &A) override { 2471 return ChangeStatus::UNCHANGED; 2472 } 2473 2474 // Map of ICV to their values at specific program point. 
2475 EnumeratedArray<Optional<Value *>, InternalControlVar, 2476 InternalControlVar::ICV___last> 2477 ICVReplacementValuesMap; 2478 2479 /// Return the value with which associated value can be replaced for specific 2480 /// \p ICV. 2481 Optional<Value *> 2482 getUniqueReplacementValue(InternalControlVar ICV) const override { 2483 return ICVReplacementValuesMap[ICV]; 2484 } 2485 2486 ChangeStatus updateImpl(Attributor &A) override { 2487 ChangeStatus Changed = ChangeStatus::UNCHANGED; 2488 const auto &ICVTrackingAA = A.getAAFor<AAICVTracker>( 2489 *this, IRPosition::returned(*getAssociatedFunction()), 2490 DepClassTy::REQUIRED); 2491 2492 // We don't have any information, so we assume it changes the ICV. 2493 if (!ICVTrackingAA.isAssumedTracked()) 2494 return indicatePessimisticFixpoint(); 2495 2496 for (InternalControlVar ICV : TrackableICVs) { 2497 Optional<Value *> &ReplVal = ICVReplacementValuesMap[ICV]; 2498 Optional<Value *> NewReplVal = 2499 ICVTrackingAA.getUniqueReplacementValue(ICV); 2500 2501 if (ReplVal == NewReplVal) 2502 continue; 2503 2504 ReplVal = NewReplVal; 2505 Changed = ChangeStatus::CHANGED; 2506 } 2507 return Changed; 2508 } 2509 }; 2510 2511 struct AAExecutionDomainFunction : public AAExecutionDomain { 2512 AAExecutionDomainFunction(const IRPosition &IRP, Attributor &A) 2513 : AAExecutionDomain(IRP, A) {} 2514 2515 const std::string getAsStr() const override { 2516 return "[AAExecutionDomain] " + std::to_string(SingleThreadedBBs.size()) + 2517 "/" + std::to_string(NumBBs) + " BBs thread 0 only."; 2518 } 2519 2520 /// See AbstractAttribute::trackStatistics(). 2521 void trackStatistics() const override {} 2522 2523 void initialize(Attributor &A) override { 2524 Function *F = getAnchorScope(); 2525 for (const auto &BB : *F) 2526 SingleThreadedBBs.insert(&BB); 2527 NumBBs = SingleThreadedBBs.size(); 2528 } 2529 2530 ChangeStatus manifest(Attributor &A) override { 2531 LLVM_DEBUG({ 2532 for (const BasicBlock *BB : SingleThreadedBBs) 2533 dbgs() << TAG << " Basic block @" << getAnchorScope()->getName() << " " 2534 << BB->getName() << " is executed by a single thread.\n"; 2535 }); 2536 return ChangeStatus::UNCHANGED; 2537 } 2538 2539 ChangeStatus updateImpl(Attributor &A) override; 2540 2541 /// Check if an instruction is executed by a single thread. 2542 bool isExecutedByInitialThreadOnly(const Instruction &I) const override { 2543 return isExecutedByInitialThreadOnly(*I.getParent()); 2544 } 2545 2546 bool isExecutedByInitialThreadOnly(const BasicBlock &BB) const override { 2547 return isValidState() && SingleThreadedBBs.contains(&BB); 2548 } 2549 2550 /// Set of basic blocks that are executed by a single thread. 2551 DenseSet<const BasicBlock *> SingleThreadedBBs; 2552 2553 /// Total number of basic blocks in this function. 
2554 long unsigned NumBBs; 2555 }; 2556 2557 ChangeStatus AAExecutionDomainFunction::updateImpl(Attributor &A) { 2558 Function *F = getAnchorScope(); 2559 ReversePostOrderTraversal<Function *> RPOT(F); 2560 auto NumSingleThreadedBBs = SingleThreadedBBs.size(); 2561 2562 bool AllCallSitesKnown; 2563 auto PredForCallSite = [&](AbstractCallSite ACS) { 2564 const auto &ExecutionDomainAA = A.getAAFor<AAExecutionDomain>( 2565 *this, IRPosition::function(*ACS.getInstruction()->getFunction()), 2566 DepClassTy::REQUIRED); 2567 return ACS.isDirectCall() && 2568 ExecutionDomainAA.isExecutedByInitialThreadOnly( 2569 *ACS.getInstruction()); 2570 }; 2571 2572 if (!A.checkForAllCallSites(PredForCallSite, *this, 2573 /* RequiresAllCallSites */ true, 2574 AllCallSitesKnown)) 2575 SingleThreadedBBs.erase(&F->getEntryBlock()); 2576 2577 auto &OMPInfoCache = static_cast<OMPInformationCache &>(A.getInfoCache()); 2578 auto &RFI = OMPInfoCache.RFIs[OMPRTL___kmpc_target_init]; 2579 2580 // Check if the edge into the successor block contains a condition that only 2581 // lets the main thread execute it. 2582 auto IsInitialThreadOnly = [&](BranchInst *Edge, BasicBlock *SuccessorBB) { 2583 if (!Edge || !Edge->isConditional()) 2584 return false; 2585 if (Edge->getSuccessor(0) != SuccessorBB) 2586 return false; 2587 2588 auto *Cmp = dyn_cast<CmpInst>(Edge->getCondition()); 2589 if (!Cmp || !Cmp->isTrueWhenEqual() || !Cmp->isEquality()) 2590 return false; 2591 2592 ConstantInt *C = dyn_cast<ConstantInt>(Cmp->getOperand(1)); 2593 if (!C) 2594 return false; 2595 2596 // Match: -1 == __kmpc_target_init (for non-SPMD kernels only!) 2597 if (C->isAllOnesValue()) { 2598 auto *CB = dyn_cast<CallBase>(Cmp->getOperand(0)); 2599 CB = CB ? OpenMPOpt::getCallIfRegularCall(*CB, &RFI) : nullptr; 2600 if (!CB) 2601 return false; 2602 const int InitModeArgNo = 1; 2603 auto *ModeCI = dyn_cast<ConstantInt>(CB->getOperand(InitModeArgNo)); 2604 return ModeCI && (ModeCI->getSExtValue() & OMP_TGT_EXEC_MODE_GENERIC); 2605 } 2606 2607 if (C->isZero()) { 2608 // Match: 0 == llvm.nvvm.read.ptx.sreg.tid.x() 2609 if (auto *II = dyn_cast<IntrinsicInst>(Cmp->getOperand(0))) 2610 if (II->getIntrinsicID() == Intrinsic::nvvm_read_ptx_sreg_tid_x) 2611 return true; 2612 2613 // Match: 0 == llvm.amdgcn.workitem.id.x() 2614 if (auto *II = dyn_cast<IntrinsicInst>(Cmp->getOperand(0))) 2615 if (II->getIntrinsicID() == Intrinsic::amdgcn_workitem_id_x) 2616 return true; 2617 } 2618 2619 return false; 2620 }; 2621 2622 // Merge all the predecessor states into the current basic block. A basic 2623 // block is executed by a single thread if all of its predecessors are. 2624 auto MergePredecessorStates = [&](BasicBlock *BB) { 2625 if (pred_empty(BB)) 2626 return SingleThreadedBBs.contains(BB); 2627 2628 bool IsInitialThread = true; 2629 for (BasicBlock *PredBB : predecessors(BB)) { 2630 if (!IsInitialThreadOnly(dyn_cast<BranchInst>(PredBB->getTerminator()), 2631 BB)) 2632 IsInitialThread &= SingleThreadedBBs.contains(PredBB); 2633 } 2634 2635 return IsInitialThread; 2636 }; 2637 2638 for (auto *BB : RPOT) { 2639 if (!MergePredecessorStates(BB)) 2640 SingleThreadedBBs.erase(BB); 2641 } 2642 2643 return (NumSingleThreadedBBs == SingleThreadedBBs.size()) 2644 ? ChangeStatus::UNCHANGED 2645 : ChangeStatus::CHANGED; 2646 } 2647 2648 /// Try to replace memory allocation calls called by a single thread with a 2649 /// static buffer of shared memory. 
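/// A sketch of the rewrite this enables (identifiers illustrative):
/// \code
///   %p = call i8* @__kmpc_alloc_shared(i64 8)
///   ...
///   call void @__kmpc_free_shared(i8* %p, i64 8)
/// \endcode
/// becomes a static buffer in the shared address space, with both runtime
/// calls removed:
/// \code
///   @p.shared = internal addrspace(3) global [8 x i8] undef
/// \endcode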
struct AAHeapToShared : public StateWrapper<BooleanState, AbstractAttribute> {
  using Base = StateWrapper<BooleanState, AbstractAttribute>;
  AAHeapToShared(const IRPosition &IRP, Attributor &A) : Base(IRP) {}

  /// Create an abstract attribute view for the position \p IRP.
  static AAHeapToShared &createForPosition(const IRPosition &IRP,
                                           Attributor &A);

  /// Returns true if HeapToShared conversion is assumed to be possible.
  virtual bool isAssumedHeapToShared(CallBase &CB) const = 0;

  /// Returns true if HeapToShared conversion is assumed and the CB is a
  /// callsite to a free operation to be removed.
  virtual bool isAssumedHeapToSharedRemovedFree(CallBase &CB) const = 0;

  /// See AbstractAttribute::getName().
  const std::string getName() const override { return "AAHeapToShared"; }

  /// See AbstractAttribute::getIdAddr().
  const char *getIdAddr() const override { return &ID; }

  /// This function should return true if the type of the \p AA is
  /// AAHeapToShared.
  static bool classof(const AbstractAttribute *AA) {
    return (AA->getIdAddr() == &ID);
  }

  /// Unique ID (due to the unique address)
  static const char ID;
};

struct AAHeapToSharedFunction : public AAHeapToShared {
  AAHeapToSharedFunction(const IRPosition &IRP, Attributor &A)
      : AAHeapToShared(IRP, A) {}

  const std::string getAsStr() const override {
    return "[AAHeapToShared] " + std::to_string(MallocCalls.size()) +
           " malloc calls eligible.";
  }

  /// See AbstractAttribute::trackStatistics().
  void trackStatistics() const override {}

  /// This function finds free calls that will be removed by the
  /// HeapToShared transformation.
  void findPotentialRemovedFreeCalls(Attributor &A) {
    auto &OMPInfoCache = static_cast<OMPInformationCache &>(A.getInfoCache());
    auto &FreeRFI = OMPInfoCache.RFIs[OMPRTL___kmpc_free_shared];

    PotentialRemovedFreeCalls.clear();
    // Update free call users of found malloc calls.
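    // Note: only allocations whose pointer reaches exactly one
    // __kmpc_free_shared call site are recorded; multiple frees (e.g., on
    // different control-flow paths) are conservatively skipped below.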
2701 for (CallBase *CB : MallocCalls) { 2702 SmallVector<CallBase *, 4> FreeCalls; 2703 for (auto *U : CB->users()) { 2704 CallBase *C = dyn_cast<CallBase>(U); 2705 if (C && C->getCalledFunction() == FreeRFI.Declaration) 2706 FreeCalls.push_back(C); 2707 } 2708 2709 if (FreeCalls.size() != 1) 2710 continue; 2711 2712 PotentialRemovedFreeCalls.insert(FreeCalls.front()); 2713 } 2714 } 2715 2716 void initialize(Attributor &A) override { 2717 auto &OMPInfoCache = static_cast<OMPInformationCache &>(A.getInfoCache()); 2718 auto &RFI = OMPInfoCache.RFIs[OMPRTL___kmpc_alloc_shared]; 2719 2720 for (User *U : RFI.Declaration->users()) 2721 if (CallBase *CB = dyn_cast<CallBase>(U)) 2722 MallocCalls.insert(CB); 2723 2724 findPotentialRemovedFreeCalls(A); 2725 } 2726 2727 bool isAssumedHeapToShared(CallBase &CB) const override { 2728 return isValidState() && MallocCalls.count(&CB); 2729 } 2730 2731 bool isAssumedHeapToSharedRemovedFree(CallBase &CB) const override { 2732 return isValidState() && PotentialRemovedFreeCalls.count(&CB); 2733 } 2734 2735 ChangeStatus manifest(Attributor &A) override { 2736 if (MallocCalls.empty()) 2737 return ChangeStatus::UNCHANGED; 2738 2739 auto &OMPInfoCache = static_cast<OMPInformationCache &>(A.getInfoCache()); 2740 auto &FreeCall = OMPInfoCache.RFIs[OMPRTL___kmpc_free_shared]; 2741 2742 Function *F = getAnchorScope(); 2743 auto *HS = A.lookupAAFor<AAHeapToStack>(IRPosition::function(*F), this, 2744 DepClassTy::OPTIONAL); 2745 2746 ChangeStatus Changed = ChangeStatus::UNCHANGED; 2747 for (CallBase *CB : MallocCalls) { 2748 // Skip replacing this if HeapToStack has already claimed it. 2749 if (HS && HS->isAssumedHeapToStack(*CB)) 2750 continue; 2751 2752 // Find the unique free call to remove it. 2753 SmallVector<CallBase *, 4> FreeCalls; 2754 for (auto *U : CB->users()) { 2755 CallBase *C = dyn_cast<CallBase>(U); 2756 if (C && C->getCalledFunction() == FreeCall.Declaration) 2757 FreeCalls.push_back(C); 2758 } 2759 if (FreeCalls.size() != 1) 2760 continue; 2761 2762 ConstantInt *AllocSize = dyn_cast<ConstantInt>(CB->getArgOperand(0)); 2763 2764 LLVM_DEBUG(dbgs() << TAG << "Replace globalization call " << *CB 2765 << " with " << AllocSize->getZExtValue() 2766 << " bytes of shared memory\n"); 2767 2768 // Create a new shared memory buffer of the same size as the allocation 2769 // and replace all the uses of the original allocation with it. 2770 Module *M = CB->getModule(); 2771 Type *Int8Ty = Type::getInt8Ty(M->getContext()); 2772 Type *Int8ArrTy = ArrayType::get(Int8Ty, AllocSize->getZExtValue()); 2773 auto *SharedMem = new GlobalVariable( 2774 *M, Int8ArrTy, /* IsConstant */ false, GlobalValue::InternalLinkage, 2775 UndefValue::get(Int8ArrTy), CB->getName(), nullptr, 2776 GlobalValue::NotThreadLocal, 2777 static_cast<unsigned>(AddressSpace::Shared)); 2778 auto *NewBuffer = 2779 ConstantExpr::getPointerCast(SharedMem, Int8Ty->getPointerTo()); 2780 2781 auto Remark = [&](OptimizationRemark OR) { 2782 return OR << "Replaced globalized variable with " 2783 << ore::NV("SharedMemory", AllocSize->getZExtValue()) 2784 << ((AllocSize->getZExtValue() != 1) ? 
" bytes " : " byte ") 2785 << "of shared memory."; 2786 }; 2787 A.emitRemark<OptimizationRemark>(CB, "OMP111", Remark); 2788 2789 MaybeAlign Alignment = CB->getRetAlign(); 2790 assert(Alignment && 2791 "HeapToShared on allocation without alignment attribute"); 2792 SharedMem->setAlignment(MaybeAlign(Alignment)); 2793 2794 A.changeValueAfterManifest(*CB, *NewBuffer); 2795 A.deleteAfterManifest(*CB); 2796 A.deleteAfterManifest(*FreeCalls.front()); 2797 2798 NumBytesMovedToSharedMemory += AllocSize->getZExtValue(); 2799 Changed = ChangeStatus::CHANGED; 2800 } 2801 2802 return Changed; 2803 } 2804 2805 ChangeStatus updateImpl(Attributor &A) override { 2806 auto &OMPInfoCache = static_cast<OMPInformationCache &>(A.getInfoCache()); 2807 auto &RFI = OMPInfoCache.RFIs[OMPRTL___kmpc_alloc_shared]; 2808 Function *F = getAnchorScope(); 2809 2810 auto NumMallocCalls = MallocCalls.size(); 2811 2812 // Only consider malloc calls executed by a single thread with a constant. 2813 for (User *U : RFI.Declaration->users()) { 2814 const auto &ED = A.getAAFor<AAExecutionDomain>( 2815 *this, IRPosition::function(*F), DepClassTy::REQUIRED); 2816 if (CallBase *CB = dyn_cast<CallBase>(U)) 2817 if (!isa<ConstantInt>(CB->getArgOperand(0)) || 2818 !ED.isExecutedByInitialThreadOnly(*CB)) 2819 MallocCalls.erase(CB); 2820 } 2821 2822 findPotentialRemovedFreeCalls(A); 2823 2824 if (NumMallocCalls != MallocCalls.size()) 2825 return ChangeStatus::CHANGED; 2826 2827 return ChangeStatus::UNCHANGED; 2828 } 2829 2830 /// Collection of all malloc calls in a function. 2831 SmallPtrSet<CallBase *, 4> MallocCalls; 2832 /// Collection of potentially removed free calls in a function. 2833 SmallPtrSet<CallBase *, 4> PotentialRemovedFreeCalls; 2834 }; 2835 2836 struct AAKernelInfo : public StateWrapper<KernelInfoState, AbstractAttribute> { 2837 using Base = StateWrapper<KernelInfoState, AbstractAttribute>; 2838 AAKernelInfo(const IRPosition &IRP, Attributor &A) : Base(IRP) {} 2839 2840 /// Statistics are tracked as part of manifest for now. 2841 void trackStatistics() const override {} 2842 2843 /// See AbstractAttribute::getAsStr() 2844 const std::string getAsStr() const override { 2845 if (!isValidState()) 2846 return "<invalid>"; 2847 return std::string(SPMDCompatibilityTracker.isAssumed() ? "SPMD" 2848 : "generic") + 2849 std::string(SPMDCompatibilityTracker.isAtFixpoint() ? " [FIX]" 2850 : "") + 2851 std::string(" #PRs: ") + 2852 (ReachedKnownParallelRegions.isValidState() 2853 ? std::to_string(ReachedKnownParallelRegions.size()) 2854 : "<invalid>") + 2855 ", #Unknown PRs: " + 2856 (ReachedUnknownParallelRegions.isValidState() 2857 ? std::to_string(ReachedUnknownParallelRegions.size()) 2858 : "<invalid>") + 2859 ", #Reaching Kernels: " + 2860 (ReachingKernelEntries.isValidState() 2861 ? std::to_string(ReachingKernelEntries.size()) 2862 : "<invalid>"); 2863 } 2864 2865 /// Create an abstract attribute biew for the position \p IRP. 
  static AAKernelInfo &createForPosition(const IRPosition &IRP, Attributor &A);

  /// See AbstractAttribute::getName()
  const std::string getName() const override { return "AAKernelInfo"; }

  /// See AbstractAttribute::getIdAddr()
  const char *getIdAddr() const override { return &ID; }

  /// This function should return true if the type of the \p AA is AAKernelInfo
  static bool classof(const AbstractAttribute *AA) {
    return (AA->getIdAddr() == &ID);
  }

  static const char ID;
};

/// The function kernel info abstract attribute, basically, what can we say
/// about a function with regards to the KernelInfoState.
struct AAKernelInfoFunction : AAKernelInfo {
  AAKernelInfoFunction(const IRPosition &IRP, Attributor &A)
      : AAKernelInfo(IRP, A) {}

  SmallPtrSet<Instruction *, 4> GuardedInstructions;

  SmallPtrSetImpl<Instruction *> &getGuardedInstructions() {
    return GuardedInstructions;
  }

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    // This is a high-level transform that might change the constant arguments
    // of the init and deinit calls. We need to tell the Attributor about this
    // to avoid other parts using the current constant value for
    // simplification.
    auto &OMPInfoCache = static_cast<OMPInformationCache &>(A.getInfoCache());

    Function *Fn = getAnchorScope();
    if (!OMPInfoCache.Kernels.count(Fn))
      return;

    // Add itself to the reaching kernels and set IsKernelEntry.
    ReachingKernelEntries.insert(Fn);
    IsKernelEntry = true;

    OMPInformationCache::RuntimeFunctionInfo &InitRFI =
        OMPInfoCache.RFIs[OMPRTL___kmpc_target_init];
    OMPInformationCache::RuntimeFunctionInfo &DeinitRFI =
        OMPInfoCache.RFIs[OMPRTL___kmpc_target_deinit];

    // For kernels we perform more initialization work, first we find the init
    // and deinit calls.
    auto StoreCallBase = [](Use &U,
                            OMPInformationCache::RuntimeFunctionInfo &RFI,
                            CallBase *&Storage) {
      CallBase *CB = OpenMPOpt::getCallIfRegularCall(U, &RFI);
      assert(CB &&
             "Unexpected use of __kmpc_target_init or __kmpc_target_deinit!");
      assert(!Storage &&
             "Multiple uses of __kmpc_target_init or __kmpc_target_deinit!");
      Storage = CB;
      return false;
    };
    InitRFI.foreachUse(
        [&](Use &U, Function &) {
          StoreCallBase(U, InitRFI, KernelInitCB);
          return false;
        },
        Fn);
    DeinitRFI.foreachUse(
        [&](Use &U, Function &) {
          StoreCallBase(U, DeinitRFI, KernelDeinitCB);
          return false;
        },
        Fn);

    // Ignore kernels without initializers such as global constructors.
    if (!KernelInitCB || !KernelDeinitCB) {
      indicateOptimisticFixpoint();
      return;
    }

    // For kernels we might need to initialize/finalize the IsSPMD state and
    // we need to register a simplification callback so that the Attributor
    // knows the constant arguments to __kmpc_target_init and
    // __kmpc_target_deinit might actually change.

    Attributor::SimplifictionCallbackTy StateMachineSimplifyCB =
        [&](const IRPosition &IRP, const AbstractAttribute *AA,
            bool &UsedAssumedInformation) -> Optional<Value *> {
      // IRP represents the "use generic state machine" argument of an
      // __kmpc_target_init call. We will answer this one with the internal
      // state.
As long as we are not in an invalid state, we will create a 2957 // custom state machine so the value should be a `i1 false`. If we are 2958 // in an invalid state, we won't change the value that is in the IR. 2959 if (!ReachedKnownParallelRegions.isValidState()) 2960 return nullptr; 2961 // If we have disabled state machine rewrites, don't make a custom one. 2962 if (DisableOpenMPOptStateMachineRewrite) 2963 return nullptr; 2964 if (AA) 2965 A.recordDependence(*this, *AA, DepClassTy::OPTIONAL); 2966 UsedAssumedInformation = !isAtFixpoint(); 2967 auto *FalseVal = 2968 ConstantInt::getBool(IRP.getAnchorValue().getContext(), 0); 2969 return FalseVal; 2970 }; 2971 2972 Attributor::SimplifictionCallbackTy ModeSimplifyCB = 2973 [&](const IRPosition &IRP, const AbstractAttribute *AA, 2974 bool &UsedAssumedInformation) -> Optional<Value *> { 2975 // IRP represents the "SPMDCompatibilityTracker" argument of an 2976 // __kmpc_target_init or 2977 // __kmpc_target_deinit call. We will answer this one with the internal 2978 // state. 2979 if (!SPMDCompatibilityTracker.isValidState()) 2980 return nullptr; 2981 if (!SPMDCompatibilityTracker.isAtFixpoint()) { 2982 if (AA) 2983 A.recordDependence(*this, *AA, DepClassTy::OPTIONAL); 2984 UsedAssumedInformation = true; 2985 } else { 2986 UsedAssumedInformation = false; 2987 } 2988 auto *Val = ConstantInt::getSigned( 2989 IntegerType::getInt8Ty(IRP.getAnchorValue().getContext()), 2990 SPMDCompatibilityTracker.isAssumed() ? OMP_TGT_EXEC_MODE_SPMD 2991 : OMP_TGT_EXEC_MODE_GENERIC); 2992 return Val; 2993 }; 2994 2995 Attributor::SimplifictionCallbackTy IsGenericModeSimplifyCB = 2996 [&](const IRPosition &IRP, const AbstractAttribute *AA, 2997 bool &UsedAssumedInformation) -> Optional<Value *> { 2998 // IRP represents the "RequiresFullRuntime" argument of an 2999 // __kmpc_target_init or __kmpc_target_deinit call. We will answer this 3000 // one with the internal state of the SPMDCompatibilityTracker, so if 3001 // generic then true, if SPMD then false. 3002 if (!SPMDCompatibilityTracker.isValidState()) 3003 return nullptr; 3004 if (!SPMDCompatibilityTracker.isAtFixpoint()) { 3005 if (AA) 3006 A.recordDependence(*this, *AA, DepClassTy::OPTIONAL); 3007 UsedAssumedInformation = true; 3008 } else { 3009 UsedAssumedInformation = false; 3010 } 3011 auto *Val = ConstantInt::getBool(IRP.getAnchorValue().getContext(), 3012 !SPMDCompatibilityTracker.isAssumed()); 3013 return Val; 3014 }; 3015 3016 constexpr const int InitModeArgNo = 1; 3017 constexpr const int DeinitModeArgNo = 1; 3018 constexpr const int InitUseStateMachineArgNo = 2; 3019 constexpr const int InitRequiresFullRuntimeArgNo = 3; 3020 constexpr const int DeinitRequiresFullRuntimeArgNo = 2; 3021 A.registerSimplificationCallback( 3022 IRPosition::callsite_argument(*KernelInitCB, InitUseStateMachineArgNo), 3023 StateMachineSimplifyCB); 3024 A.registerSimplificationCallback( 3025 IRPosition::callsite_argument(*KernelInitCB, InitModeArgNo), 3026 ModeSimplifyCB); 3027 A.registerSimplificationCallback( 3028 IRPosition::callsite_argument(*KernelDeinitCB, DeinitModeArgNo), 3029 ModeSimplifyCB); 3030 A.registerSimplificationCallback( 3031 IRPosition::callsite_argument(*KernelInitCB, 3032 InitRequiresFullRuntimeArgNo), 3033 IsGenericModeSimplifyCB); 3034 A.registerSimplificationCallback( 3035 IRPosition::callsite_argument(*KernelDeinitCB, 3036 DeinitRequiresFullRuntimeArgNo), 3037 IsGenericModeSimplifyCB); 3038 3039 // Check if we know we are in SPMD-mode already. 
3040 ConstantInt *ModeArg = 3041 dyn_cast<ConstantInt>(KernelInitCB->getArgOperand(InitModeArgNo)); 3042 if (ModeArg && (ModeArg->getSExtValue() & OMP_TGT_EXEC_MODE_SPMD)) 3043 SPMDCompatibilityTracker.indicateOptimisticFixpoint(); 3044 // This is a generic region but SPMDization is disabled so stop tracking. 3045 else if (DisableOpenMPOptSPMDization) 3046 SPMDCompatibilityTracker.indicatePessimisticFixpoint(); 3047 } 3048 3049 /// Sanitize the string \p S such that it is a suitable global symbol name. 3050 static std::string sanitizeForGlobalName(std::string S) { 3051 std::replace_if( 3052 S.begin(), S.end(), 3053 [](const char C) { 3054 return !((C >= 'a' && C <= 'z') || (C >= 'A' && C <= 'Z') || 3055 (C >= '0' && C <= '9') || C == '_'); 3056 }, 3057 '.'); 3058 return S; 3059 } 3060 3061 /// Modify the IR based on the KernelInfoState as the fixpoint iteration is 3062 /// finished now. 3063 ChangeStatus manifest(Attributor &A) override { 3064 // If we are not looking at a kernel with __kmpc_target_init and 3065 // __kmpc_target_deinit call we cannot actually manifest the information. 3066 if (!KernelInitCB || !KernelDeinitCB) 3067 return ChangeStatus::UNCHANGED; 3068 3069 // If we can we change the execution mode to SPMD-mode otherwise we build a 3070 // custom state machine. 3071 ChangeStatus Changed = ChangeStatus::UNCHANGED; 3072 if (!changeToSPMDMode(A, Changed)) 3073 return buildCustomStateMachine(A); 3074 3075 return Changed; 3076 } 3077 3078 bool changeToSPMDMode(Attributor &A, ChangeStatus &Changed) { 3079 auto &OMPInfoCache = static_cast<OMPInformationCache &>(A.getInfoCache()); 3080 3081 if (!SPMDCompatibilityTracker.isAssumed()) { 3082 for (Instruction *NonCompatibleI : SPMDCompatibilityTracker) { 3083 if (!NonCompatibleI) 3084 continue; 3085 3086 // Skip diagnostics on calls to known OpenMP runtime functions for now. 3087 if (auto *CB = dyn_cast<CallBase>(NonCompatibleI)) 3088 if (OMPInfoCache.RTLFunctions.contains(CB->getCalledFunction())) 3089 continue; 3090 3091 auto Remark = [&](OptimizationRemarkAnalysis ORA) { 3092 ORA << "Value has potential side effects preventing SPMD-mode " 3093 "execution"; 3094 if (isa<CallBase>(NonCompatibleI)) { 3095 ORA << ". Add `__attribute__((assume(\"ompx_spmd_amenable\")))` to " 3096 "the called function to override"; 3097 } 3098 return ORA << "."; 3099 }; 3100 A.emitRemark<OptimizationRemarkAnalysis>(NonCompatibleI, "OMP121", 3101 Remark); 3102 3103 LLVM_DEBUG(dbgs() << TAG << "SPMD-incompatible side-effect: " 3104 << *NonCompatibleI << "\n"); 3105 } 3106 3107 return false; 3108 } 3109 3110 // Check if the kernel is already in SPMD mode, if so, return success. 3111 Function *Kernel = getAnchorScope(); 3112 GlobalVariable *ExecMode = Kernel->getParent()->getGlobalVariable( 3113 (Kernel->getName() + "_exec_mode").str()); 3114 assert(ExecMode && "Kernel without exec mode?"); 3115 assert(ExecMode->getInitializer() && "ExecMode doesn't have initializer!"); 3116 3117 // Set the global exec mode flag to indicate SPMD-Generic mode. 3118 assert(isa<ConstantInt>(ExecMode->getInitializer()) && 3119 "ExecMode is not an integer!"); 3120 const int8_t ExecModeVal = 3121 cast<ConstantInt>(ExecMode->getInitializer())->getSExtValue(); 3122 if (ExecModeVal != OMP_TGT_EXEC_MODE_GENERIC) 3123 return true; 3124 3125 // We will now unconditionally modify the IR, indicate a change. 
3126 Changed = ChangeStatus::CHANGED; 3127 3128 auto CreateGuardedRegion = [&](Instruction *RegionStartI, 3129 Instruction *RegionEndI) { 3130 LoopInfo *LI = nullptr; 3131 DominatorTree *DT = nullptr; 3132 MemorySSAUpdater *MSU = nullptr; 3133 using InsertPointTy = OpenMPIRBuilder::InsertPointTy; 3134 3135 BasicBlock *ParentBB = RegionStartI->getParent(); 3136 Function *Fn = ParentBB->getParent(); 3137 Module &M = *Fn->getParent(); 3138 3139 // Create all the blocks and logic. 3140 // ParentBB: 3141 // goto RegionCheckTidBB 3142 // RegionCheckTidBB: 3143 // Tid = __kmpc_hardware_thread_id() 3144 // if (Tid != 0) 3145 // goto RegionBarrierBB 3146 // RegionStartBB: 3147 // <execute instructions guarded> 3148 // goto RegionEndBB 3149 // RegionEndBB: 3150 // <store escaping values to shared mem> 3151 // goto RegionBarrierBB 3152 // RegionBarrierBB: 3153 // __kmpc_simple_barrier_spmd() 3154 // // second barrier is omitted if lacking escaping values. 3155 // <load escaping values from shared mem> 3156 // __kmpc_simple_barrier_spmd() 3157 // goto RegionExitBB 3158 // RegionExitBB: 3159 // <execute rest of instructions> 3160 3161 BasicBlock *RegionEndBB = SplitBlock(ParentBB, RegionEndI->getNextNode(), 3162 DT, LI, MSU, "region.guarded.end"); 3163 BasicBlock *RegionBarrierBB = 3164 SplitBlock(RegionEndBB, &*RegionEndBB->getFirstInsertionPt(), DT, LI, 3165 MSU, "region.barrier"); 3166 BasicBlock *RegionExitBB = 3167 SplitBlock(RegionBarrierBB, &*RegionBarrierBB->getFirstInsertionPt(), 3168 DT, LI, MSU, "region.exit"); 3169 BasicBlock *RegionStartBB = 3170 SplitBlock(ParentBB, RegionStartI, DT, LI, MSU, "region.guarded"); 3171 3172 assert(ParentBB->getUniqueSuccessor() == RegionStartBB && 3173 "Expected a different CFG"); 3174 3175 BasicBlock *RegionCheckTidBB = SplitBlock( 3176 ParentBB, ParentBB->getTerminator(), DT, LI, MSU, "region.check.tid"); 3177 3178 // Register basic blocks with the Attributor. 3179 A.registerManifestAddedBasicBlock(*RegionEndBB); 3180 A.registerManifestAddedBasicBlock(*RegionBarrierBB); 3181 A.registerManifestAddedBasicBlock(*RegionExitBB); 3182 A.registerManifestAddedBasicBlock(*RegionStartBB); 3183 A.registerManifestAddedBasicBlock(*RegionCheckTidBB); 3184 3185 bool HasBroadcastValues = false; 3186 // Find escaping outputs from the guarded region to outside users and 3187 // broadcast their values to them. 3188 for (Instruction &I : *RegionStartBB) { 3189 SmallPtrSet<Instruction *, 4> OutsideUsers; 3190 for (User *Usr : I.users()) { 3191 Instruction &UsrI = *cast<Instruction>(Usr); 3192 if (UsrI.getParent() != RegionStartBB) 3193 OutsideUsers.insert(&UsrI); 3194 } 3195 3196 if (OutsideUsers.empty()) 3197 continue; 3198 3199 HasBroadcastValues = true; 3200 3201 // Emit a global variable in shared memory to store the broadcasted 3202 // value. 3203 auto *SharedMem = new GlobalVariable( 3204 M, I.getType(), /* IsConstant */ false, 3205 GlobalValue::InternalLinkage, UndefValue::get(I.getType()), 3206 sanitizeForGlobalName( 3207 (I.getName() + ".guarded.output.alloc").str()), 3208 nullptr, GlobalValue::NotThreadLocal, 3209 static_cast<unsigned>(AddressSpace::Shared)); 3210 3211 // Emit a store instruction to update the value. 3212 new StoreInst(&I, SharedMem, RegionEndBB->getTerminator()); 3213 3214 LoadInst *LoadI = new LoadInst(I.getType(), SharedMem, 3215 I.getName() + ".guarded.output.load", 3216 RegionBarrierBB->getTerminator()); 3217 3218 // Emit a load instruction and replace uses of the output value. 
3219 for (Instruction *UsrI : OutsideUsers) 3220 UsrI->replaceUsesOfWith(&I, LoadI); 3221 } 3222 3223 auto &OMPInfoCache = static_cast<OMPInformationCache &>(A.getInfoCache()); 3224 3225 // Go to the tid check BB in ParentBB. 3226 const DebugLoc DL = ParentBB->getTerminator()->getDebugLoc(); 3227 ParentBB->getTerminator()->eraseFromParent(); 3228 OpenMPIRBuilder::LocationDescription Loc( 3229 InsertPointTy(ParentBB, ParentBB->end()), DL); 3230 OMPInfoCache.OMPBuilder.updateToLocation(Loc); 3231 auto *SrcLocStr = OMPInfoCache.OMPBuilder.getOrCreateSrcLocStr(Loc); 3232 Value *Ident = OMPInfoCache.OMPBuilder.getOrCreateIdent(SrcLocStr); 3233 BranchInst::Create(RegionCheckTidBB, ParentBB)->setDebugLoc(DL); 3234 3235 // Add a check for Tid in RegionCheckTidBB. 3236 RegionCheckTidBB->getTerminator()->eraseFromParent(); 3237 OpenMPIRBuilder::LocationDescription LocRegionCheckTid( 3238 InsertPointTy(RegionCheckTidBB, RegionCheckTidBB->end()), DL); 3239 OMPInfoCache.OMPBuilder.updateToLocation(LocRegionCheckTid); 3240 FunctionCallee HardwareTidFn = 3241 OMPInfoCache.OMPBuilder.getOrCreateRuntimeFunction( 3242 M, OMPRTL___kmpc_get_hardware_thread_id_in_block); 3243 Value *Tid = 3244 OMPInfoCache.OMPBuilder.Builder.CreateCall(HardwareTidFn, {}); 3245 Value *TidCheck = OMPInfoCache.OMPBuilder.Builder.CreateIsNull(Tid); 3246 OMPInfoCache.OMPBuilder.Builder 3247 .CreateCondBr(TidCheck, RegionStartBB, RegionBarrierBB) 3248 ->setDebugLoc(DL); 3249 3250 // First barrier for synchronization; it ensures the main thread has 3251 // updated the values. 3252 FunctionCallee BarrierFn = 3253 OMPInfoCache.OMPBuilder.getOrCreateRuntimeFunction( 3254 M, OMPRTL___kmpc_barrier_simple_spmd); 3255 OMPInfoCache.OMPBuilder.updateToLocation(InsertPointTy( 3256 RegionBarrierBB, RegionBarrierBB->getFirstInsertionPt())); 3257 OMPInfoCache.OMPBuilder.Builder.CreateCall(BarrierFn, {Ident, Tid}) 3258 ->setDebugLoc(DL); 3259 3260 // The second barrier ensures workers have read the broadcast values.
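// If nothing escapes the guarded region there is nothing to read back, so
// the second barrier (and with it the shared-memory round trip) is skipped
// entirely; the first barrier already orders the guarded region before any
// subsequent code.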
3261 if (HasBroadcastValues) 3262 CallInst::Create(BarrierFn, {Ident, Tid}, "", 3263 RegionBarrierBB->getTerminator()) 3264 ->setDebugLoc(DL); 3265 }; 3266 3267 auto &AllocSharedRFI = OMPInfoCache.RFIs[OMPRTL___kmpc_alloc_shared]; 3268 SmallPtrSet<BasicBlock *, 8> Visited; 3269 for (Instruction *GuardedI : SPMDCompatibilityTracker) { 3270 BasicBlock *BB = GuardedI->getParent(); 3271 if (!Visited.insert(BB).second) 3272 continue; 3273 3274 SmallVector<std::pair<Instruction *, Instruction *>> Reorders; 3275 Instruction *LastEffect = nullptr; 3276 BasicBlock::reverse_iterator IP = BB->rbegin(), IPEnd = BB->rend(); 3277 while (++IP != IPEnd) { 3278 if (!IP->mayHaveSideEffects() && !IP->mayReadFromMemory()) 3279 continue; 3280 Instruction *I = &*IP; 3281 if (OpenMPOpt::getCallIfRegularCall(*I, &AllocSharedRFI)) 3282 continue; 3283 if (!I->user_empty() || !SPMDCompatibilityTracker.contains(I)) { 3284 LastEffect = nullptr; 3285 continue; 3286 } 3287 if (LastEffect) 3288 Reorders.push_back({I, LastEffect}); 3289 LastEffect = &*IP; 3290 } 3291 for (auto &Reorder : Reorders) 3292 Reorder.first->moveBefore(Reorder.second); 3293 } 3294 3295 SmallVector<std::pair<Instruction *, Instruction *>, 4> GuardedRegions; 3296 3297 for (Instruction *GuardedI : SPMDCompatibilityTracker) { 3298 BasicBlock *BB = GuardedI->getParent(); 3299 auto *CalleeAA = A.lookupAAFor<AAKernelInfo>( 3300 IRPosition::function(*GuardedI->getFunction()), nullptr, 3301 DepClassTy::NONE); 3302 assert(CalleeAA != nullptr && "Expected Callee AAKernelInfo"); 3303 auto &CalleeAAFunction = *cast<AAKernelInfoFunction>(CalleeAA); 3304 // Continue if the instruction is already guarded. 3305 if (CalleeAAFunction.getGuardedInstructions().contains(GuardedI)) 3306 continue; 3307 3308 Instruction *GuardedRegionStart = nullptr, *GuardedRegionEnd = nullptr; 3309 for (Instruction &I : *BB) { 3310 // If instruction I needs to be guarded, update the guarded region 3311 // bounds. 3312 if (SPMDCompatibilityTracker.contains(&I)) { 3313 CalleeAAFunction.getGuardedInstructions().insert(&I); 3314 if (GuardedRegionStart) 3315 GuardedRegionEnd = &I; 3316 else 3317 GuardedRegionStart = GuardedRegionEnd = &I; 3318 3319 continue; 3320 } 3321 3322 // Instruction I does not need guarding; store 3323 // any region found and reset the bounds. 3324 if (GuardedRegionStart) { 3325 GuardedRegions.push_back( 3326 std::make_pair(GuardedRegionStart, GuardedRegionEnd)); 3327 GuardedRegionStart = nullptr; 3328 GuardedRegionEnd = nullptr; 3329 } 3330 } 3331 } 3332 3333 for (auto &GR : GuardedRegions) 3334 CreateGuardedRegion(GR.first, GR.second); 3335 3336 // Adjust the global exec mode flag that tells the runtime what mode this 3337 // kernel is executed in. 3338 assert(ExecModeVal == OMP_TGT_EXEC_MODE_GENERIC && 3339 "Initially non-SPMD kernel has SPMD exec mode!"); 3340 ExecMode->setInitializer( 3341 ConstantInt::get(ExecMode->getInitializer()->getType(), 3342 ExecModeVal | OMP_TGT_EXEC_MODE_GENERIC_SPMD)); 3343 3344 // Next, rewrite the init and deinit calls to indicate we use SPMD mode now.
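// Conceptually, the rewritten calls look as follows (illustrative sketch;
// the argument positions are the *ArgNo constants below and assume the
// current device runtime signatures):
//
//   __kmpc_target_init(ident, /*Mode=*/SPMD, /*UseStateMachine=*/false,
//                      /*RequiresFullRuntime=*/false)
//   __kmpc_target_deinit(ident, /*Mode=*/SPMD,
//                        /*RequiresFullRuntime=*/false)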
3345 const int InitModeArgNo = 1; 3346 const int DeinitModeArgNo = 1; 3347 const int InitUseStateMachineArgNo = 2; 3348 const int InitRequiresFullRuntimeArgNo = 3; 3349 const int DeinitRequiresFullRuntimeArgNo = 2; 3350 3351 auto &Ctx = getAnchorValue().getContext(); 3352 A.changeUseAfterManifest( 3353 KernelInitCB->getArgOperandUse(InitModeArgNo), 3354 *ConstantInt::getSigned(IntegerType::getInt8Ty(Ctx), 3355 OMP_TGT_EXEC_MODE_SPMD)); 3356 A.changeUseAfterManifest( 3357 KernelInitCB->getArgOperandUse(InitUseStateMachineArgNo), 3358 *ConstantInt::getBool(Ctx, 0)); 3359 A.changeUseAfterManifest( 3360 KernelDeinitCB->getArgOperandUse(DeinitModeArgNo), 3361 *ConstantInt::getSigned(IntegerType::getInt8Ty(Ctx), 3362 OMP_TGT_EXEC_MODE_SPMD)); 3363 A.changeUseAfterManifest( 3364 KernelInitCB->getArgOperandUse(InitRequiresFullRuntimeArgNo), 3365 *ConstantInt::getBool(Ctx, 0)); 3366 A.changeUseAfterManifest( 3367 KernelDeinitCB->getArgOperandUse(DeinitRequiresFullRuntimeArgNo), 3368 *ConstantInt::getBool(Ctx, 0)); 3369 3370 ++NumOpenMPTargetRegionKernelsSPMD; 3371 3372 auto Remark = [&](OptimizationRemark OR) { 3373 return OR << "Transformed generic-mode kernel to SPMD-mode."; 3374 }; 3375 A.emitRemark<OptimizationRemark>(KernelInitCB, "OMP120", Remark); 3376 return true; 3377 } 3378 3379 ChangeStatus buildCustomStateMachine(Attributor &A) { 3380 // If we have disabled state machine rewrites, don't make a custom one. 3381 if (DisableOpenMPOptStateMachineRewrite) 3382 return ChangeStatus::UNCHANGED; 3383 3384 // Don't rewrite the state machine if we are not in a valid state. 3385 if (!ReachedKnownParallelRegions.isValidState()) 3386 return ChangeStatus::UNCHANGED; 3387 3388 const int InitModeArgNo = 1; 3389 const int InitUseStateMachineArgNo = 2; 3390 3391 // Check if the current configuration is non-SPMD and uses the generic state 3392 // machine. If we already have SPMD mode or a custom state machine, we do not 3393 // need to go any further. If it is anything but a constant, something is 3394 // weird and we give up. 3395 ConstantInt *UseStateMachine = dyn_cast<ConstantInt>( 3396 KernelInitCB->getArgOperand(InitUseStateMachineArgNo)); 3397 ConstantInt *Mode = 3398 dyn_cast<ConstantInt>(KernelInitCB->getArgOperand(InitModeArgNo)); 3399 3400 // If we are stuck with generic mode, try to create a custom device (=GPU) 3401 // state machine which is specialized for the parallel regions that are 3402 // reachable by the kernel. 3403 if (!UseStateMachine || UseStateMachine->isZero() || !Mode || 3404 (Mode->getSExtValue() & OMP_TGT_EXEC_MODE_SPMD)) 3405 return ChangeStatus::UNCHANGED; 3406 3407 // If not SPMD mode, indicate we use a custom state machine now. 3408 auto &Ctx = getAnchorValue().getContext(); 3409 auto *FalseVal = ConstantInt::getBool(Ctx, 0); 3410 A.changeUseAfterManifest( 3411 KernelInitCB->getArgOperandUse(InitUseStateMachineArgNo), *FalseVal); 3412 3413 // If we don't actually need a state machine, we are done here. This can 3414 // happen if there simply are no parallel regions. In the resulting kernel 3415 // all worker threads will simply exit right away, leaving the main thread 3416 // to do the work alone.
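// A typical example (illustrative) is a generic-mode kernel whose body is
// purely sequential, e.g., a `#pragma omp target` region without any
// enclosed `#pragma omp parallel`.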
3417 if (!mayContainParallelRegion()) { 3418 ++NumOpenMPTargetRegionKernelsWithoutStateMachine; 3419 3420 auto Remark = [&](OptimizationRemark OR) { 3421 return OR << "Removing unused state machine from generic-mode kernel."; 3422 }; 3423 A.emitRemark<OptimizationRemark>(KernelInitCB, "OMP130", Remark); 3424 3425 return ChangeStatus::CHANGED; 3426 } 3427 3428 // Keep track in the statistics of our new shiny custom state machine. 3429 if (ReachedUnknownParallelRegions.empty()) { 3430 ++NumOpenMPTargetRegionKernelsCustomStateMachineWithoutFallback; 3431 3432 auto Remark = [&](OptimizationRemark OR) { 3433 return OR << "Rewriting generic-mode kernel with a customized state " 3434 "machine."; 3435 }; 3436 A.emitRemark<OptimizationRemark>(KernelInitCB, "OMP131", Remark); 3437 } else { 3438 ++NumOpenMPTargetRegionKernelsCustomStateMachineWithFallback; 3439 3440 auto Remark = [&](OptimizationRemarkAnalysis OR) { 3441 return OR << "Generic-mode kernel is executed with a customized state " 3442 "machine that requires a fallback."; 3443 }; 3444 A.emitRemark<OptimizationRemarkAnalysis>(KernelInitCB, "OMP132", Remark); 3445 3446 // Tell the user why we ended up with a fallback. 3447 for (CallBase *UnknownParallelRegionCB : ReachedUnknownParallelRegions) { 3448 if (!UnknownParallelRegionCB) 3449 continue; 3450 auto Remark = [&](OptimizationRemarkAnalysis ORA) { 3451 return ORA << "Call may contain unknown parallel regions. Use " 3452 << "`__attribute__((assume(\"omp_no_parallelism\")))` to " 3453 "override."; 3454 }; 3455 A.emitRemark<OptimizationRemarkAnalysis>(UnknownParallelRegionCB, 3456 "OMP133", Remark); 3457 } 3458 } 3459 3460 // Create all the blocks: 3461 // 3462 // InitCB = __kmpc_target_init(...) 3463 // BlockHwSize = 3464 // __kmpc_get_hardware_num_threads_in_block(); 3465 // WarpSize = __kmpc_get_warp_size(); 3466 // BlockSize = BlockHwSize - WarpSize; 3467 // if (InitCB >= BlockSize) return; 3468 // IsWorkerCheckBB: bool IsWorker = InitCB >= 0; 3469 // if (IsWorker) { 3470 // SMBeginBB: __kmpc_barrier_simple_generic(...); 3471 // void *WorkFn; 3472 // bool Active = __kmpc_kernel_parallel(&WorkFn); 3473 // if (!WorkFn) return; 3474 // SMIsActiveCheckBB: if (Active) { 3475 // SMIfCascadeCurrentBB: if (WorkFn == <ParFn0>) 3476 // ParFn0(...); 3477 // SMIfCascadeCurrentBB: else if (WorkFn == <ParFn1>) 3478 // ParFn1(...); 3479 // ... 3480 // SMIfCascadeCurrentBB: else 3481 // ((WorkFnTy*)WorkFn)(...); 3482 // SMEndParallelBB: __kmpc_kernel_end_parallel(...); 3483 // } 3484 // SMDoneBB: __kmpc_barrier_simple_generic(...); 3485 // goto SMBeginBB; 3486 // } 3487 // UserCodeEntryBB: // user code 3488 // __kmpc_target_deinit(...) 
3489 // 3490 Function *Kernel = getAssociatedFunction(); 3491 assert(Kernel && "Expected an associated function!"); 3492 3493 BasicBlock *InitBB = KernelInitCB->getParent(); 3494 BasicBlock *UserCodeEntryBB = InitBB->splitBasicBlock( 3495 KernelInitCB->getNextNode(), "thread.user_code.check"); 3496 BasicBlock *IsWorkerCheckBB = 3497 BasicBlock::Create(Ctx, "is_worker_check", Kernel, UserCodeEntryBB); 3498 BasicBlock *StateMachineBeginBB = BasicBlock::Create( 3499 Ctx, "worker_state_machine.begin", Kernel, UserCodeEntryBB); 3500 BasicBlock *StateMachineFinishedBB = BasicBlock::Create( 3501 Ctx, "worker_state_machine.finished", Kernel, UserCodeEntryBB); 3502 BasicBlock *StateMachineIsActiveCheckBB = BasicBlock::Create( 3503 Ctx, "worker_state_machine.is_active.check", Kernel, UserCodeEntryBB); 3504 BasicBlock *StateMachineIfCascadeCurrentBB = 3505 BasicBlock::Create(Ctx, "worker_state_machine.parallel_region.check", 3506 Kernel, UserCodeEntryBB); 3507 BasicBlock *StateMachineEndParallelBB = 3508 BasicBlock::Create(Ctx, "worker_state_machine.parallel_region.end", 3509 Kernel, UserCodeEntryBB); 3510 BasicBlock *StateMachineDoneBarrierBB = BasicBlock::Create( 3511 Ctx, "worker_state_machine.done.barrier", Kernel, UserCodeEntryBB); 3512 A.registerManifestAddedBasicBlock(*InitBB); 3513 A.registerManifestAddedBasicBlock(*UserCodeEntryBB); 3514 A.registerManifestAddedBasicBlock(*IsWorkerCheckBB); 3515 A.registerManifestAddedBasicBlock(*StateMachineBeginBB); 3516 A.registerManifestAddedBasicBlock(*StateMachineFinishedBB); 3517 A.registerManifestAddedBasicBlock(*StateMachineIsActiveCheckBB); 3518 A.registerManifestAddedBasicBlock(*StateMachineIfCascadeCurrentBB); 3519 A.registerManifestAddedBasicBlock(*StateMachineEndParallelBB); 3520 A.registerManifestAddedBasicBlock(*StateMachineDoneBarrierBB); 3521 3522 const DebugLoc &DLoc = KernelInitCB->getDebugLoc(); 3523 ReturnInst::Create(Ctx, StateMachineFinishedBB)->setDebugLoc(DLoc); 3524 InitBB->getTerminator()->eraseFromParent(); 3525 3526 Module &M = *Kernel->getParent(); 3527 auto &OMPInfoCache = static_cast<OMPInformationCache &>(A.getInfoCache()); 3528 FunctionCallee BlockHwSizeFn = 3529 OMPInfoCache.OMPBuilder.getOrCreateRuntimeFunction( 3530 M, OMPRTL___kmpc_get_hardware_num_threads_in_block); 3531 FunctionCallee WarpSizeFn = 3532 OMPInfoCache.OMPBuilder.getOrCreateRuntimeFunction( 3533 M, OMPRTL___kmpc_get_warp_size); 3534 Instruction *BlockHwSize = 3535 CallInst::Create(BlockHwSizeFn, "block.hw_size", InitBB); 3536 BlockHwSize->setDebugLoc(DLoc); 3537 Instruction *WarpSize = CallInst::Create(WarpSizeFn, "warp.size", InitBB); 3538 WarpSize->setDebugLoc(DLoc); 3539 Instruction *BlockSize = 3540 BinaryOperator::CreateSub(BlockHwSize, WarpSize, "block.size", InitBB); 3541 BlockSize->setDebugLoc(DLoc); 3542 Instruction *IsMainOrWorker = 3543 ICmpInst::Create(ICmpInst::ICmp, llvm::CmpInst::ICMP_SLT, KernelInitCB, 3544 BlockSize, "thread.is_main_or_worker", InitBB); 3545 IsMainOrWorker->setDebugLoc(DLoc); 3546 BranchInst::Create(IsWorkerCheckBB, StateMachineFinishedBB, IsMainOrWorker, 3547 InitBB); 3548 3549 Instruction *IsWorker = 3550 ICmpInst::Create(ICmpInst::ICmp, llvm::CmpInst::ICMP_NE, KernelInitCB, 3551 ConstantInt::get(KernelInitCB->getType(), -1), 3552 "thread.is_worker", IsWorkerCheckBB); 3553 IsWorker->setDebugLoc(DLoc); 3554 BranchInst::Create(StateMachineBeginBB, UserCodeEntryBB, IsWorker, 3555 IsWorkerCheckBB); 3556 3557 // Create local storage for the work function pointer. 
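// The alloca is created in the function's alloca address space; on targets
// where that is not the generic address space (e.g., AMDGPU, where allocas
// live in address space 5), a cast back to the generic address space is
// emitted below before the pointer is passed to __kmpc_kernel_parallel.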
3558 const DataLayout &DL = M.getDataLayout(); 3559 Type *VoidPtrTy = Type::getInt8PtrTy(Ctx); 3560 Instruction *WorkFnAI = 3561 new AllocaInst(VoidPtrTy, DL.getAllocaAddrSpace(), nullptr, 3562 "worker.work_fn.addr", &Kernel->getEntryBlock().front()); 3563 WorkFnAI->setDebugLoc(DLoc); 3564 3565 OMPInfoCache.OMPBuilder.updateToLocation( 3566 OpenMPIRBuilder::LocationDescription( 3567 IRBuilder<>::InsertPoint(StateMachineBeginBB, 3568 StateMachineBeginBB->end()), 3569 DLoc)); 3570 3571 Value *Ident = KernelInitCB->getArgOperand(0); 3572 Value *GTid = KernelInitCB; 3573 3574 FunctionCallee BarrierFn = 3575 OMPInfoCache.OMPBuilder.getOrCreateRuntimeFunction( 3576 M, OMPRTL___kmpc_barrier_simple_generic); 3577 CallInst::Create(BarrierFn, {Ident, GTid}, "", StateMachineBeginBB) 3578 ->setDebugLoc(DLoc); 3579 3580 if (WorkFnAI->getType()->getPointerAddressSpace() != 3581 (unsigned int)AddressSpace::Generic) { 3582 WorkFnAI = new AddrSpaceCastInst( 3583 WorkFnAI, 3584 PointerType::getWithSamePointeeType( 3585 cast<PointerType>(WorkFnAI->getType()), 3586 (unsigned int)AddressSpace::Generic), 3587 WorkFnAI->getName() + ".generic", StateMachineBeginBB); 3588 WorkFnAI->setDebugLoc(DLoc); 3589 } 3590 3591 FunctionCallee KernelParallelFn = 3592 OMPInfoCache.OMPBuilder.getOrCreateRuntimeFunction( 3593 M, OMPRTL___kmpc_kernel_parallel); 3594 Instruction *IsActiveWorker = CallInst::Create( 3595 KernelParallelFn, {WorkFnAI}, "worker.is_active", StateMachineBeginBB); 3596 IsActiveWorker->setDebugLoc(DLoc); 3597 Instruction *WorkFn = new LoadInst(VoidPtrTy, WorkFnAI, "worker.work_fn", 3598 StateMachineBeginBB); 3599 WorkFn->setDebugLoc(DLoc); 3600 3601 FunctionType *ParallelRegionFnTy = FunctionType::get( 3602 Type::getVoidTy(Ctx), {Type::getInt16Ty(Ctx), Type::getInt32Ty(Ctx)}, 3603 false); 3604 Value *WorkFnCast = BitCastInst::CreatePointerBitCastOrAddrSpaceCast( 3605 WorkFn, ParallelRegionFnTy->getPointerTo(), "worker.work_fn.addr_cast", 3606 StateMachineBeginBB); 3607 3608 Instruction *IsDone = 3609 ICmpInst::Create(ICmpInst::ICmp, llvm::CmpInst::ICMP_EQ, WorkFn, 3610 Constant::getNullValue(VoidPtrTy), "worker.is_done", 3611 StateMachineBeginBB); 3612 IsDone->setDebugLoc(DLoc); 3613 BranchInst::Create(StateMachineFinishedBB, StateMachineIsActiveCheckBB, 3614 IsDone, StateMachineBeginBB) 3615 ->setDebugLoc(DLoc); 3616 3617 BranchInst::Create(StateMachineIfCascadeCurrentBB, 3618 StateMachineDoneBarrierBB, IsActiveWorker, 3619 StateMachineIsActiveCheckBB) 3620 ->setDebugLoc(DLoc); 3621 3622 Value *ZeroArg = 3623 Constant::getNullValue(ParallelRegionFnTy->getParamType(0)); 3624 3625 // Now that we have most of the CFG skeleton it is time for the if-cascade 3626 // that checks the function pointer we got from the runtime against the 3627 // parallel regions we expect, if there are any. 
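// For two known parallel regions, the emitted cascade is roughly
// (illustrative):
//
//   if (WorkFn == ParFn0)      ParFn0(0, GTid);
//   else if (WorkFn == ParFn1) ParFn1(0, GTid);
//   else                       ((WorkFnTy *)WorkFn)(0, GTid);  // fallback
//
// The comparison for the last known region is omitted if no unknown
// parallel regions are possible, since the call must then target it.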
3628 for (int I = 0, E = ReachedKnownParallelRegions.size(); I < E; ++I) { 3629 auto *ParallelRegion = ReachedKnownParallelRegions[I]; 3630 BasicBlock *PRExecuteBB = BasicBlock::Create( 3631 Ctx, "worker_state_machine.parallel_region.execute", Kernel, 3632 StateMachineEndParallelBB); 3633 CallInst::Create(ParallelRegion, {ZeroArg, GTid}, "", PRExecuteBB) 3634 ->setDebugLoc(DLoc); 3635 BranchInst::Create(StateMachineEndParallelBB, PRExecuteBB) 3636 ->setDebugLoc(DLoc); 3637 3638 BasicBlock *PRNextBB = 3639 BasicBlock::Create(Ctx, "worker_state_machine.parallel_region.check", 3640 Kernel, StateMachineEndParallelBB); 3641 3642 // Check if we need to compare the pointer at all or if we can just 3643 // call the parallel region function. 3644 Value *IsPR; 3645 if (I + 1 < E || !ReachedUnknownParallelRegions.empty()) { 3646 Instruction *CmpI = ICmpInst::Create( 3647 ICmpInst::ICmp, llvm::CmpInst::ICMP_EQ, WorkFnCast, ParallelRegion, 3648 "worker.check_parallel_region", StateMachineIfCascadeCurrentBB); 3649 CmpI->setDebugLoc(DLoc); 3650 IsPR = CmpI; 3651 } else { 3652 IsPR = ConstantInt::getTrue(Ctx); 3653 } 3654 3655 BranchInst::Create(PRExecuteBB, PRNextBB, IsPR, 3656 StateMachineIfCascadeCurrentBB) 3657 ->setDebugLoc(DLoc); 3658 StateMachineIfCascadeCurrentBB = PRNextBB; 3659 } 3660 3661 // At the end of the if-cascade we place the indirect function pointer call 3662 // in case we might need it, that is if there can be parallel regions we 3663 // have not handled in the if-cascade above. 3664 if (!ReachedUnknownParallelRegions.empty()) { 3665 StateMachineIfCascadeCurrentBB->setName( 3666 "worker_state_machine.parallel_region.fallback.execute"); 3667 CallInst::Create(ParallelRegionFnTy, WorkFnCast, {ZeroArg, GTid}, "", 3668 StateMachineIfCascadeCurrentBB) 3669 ->setDebugLoc(DLoc); 3670 } 3671 BranchInst::Create(StateMachineEndParallelBB, 3672 StateMachineIfCascadeCurrentBB) 3673 ->setDebugLoc(DLoc); 3674 3675 CallInst::Create(OMPInfoCache.OMPBuilder.getOrCreateRuntimeFunction( 3676 M, OMPRTL___kmpc_kernel_end_parallel), 3677 {}, "", StateMachineEndParallelBB) 3678 ->setDebugLoc(DLoc); 3679 BranchInst::Create(StateMachineDoneBarrierBB, StateMachineEndParallelBB) 3680 ->setDebugLoc(DLoc); 3681 3682 CallInst::Create(BarrierFn, {Ident, GTid}, "", StateMachineDoneBarrierBB) 3683 ->setDebugLoc(DLoc); 3684 BranchInst::Create(StateMachineBeginBB, StateMachineDoneBarrierBB) 3685 ->setDebugLoc(DLoc); 3686 3687 return ChangeStatus::CHANGED; 3688 } 3689 3690 /// Fixpoint iteration update function. Will be called every time a dependence 3691 /// changed its state (and in the beginning). 3692 ChangeStatus updateImpl(Attributor &A) override { 3693 KernelInfoState StateBefore = getState(); 3694 3695 // Callback to check a read/write instruction. 3696 auto CheckRWInst = [&](Instruction &I) { 3697 // We handle calls later. 3698 if (isa<CallBase>(I)) 3699 return true; 3700 // We only care about write effects. 3701 if (!I.mayWriteToMemory()) 3702 return true; 3703 if (auto *SI = dyn_cast<StoreInst>(&I)) { 3704 SmallVector<const Value *> Objects; 3705 getUnderlyingObjects(SI->getPointerOperand(), Objects); 3706 if (llvm::all_of(Objects, 3707 [](const Value *Obj) { return isa<AllocaInst>(Obj); })) 3708 return true; 3709 // Check for AAHeapToStack moved objects which must not be guarded. 
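// Rationale: an allocation that AAHeapToStack turns into a stack object is
// thread-private, just like an AllocaInst, so a store into it cannot be
// observed by other threads and needs no guard.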
3710 auto &HS = A.getAAFor<AAHeapToStack>( 3711 *this, IRPosition::function(*I.getFunction()), 3712 DepClassTy::OPTIONAL); 3713 if (llvm::all_of(Objects, [&HS](const Value *Obj) { 3714 auto *CB = dyn_cast<CallBase>(Obj); 3715 if (!CB) 3716 return false; 3717 return HS.isAssumedHeapToStack(*CB); 3718 })) { 3719 return true; 3720 } 3721 } 3722 3723 // Record the instruction as needing guarding. 3724 SPMDCompatibilityTracker.insert(&I); 3725 return true; 3726 }; 3727 3728 bool UsedAssumedInformationInCheckRWInst = false; 3729 if (!SPMDCompatibilityTracker.isAtFixpoint()) 3730 if (!A.checkForAllReadWriteInstructions( 3731 CheckRWInst, *this, UsedAssumedInformationInCheckRWInst)) 3732 SPMDCompatibilityTracker.indicatePessimisticFixpoint(); 3733 3734 bool UsedAssumedInformationFromReachingKernels = false; 3735 if (!IsKernelEntry) { 3736 updateParallelLevels(A); 3737 3738 bool AllReachingKernelsKnown = true; 3739 updateReachingKernelEntries(A, AllReachingKernelsKnown); 3740 UsedAssumedInformationFromReachingKernels = !AllReachingKernelsKnown; 3741 3742 if (!ParallelLevels.isValidState()) 3743 SPMDCompatibilityTracker.indicatePessimisticFixpoint(); 3744 else if (!ReachingKernelEntries.isValidState()) 3745 SPMDCompatibilityTracker.indicatePessimisticFixpoint(); 3746 else if (!SPMDCompatibilityTracker.empty()) { 3747 // Check if all reaching kernels agree on the mode, as we otherwise 3748 // cannot guard instructions. We might not be sure about the mode, so 3749 // we cannot fix the internal SPMD-ization state either. 3750 int SPMD = 0, Generic = 0; 3751 for (auto *Kernel : ReachingKernelEntries) { 3752 auto &CBAA = A.getAAFor<AAKernelInfo>( 3753 *this, IRPosition::function(*Kernel), DepClassTy::OPTIONAL); 3754 if (CBAA.SPMDCompatibilityTracker.isValidState() && 3755 CBAA.SPMDCompatibilityTracker.isAssumed()) 3756 ++SPMD; 3757 else 3758 ++Generic; 3759 if (!CBAA.SPMDCompatibilityTracker.isAtFixpoint()) 3760 UsedAssumedInformationFromReachingKernels = true; 3761 } 3762 if (SPMD != 0 && Generic != 0) 3763 SPMDCompatibilityTracker.indicatePessimisticFixpoint(); 3764 } 3765 } 3766 3767 // Callback to check a call instruction. 3768 bool AllParallelRegionStatesWereFixed = true; 3769 bool AllSPMDStatesWereFixed = true; 3770 auto CheckCallInst = [&](Instruction &I) { 3771 auto &CB = cast<CallBase>(I); 3772 auto &CBAA = A.getAAFor<AAKernelInfo>( 3773 *this, IRPosition::callsite_function(CB), DepClassTy::OPTIONAL); 3774 getState() ^= CBAA.getState(); 3775 AllSPMDStatesWereFixed &= CBAA.SPMDCompatibilityTracker.isAtFixpoint(); 3776 AllParallelRegionStatesWereFixed &= 3777 CBAA.ReachedKnownParallelRegions.isAtFixpoint(); 3778 AllParallelRegionStatesWereFixed &= 3779 CBAA.ReachedUnknownParallelRegions.isAtFixpoint(); 3780 return true; 3781 }; 3782 3783 bool UsedAssumedInformationInCheckCallInst = false; 3784 if (!A.checkForAllCallLikeInstructions( 3785 CheckCallInst, *this, UsedAssumedInformationInCheckCallInst)) { 3786 LLVM_DEBUG(dbgs() << TAG 3787 << "Failed to visit all call-like instructions!\n";); 3788 return indicatePessimisticFixpoint(); 3789 } 3790 3791 // If we haven't used any assumed information for the reached parallel 3792 // region states, we can fix them. 3793 if (!UsedAssumedInformationInCheckCallInst && 3794 AllParallelRegionStatesWereFixed) { 3795 ReachedKnownParallelRegions.indicateOptimisticFixpoint(); 3796 ReachedUnknownParallelRegions.indicateOptimisticFixpoint(); 3797 } 3798 3799 // If we are sure there are no parallel regions in the kernel, we do not 3800 // want SPMD mode.
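// Rationale: without parallel regions SPMD mode has no upside; it would
// merely force guarding of the (sequential) side effects, while in generic
// mode without a state machine the workers simply exit.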
3801 if (IsKernelEntry && ReachedUnknownParallelRegions.isAtFixpoint() && 3802 ReachedKnownParallelRegions.isAtFixpoint() && 3803 ReachedUnknownParallelRegions.isValidState() && 3804 ReachedKnownParallelRegions.isValidState() && 3805 !mayContainParallelRegion()) 3806 SPMDCompatibilityTracker.indicatePessimisticFixpoint(); 3807 3808 // If we haven't used any assumed information for the SPMD state, we can 3809 // fix it. 3810 if (!UsedAssumedInformationInCheckRWInst && 3811 !UsedAssumedInformationInCheckCallInst && 3812 !UsedAssumedInformationFromReachingKernels && AllSPMDStatesWereFixed) 3813 SPMDCompatibilityTracker.indicateOptimisticFixpoint(); 3814 3815 return StateBefore == getState() ? ChangeStatus::UNCHANGED 3816 : ChangeStatus::CHANGED; 3817 } 3818 3819 private: 3820 /// Update info regarding reaching kernels. 3821 void updateReachingKernelEntries(Attributor &A, 3822 bool &AllReachingKernelsKnown) { 3823 auto PredCallSite = [&](AbstractCallSite ACS) { 3824 Function *Caller = ACS.getInstruction()->getFunction(); 3825 3826 assert(Caller && "Caller is nullptr"); 3827 3828 auto &CAA = A.getOrCreateAAFor<AAKernelInfo>( 3829 IRPosition::function(*Caller), this, DepClassTy::REQUIRED); 3830 if (CAA.ReachingKernelEntries.isValidState()) { 3831 ReachingKernelEntries ^= CAA.ReachingKernelEntries; 3832 return true; 3833 } 3834 3835 // We lost track of the caller of the associated function; any kernel 3836 // could reach it now. 3837 ReachingKernelEntries.indicatePessimisticFixpoint(); 3838 3839 return true; 3840 }; 3841 3842 if (!A.checkForAllCallSites(PredCallSite, *this, 3843 true /* RequireAllCallSites */, 3844 AllReachingKernelsKnown)) 3845 ReachingKernelEntries.indicatePessimisticFixpoint(); 3846 } 3847 3848 /// Update info regarding parallel levels. 3849 void updateParallelLevels(Attributor &A) { 3850 auto &OMPInfoCache = static_cast<OMPInformationCache &>(A.getInfoCache()); 3851 OMPInformationCache::RuntimeFunctionInfo &Parallel51RFI = 3852 OMPInfoCache.RFIs[OMPRTL___kmpc_parallel_51]; 3853 3854 auto PredCallSite = [&](AbstractCallSite ACS) { 3855 Function *Caller = ACS.getInstruction()->getFunction(); 3856 3857 assert(Caller && "Caller is nullptr"); 3858 3859 auto &CAA = 3860 A.getOrCreateAAFor<AAKernelInfo>(IRPosition::function(*Caller)); 3861 if (CAA.ParallelLevels.isValidState()) { 3862 // Any function that is called by `__kmpc_parallel_51` will not be 3863 // folded, as the parallel level in the function is updated. Getting 3864 // this right would make the analysis depend on the runtime 3865 // implementation, and future changes to the implementation could 3866 // render the analysis wrong. As a consequence, we are just conservative here. 3867 if (Caller == Parallel51RFI.Declaration) { 3868 ParallelLevels.indicatePessimisticFixpoint(); 3869 return true; 3870 } 3871 3872 ParallelLevels ^= CAA.ParallelLevels; 3873 3874 return true; 3875 } 3876 3877 // We lost track of the caller of the associated function; any kernel 3878 // could reach it now. 3879 ParallelLevels.indicatePessimisticFixpoint(); 3880 3881 return true; 3882 }; 3883 3884 bool AllCallSitesKnown = true; 3885 if (!A.checkForAllCallSites(PredCallSite, *this, 3886 true /* RequireAllCallSites */, 3887 AllCallSitesKnown)) 3888 ParallelLevels.indicatePessimisticFixpoint(); 3889 } 3890 }; 3891 3892 /// The call site kernel info abstract attribute, basically, what can we say 3893 /// about a call site with regard to the KernelInfoState. For now this simply 3894 /// forwards the information from the callee.
3895 struct AAKernelInfoCallSite : AAKernelInfo { 3896 AAKernelInfoCallSite(const IRPosition &IRP, Attributor &A) 3897 : AAKernelInfo(IRP, A) {} 3898 3899 /// See AbstractAttribute::initialize(...). 3900 void initialize(Attributor &A) override { 3901 AAKernelInfo::initialize(A); 3902 3903 CallBase &CB = cast<CallBase>(getAssociatedValue()); 3904 Function *Callee = getAssociatedFunction(); 3905 3906 auto &AssumptionAA = A.getAAFor<AAAssumptionInfo>( 3907 *this, IRPosition::callsite_function(CB), DepClassTy::OPTIONAL); 3908 3909 // Check for SPMD-mode assumptions. 3910 if (AssumptionAA.hasAssumption("ompx_spmd_amenable")) { 3911 SPMDCompatibilityTracker.indicateOptimisticFixpoint(); 3912 indicateOptimisticFixpoint(); 3913 } 3914 3915 // First weed out calls we do not care about, that is readonly/readnone 3916 // calls, intrinsics, and "no_openmp" calls. None of these can reach a 3917 // parallel region or anything else we are looking for. 3918 if (!CB.mayWriteToMemory() || isa<IntrinsicInst>(CB)) { 3919 indicateOptimisticFixpoint(); 3920 return; 3921 } 3922 3923 // Next we check if we know the callee. If it is a known OpenMP function, 3924 // we will handle it explicitly in the switch below. If it is not, we 3925 // will use an AAKernelInfo object on the callee to gather information and 3926 // merge that into the current state. The latter happens in updateImpl. 3927 auto &OMPInfoCache = static_cast<OMPInformationCache &>(A.getInfoCache()); 3928 const auto &It = OMPInfoCache.RuntimeFunctionIDMap.find(Callee); 3929 if (It == OMPInfoCache.RuntimeFunctionIDMap.end()) { 3930 // Unknown callees or declarations are not analyzable; we give up. 3931 if (!Callee || !A.isFunctionIPOAmendable(*Callee)) { 3932 3933 // Unknown callees might contain parallel regions, except if they have 3934 // an appropriate assumption attached. 3935 if (!(AssumptionAA.hasAssumption("omp_no_openmp") || 3936 AssumptionAA.hasAssumption("omp_no_parallelism"))) 3937 ReachedUnknownParallelRegions.insert(&CB); 3938 3939 // If SPMDCompatibilityTracker is not fixed, we need to give up on the 3940 // idea that we can run something unknown in SPMD-mode. 3941 if (!SPMDCompatibilityTracker.isAtFixpoint()) { 3942 SPMDCompatibilityTracker.indicatePessimisticFixpoint(); 3943 SPMDCompatibilityTracker.insert(&CB); 3944 } 3945 3946 // We have updated the state for this unknown call properly; there won't 3947 // be any change, so we indicate a fixpoint. 3948 indicateOptimisticFixpoint(); 3949 } 3950 // If the callee is known and can be used in IPO, we will update the state 3951 // based on the callee state in updateImpl. 3952 return; 3953 } 3954 3955 const unsigned int WrapperFunctionArgNo = 6; 3956 RuntimeFunction RF = It->getSecond(); 3957 switch (RF) { 3958 // All the functions we know are compatible with SPMD mode.
3959 case OMPRTL___kmpc_is_spmd_exec_mode: 3960 case OMPRTL___kmpc_distribute_static_fini: 3961 case OMPRTL___kmpc_for_static_fini: 3962 case OMPRTL___kmpc_global_thread_num: 3963 case OMPRTL___kmpc_get_hardware_num_threads_in_block: 3964 case OMPRTL___kmpc_get_hardware_num_blocks: 3965 case OMPRTL___kmpc_single: 3966 case OMPRTL___kmpc_end_single: 3967 case OMPRTL___kmpc_master: 3968 case OMPRTL___kmpc_end_master: 3969 case OMPRTL___kmpc_barrier: 3970 case OMPRTL___kmpc_nvptx_parallel_reduce_nowait_v2: 3971 case OMPRTL___kmpc_nvptx_teams_reduce_nowait_v2: 3972 case OMPRTL___kmpc_nvptx_end_reduce_nowait: 3973 break; 3974 case OMPRTL___kmpc_distribute_static_init_4: 3975 case OMPRTL___kmpc_distribute_static_init_4u: 3976 case OMPRTL___kmpc_distribute_static_init_8: 3977 case OMPRTL___kmpc_distribute_static_init_8u: 3978 case OMPRTL___kmpc_for_static_init_4: 3979 case OMPRTL___kmpc_for_static_init_4u: 3980 case OMPRTL___kmpc_for_static_init_8: 3981 case OMPRTL___kmpc_for_static_init_8u: { 3982 // Check the schedule and allow static schedule in SPMD mode. 3983 unsigned ScheduleArgOpNo = 2; 3984 auto *ScheduleTypeCI = 3985 dyn_cast<ConstantInt>(CB.getArgOperand(ScheduleArgOpNo)); 3986 unsigned ScheduleTypeVal = 3987 ScheduleTypeCI ? ScheduleTypeCI->getZExtValue() : 0; 3988 switch (OMPScheduleType(ScheduleTypeVal)) { 3989 case OMPScheduleType::Static: 3990 case OMPScheduleType::StaticChunked: 3991 case OMPScheduleType::Distribute: 3992 case OMPScheduleType::DistributeChunked: 3993 break; 3994 default: 3995 SPMDCompatibilityTracker.indicatePessimisticFixpoint(); 3996 SPMDCompatibilityTracker.insert(&CB); 3997 break; 3998 } 3999 } break; 4000 case OMPRTL___kmpc_target_init: 4001 KernelInitCB = &CB; 4002 break; 4003 case OMPRTL___kmpc_target_deinit: 4004 KernelDeinitCB = &CB; 4005 break; 4006 case OMPRTL___kmpc_parallel_51: 4007 if (auto *ParallelRegion = dyn_cast<Function>( 4008 CB.getArgOperand(WrapperFunctionArgNo)->stripPointerCasts())) { 4009 ReachedKnownParallelRegions.insert(ParallelRegion); 4010 break; 4011 } 4012 // The condition above should usually get the parallel region function 4013 // pointer and record it. On the off chance it doesn't, we assume the 4014 // worst. 4015 ReachedUnknownParallelRegions.insert(&CB); 4016 break; 4017 case OMPRTL___kmpc_omp_task: 4018 // We do not look into tasks right now; just give up. 4019 SPMDCompatibilityTracker.indicatePessimisticFixpoint(); 4020 SPMDCompatibilityTracker.insert(&CB); 4021 ReachedUnknownParallelRegions.insert(&CB); 4022 break; 4023 case OMPRTL___kmpc_alloc_shared: 4024 case OMPRTL___kmpc_free_shared: 4025 // Return without setting a fixpoint, to be resolved in updateImpl. 4026 return; 4027 default: 4028 // Unknown OpenMP runtime calls cannot be executed in SPMD-mode, 4029 // generally. However, they do not hide parallel regions. 4030 SPMDCompatibilityTracker.indicatePessimisticFixpoint(); 4031 SPMDCompatibilityTracker.insert(&CB); 4032 break; 4033 } 4034 // All other OpenMP runtime calls will not reach parallel regions, so they 4035 // can be safely ignored for now. Since it is a known OpenMP runtime call, we 4036 // have now modeled all effects and there is no need for any update.
4037 indicateOptimisticFixpoint(); 4038 } 4039 4040 ChangeStatus updateImpl(Attributor &A) override { 4041 // TODO: Once we have call site specific value information, we can provide 4042 // call site specific liveness information, and then it makes 4043 // sense to specialize attributes for call site arguments instead of 4044 // redirecting requests to the callee argument. 4045 Function *F = getAssociatedFunction(); 4046 4047 auto &OMPInfoCache = static_cast<OMPInformationCache &>(A.getInfoCache()); 4048 const auto &It = OMPInfoCache.RuntimeFunctionIDMap.find(F); 4049 4050 // If F is not a runtime function, propagate the AAKernelInfo of the callee. 4051 if (It == OMPInfoCache.RuntimeFunctionIDMap.end()) { 4052 const IRPosition &FnPos = IRPosition::function(*F); 4053 auto &FnAA = A.getAAFor<AAKernelInfo>(*this, FnPos, DepClassTy::REQUIRED); 4054 if (getState() == FnAA.getState()) 4055 return ChangeStatus::UNCHANGED; 4056 getState() = FnAA.getState(); 4057 return ChangeStatus::CHANGED; 4058 } 4059 4060 // F is a runtime function that allocates or frees memory; check 4061 // AAHeapToStack and AAHeapToShared. 4062 KernelInfoState StateBefore = getState(); 4063 assert((It->getSecond() == OMPRTL___kmpc_alloc_shared || 4064 It->getSecond() == OMPRTL___kmpc_free_shared) && 4065 "Expected a __kmpc_alloc_shared or __kmpc_free_shared runtime call"); 4066 4067 CallBase &CB = cast<CallBase>(getAssociatedValue()); 4068 4069 auto &HeapToStackAA = A.getAAFor<AAHeapToStack>( 4070 *this, IRPosition::function(*CB.getCaller()), DepClassTy::OPTIONAL); 4071 auto &HeapToSharedAA = A.getAAFor<AAHeapToShared>( 4072 *this, IRPosition::function(*CB.getCaller()), DepClassTy::OPTIONAL); 4073 4074 RuntimeFunction RF = It->getSecond(); 4075 4076 switch (RF) { 4077 // If neither HeapToStack nor HeapToShared assumes the call is removed, 4078 // assume SPMD incompatibility. 4079 case OMPRTL___kmpc_alloc_shared: 4080 if (!HeapToStackAA.isAssumedHeapToStack(CB) && 4081 !HeapToSharedAA.isAssumedHeapToShared(CB)) 4082 SPMDCompatibilityTracker.insert(&CB); 4083 break; 4084 case OMPRTL___kmpc_free_shared: 4085 if (!HeapToStackAA.isAssumedHeapToStackRemovedFree(CB) && 4086 !HeapToSharedAA.isAssumedHeapToSharedRemovedFree(CB)) 4087 SPMDCompatibilityTracker.insert(&CB); 4088 break; 4089 default: 4090 SPMDCompatibilityTracker.indicatePessimisticFixpoint(); 4091 SPMDCompatibilityTracker.insert(&CB); 4092 } 4093 4094 return StateBefore == getState() ? ChangeStatus::UNCHANGED 4095 : ChangeStatus::CHANGED; 4096 } 4097 }; 4098 4099 struct AAFoldRuntimeCall 4100 : public StateWrapper<BooleanState, AbstractAttribute> { 4101 using Base = StateWrapper<BooleanState, AbstractAttribute>; 4102 4103 AAFoldRuntimeCall(const IRPosition &IRP, Attributor &A) : Base(IRP) {} 4104 4105 /// Statistics are tracked as part of manifest for now. 4106 void trackStatistics() const override {} 4107 4108 /// Create an abstract attribute view for the position \p IRP.
4109 static AAFoldRuntimeCall &createForPosition(const IRPosition &IRP, 4110 Attributor &A); 4111 4112 /// See AbstractAttribute::getName() 4113 const std::string getName() const override { return "AAFoldRuntimeCall"; } 4114 4115 /// See AbstractAttribute::getIdAddr() 4116 const char *getIdAddr() const override { return &ID; } 4117 4118 /// This function should return true if the type of the \p AA is 4119 /// AAFoldRuntimeCall 4120 static bool classof(const AbstractAttribute *AA) { 4121 return (AA->getIdAddr() == &ID); 4122 } 4123 4124 static const char ID; 4125 }; 4126 4127 struct AAFoldRuntimeCallCallSiteReturned : AAFoldRuntimeCall { 4128 AAFoldRuntimeCallCallSiteReturned(const IRPosition &IRP, Attributor &A) 4129 : AAFoldRuntimeCall(IRP, A) {} 4130 4131 /// See AbstractAttribute::getAsStr() 4132 const std::string getAsStr() const override { 4133 if (!isValidState()) 4134 return "<invalid>"; 4135 4136 std::string Str("simplified value: "); 4137 4138 if (!SimplifiedValue.hasValue()) 4139 return Str + std::string("none"); 4140 4141 if (!SimplifiedValue.getValue()) 4142 return Str + std::string("nullptr"); 4143 4144 if (ConstantInt *CI = dyn_cast<ConstantInt>(SimplifiedValue.getValue())) 4145 return Str + std::to_string(CI->getSExtValue()); 4146 4147 return Str + std::string("unknown"); 4148 } 4149 4150 void initialize(Attributor &A) override { 4151 if (DisableOpenMPOptFolding) 4152 indicatePessimisticFixpoint(); 4153 4154 Function *Callee = getAssociatedFunction(); 4155 4156 auto &OMPInfoCache = static_cast<OMPInformationCache &>(A.getInfoCache()); 4157 const auto &It = OMPInfoCache.RuntimeFunctionIDMap.find(Callee); 4158 assert(It != OMPInfoCache.RuntimeFunctionIDMap.end() && 4159 "Expected a known OpenMP runtime function"); 4160 4161 RFKind = It->getSecond(); 4162 4163 CallBase &CB = cast<CallBase>(getAssociatedValue()); 4164 A.registerSimplificationCallback( 4165 IRPosition::callsite_returned(CB), 4166 [&](const IRPosition &IRP, const AbstractAttribute *AA, 4167 bool &UsedAssumedInformation) -> Optional<Value *> { 4168 assert((isValidState() || (SimplifiedValue.hasValue() && 4169 SimplifiedValue.getValue() == nullptr)) && 4170 "Unexpected invalid state!"); 4171 4172 if (!isAtFixpoint()) { 4173 UsedAssumedInformation = true; 4174 if (AA) 4175 A.recordDependence(*this, *AA, DepClassTy::OPTIONAL); 4176 } 4177 return SimplifiedValue; 4178 }); 4179 } 4180 4181 ChangeStatus updateImpl(Attributor &A) override { 4182 ChangeStatus Changed = ChangeStatus::UNCHANGED; 4183 switch (RFKind) { 4184 case OMPRTL___kmpc_is_spmd_exec_mode: 4185 Changed |= foldIsSPMDExecMode(A); 4186 break; 4187 case OMPRTL___kmpc_is_generic_main_thread_id: 4188 Changed |= foldIsGenericMainThread(A); 4189 break; 4190 case OMPRTL___kmpc_parallel_level: 4191 Changed |= foldParallelLevel(A); 4192 break; 4193 case OMPRTL___kmpc_get_hardware_num_threads_in_block: 4194 Changed = Changed | foldKernelFnAttribute(A, "omp_target_thread_limit"); 4195 break; 4196 case OMPRTL___kmpc_get_hardware_num_blocks: 4197 Changed = Changed | foldKernelFnAttribute(A, "omp_target_num_teams"); 4198 break; 4199 default: 4200 llvm_unreachable("Unhandled OpenMP runtime function!"); 4201 } 4202 4203 return Changed; 4204 } 4205 4206 ChangeStatus manifest(Attributor &A) override { 4207 ChangeStatus Changed = ChangeStatus::UNCHANGED; 4208 4209 if (SimplifiedValue.hasValue() && SimplifiedValue.getValue()) { 4210 Instruction &I = *getCtxI(); 4211 A.changeValueAfterManifest(I, **SimplifiedValue); 4212 A.deleteAfterManifest(I); 4213 4214 CallBase *CB = 
dyn_cast<CallBase>(&I); 4215 auto Remark = [&](OptimizationRemark OR) { 4216 if (auto *C = dyn_cast<ConstantInt>(*SimplifiedValue)) 4217 return OR << "Replacing OpenMP runtime call " 4218 << CB->getCalledFunction()->getName() << " with " 4219 << ore::NV("FoldedValue", C->getZExtValue()) << "."; 4220 return OR << "Replacing OpenMP runtime call " 4221 << CB->getCalledFunction()->getName() << "."; 4222 }; 4223 4224 if (CB && EnableVerboseRemarks) 4225 A.emitRemark<OptimizationRemark>(CB, "OMP180", Remark); 4226 4227 LLVM_DEBUG(dbgs() << TAG << "Replacing runtime call: " << I << " with " 4228 << **SimplifiedValue << "\n"); 4229 4230 Changed = ChangeStatus::CHANGED; 4231 } 4232 4233 return Changed; 4234 } 4235 4236 ChangeStatus indicatePessimisticFixpoint() override { 4237 SimplifiedValue = nullptr; 4238 return AAFoldRuntimeCall::indicatePessimisticFixpoint(); 4239 } 4240 4241 private: 4242 /// Fold __kmpc_is_spmd_exec_mode into a constant if possible. 4243 ChangeStatus foldIsSPMDExecMode(Attributor &A) { 4244 Optional<Value *> SimplifiedValueBefore = SimplifiedValue; 4245 4246 unsigned AssumedSPMDCount = 0, KnownSPMDCount = 0; 4247 unsigned AssumedNonSPMDCount = 0, KnownNonSPMDCount = 0; 4248 auto &CallerKernelInfoAA = A.getAAFor<AAKernelInfo>( 4249 *this, IRPosition::function(*getAnchorScope()), DepClassTy::REQUIRED); 4250 4251 if (!CallerKernelInfoAA.ReachingKernelEntries.isValidState()) 4252 return indicatePessimisticFixpoint(); 4253 4254 for (Kernel K : CallerKernelInfoAA.ReachingKernelEntries) { 4255 auto &AA = A.getAAFor<AAKernelInfo>(*this, IRPosition::function(*K), 4256 DepClassTy::REQUIRED); 4257 4258 if (!AA.isValidState()) { 4259 SimplifiedValue = nullptr; 4260 return indicatePessimisticFixpoint(); 4261 } 4262 4263 if (AA.SPMDCompatibilityTracker.isAssumed()) { 4264 if (AA.SPMDCompatibilityTracker.isAtFixpoint()) 4265 ++KnownSPMDCount; 4266 else 4267 ++AssumedSPMDCount; 4268 } else { 4269 if (AA.SPMDCompatibilityTracker.isAtFixpoint()) 4270 ++KnownNonSPMDCount; 4271 else 4272 ++AssumedNonSPMDCount; 4273 } 4274 } 4275 4276 if ((AssumedSPMDCount + KnownSPMDCount) && 4277 (AssumedNonSPMDCount + KnownNonSPMDCount)) 4278 return indicatePessimisticFixpoint(); 4279 4280 auto &Ctx = getAnchorValue().getContext(); 4281 if (KnownSPMDCount || AssumedSPMDCount) { 4282 assert(KnownNonSPMDCount == 0 && AssumedNonSPMDCount == 0 && 4283 "Expected only SPMD kernels!"); 4284 // All reaching kernels are in SPMD mode. Update all function calls to 4285 // __kmpc_is_spmd_exec_mode to 1. 4286 SimplifiedValue = ConstantInt::get(Type::getInt8Ty(Ctx), true); 4287 } else if (KnownNonSPMDCount || AssumedNonSPMDCount) { 4288 assert(KnownSPMDCount == 0 && AssumedSPMDCount == 0 && 4289 "Expected only non-SPMD kernels!"); 4290 // All reaching kernels are in non-SPMD mode. Update all function 4291 // calls to __kmpc_is_spmd_exec_mode to 0. 4292 SimplifiedValue = ConstantInt::get(Type::getInt8Ty(Ctx), false); 4293 } else { 4294 // We have empty reaching kernels, therefore we cannot tell if the 4295 // associated call site can be folded. At this moment, SimplifiedValue 4296 // must be none. 4297 assert(!SimplifiedValue.hasValue() && "SimplifiedValue should be none"); 4298 } 4299 4300 return SimplifiedValue == SimplifiedValueBefore ? ChangeStatus::UNCHANGED 4301 : ChangeStatus::CHANGED; 4302 } 4303 4304 /// Fold __kmpc_is_generic_main_thread_id into a constant if possible. 
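/// The fold to "true" is possible if the call site is provably executed
/// only by the initial (main) thread, which is the property the
/// AAExecutionDomain query below establishes.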
4305 ChangeStatus foldIsGenericMainThread(Attributor &A) { 4306 Optional<Value *> SimplifiedValueBefore = SimplifiedValue; 4307 4308 CallBase &CB = cast<CallBase>(getAssociatedValue()); 4309 Function *F = CB.getFunction(); 4310 const auto &ExecutionDomainAA = A.getAAFor<AAExecutionDomain>( 4311 *this, IRPosition::function(*F), DepClassTy::REQUIRED); 4312 4313 if (!ExecutionDomainAA.isValidState()) 4314 return indicatePessimisticFixpoint(); 4315 4316 auto &Ctx = getAnchorValue().getContext(); 4317 if (ExecutionDomainAA.isExecutedByInitialThreadOnly(CB)) 4318 SimplifiedValue = ConstantInt::get(Type::getInt8Ty(Ctx), true); 4319 else 4320 return indicatePessimisticFixpoint(); 4321 4322 return SimplifiedValue == SimplifiedValueBefore ? ChangeStatus::UNCHANGED 4323 : ChangeStatus::CHANGED; 4324 } 4325 4326 /// Fold __kmpc_parallel_level into a constant if possible. 4327 ChangeStatus foldParallelLevel(Attributor &A) { 4328 Optional<Value *> SimplifiedValueBefore = SimplifiedValue; 4329 4330 auto &CallerKernelInfoAA = A.getAAFor<AAKernelInfo>( 4331 *this, IRPosition::function(*getAnchorScope()), DepClassTy::REQUIRED); 4332 4333 if (!CallerKernelInfoAA.ParallelLevels.isValidState()) 4334 return indicatePessimisticFixpoint(); 4335 4336 if (!CallerKernelInfoAA.ReachingKernelEntries.isValidState()) 4337 return indicatePessimisticFixpoint(); 4338 4339 if (CallerKernelInfoAA.ReachingKernelEntries.empty()) { 4340 assert(!SimplifiedValue.hasValue() && 4341 "SimplifiedValue should keep none at this point"); 4342 return ChangeStatus::UNCHANGED; 4343 } 4344 4345 unsigned AssumedSPMDCount = 0, KnownSPMDCount = 0; 4346 unsigned AssumedNonSPMDCount = 0, KnownNonSPMDCount = 0; 4347 for (Kernel K : CallerKernelInfoAA.ReachingKernelEntries) { 4348 auto &AA = A.getAAFor<AAKernelInfo>(*this, IRPosition::function(*K), 4349 DepClassTy::REQUIRED); 4350 if (!AA.SPMDCompatibilityTracker.isValidState()) 4351 return indicatePessimisticFixpoint(); 4352 4353 if (AA.SPMDCompatibilityTracker.isAssumed()) { 4354 if (AA.SPMDCompatibilityTracker.isAtFixpoint()) 4355 ++KnownSPMDCount; 4356 else 4357 ++AssumedSPMDCount; 4358 } else { 4359 if (AA.SPMDCompatibilityTracker.isAtFixpoint()) 4360 ++KnownNonSPMDCount; 4361 else 4362 ++AssumedNonSPMDCount; 4363 } 4364 } 4365 4366 if ((AssumedSPMDCount + KnownSPMDCount) && 4367 (AssumedNonSPMDCount + KnownNonSPMDCount)) 4368 return indicatePessimisticFixpoint(); 4369 4370 auto &Ctx = getAnchorValue().getContext(); 4371 // If the caller can only be reached by SPMD kernel entries, the parallel 4372 // level is 1. Similarly, if the caller can only be reached by non-SPMD 4373 // kernel entries, it is 0. 4374 if (AssumedSPMDCount || KnownSPMDCount) { 4375 assert(KnownNonSPMDCount == 0 && AssumedNonSPMDCount == 0 && 4376 "Expected only SPMD kernels!"); 4377 SimplifiedValue = ConstantInt::get(Type::getInt8Ty(Ctx), 1); 4378 } else { 4379 assert(KnownSPMDCount == 0 && AssumedSPMDCount == 0 && 4380 "Expected only non-SPMD kernels!"); 4381 SimplifiedValue = ConstantInt::get(Type::getInt8Ty(Ctx), 0); 4382 } 4383 return SimplifiedValue == SimplifiedValueBefore ? 
ChangeStatus::UNCHANGED 4384 : ChangeStatus::CHANGED; 4385 } 4386 4387 ChangeStatus foldKernelFnAttribute(Attributor &A, llvm::StringRef Attr) { 4388 // Specialize only if all the calls agree with the attribute constant value. 4389 int32_t CurrentAttrValue = -1; 4390 Optional<Value *> SimplifiedValueBefore = SimplifiedValue; 4391 4392 auto &CallerKernelInfoAA = A.getAAFor<AAKernelInfo>( 4393 *this, IRPosition::function(*getAnchorScope()), DepClassTy::REQUIRED); 4394 4395 if (!CallerKernelInfoAA.ReachingKernelEntries.isValidState()) 4396 return indicatePessimisticFixpoint(); 4397 4398 // Iterate over the kernels that reach this function. 4399 for (Kernel K : CallerKernelInfoAA.ReachingKernelEntries) { 4400 int32_t NextAttrVal = -1; 4401 if (K->hasFnAttribute(Attr)) 4402 NextAttrVal = 4403 std::stoi(K->getFnAttribute(Attr).getValueAsString().str()); 4404 4405 if (NextAttrVal == -1 || 4406 (CurrentAttrValue != -1 && CurrentAttrValue != NextAttrVal)) 4407 return indicatePessimisticFixpoint(); 4408 CurrentAttrValue = NextAttrVal; 4409 } 4410 4411 if (CurrentAttrValue != -1) { 4412 auto &Ctx = getAnchorValue().getContext(); 4413 SimplifiedValue = 4414 ConstantInt::get(Type::getInt32Ty(Ctx), CurrentAttrValue); 4415 } 4416 return SimplifiedValue == SimplifiedValueBefore ? ChangeStatus::UNCHANGED 4417 : ChangeStatus::CHANGED; 4418 } 4419 4420 /// An optional value the associated value is assumed to fold to. That is, we 4421 /// assume the associated value (which is a call) can be replaced by this 4422 /// simplified value. 4423 Optional<Value *> SimplifiedValue; 4424 4425 /// The runtime function kind of the callee of the associated call site. 4426 RuntimeFunction RFKind; 4427 }; 4428 4429 } // namespace 4430 4431 /// Register folding call sites. 4432 void OpenMPOpt::registerFoldRuntimeCall(RuntimeFunction RF) { 4433 auto &RFI = OMPInfoCache.RFIs[RF]; 4434 RFI.foreachUse(SCC, [&](Use &U, Function &F) { 4435 CallInst *CI = OpenMPOpt::getCallIfRegularCall(U, &RFI); 4436 if (!CI) 4437 return false; 4438 A.getOrCreateAAFor<AAFoldRuntimeCall>( 4439 IRPosition::callsite_returned(*CI), /* QueryingAA */ nullptr, 4440 DepClassTy::NONE, /* ForceUpdate */ false, 4441 /* UpdateAfterInit */ false); 4442 return false; 4443 }); 4444 } 4445 4446 void OpenMPOpt::registerAAs(bool IsModulePass) { 4447 if (SCC.empty()) 4448 return; 4449 4450 if (IsModulePass) { 4451 // Ensure we create the AAKernelInfo AAs first and without triggering an 4452 // update. This will make sure we register all value simplification 4453 // callbacks before any other AA has the chance to create an AAValueSimplify 4454 // or similar. 4455 for (Function *Kernel : OMPInfoCache.Kernels) 4456 A.getOrCreateAAFor<AAKernelInfo>( 4457 IRPosition::function(*Kernel), /* QueryingAA */ nullptr, 4458 DepClassTy::NONE, /* ForceUpdate */ false, 4459 /* UpdateAfterInit */ false); 4460 4461 registerFoldRuntimeCall(OMPRTL___kmpc_is_generic_main_thread_id); 4462 registerFoldRuntimeCall(OMPRTL___kmpc_is_spmd_exec_mode); 4463 registerFoldRuntimeCall(OMPRTL___kmpc_parallel_level); 4464 registerFoldRuntimeCall(OMPRTL___kmpc_get_hardware_num_threads_in_block); 4465 registerFoldRuntimeCall(OMPRTL___kmpc_get_hardware_num_blocks); 4466 } 4467 4468 // Create CallSite AA for all Getters.
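// (A "Getter" here is the runtime function that reads an ICV, e.g.,
// omp_get_max_threads for the nthreads ICV.)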
4469 for (int Idx = 0; Idx < OMPInfoCache.ICVs.size() - 1; ++Idx) { 4470 auto ICVInfo = OMPInfoCache.ICVs[static_cast<InternalControlVar>(Idx)]; 4471 4472 auto &GetterRFI = OMPInfoCache.RFIs[ICVInfo.Getter]; 4473 4474 auto CreateAA = [&](Use &U, Function &Caller) { 4475 CallInst *CI = OpenMPOpt::getCallIfRegularCall(U, &GetterRFI); 4476 if (!CI) 4477 return false; 4478 4479 auto &CB = cast<CallBase>(*CI); 4480 4481 IRPosition CBPos = IRPosition::callsite_function(CB); 4482 A.getOrCreateAAFor<AAICVTracker>(CBPos); 4483 return false; 4484 }; 4485 4486 GetterRFI.foreachUse(SCC, CreateAA); 4487 } 4488 auto &GlobalizationRFI = OMPInfoCache.RFIs[OMPRTL___kmpc_alloc_shared]; 4489 auto CreateAA = [&](Use &U, Function &F) { 4490 A.getOrCreateAAFor<AAHeapToShared>(IRPosition::function(F)); 4491 return false; 4492 }; 4493 if (!DisableOpenMPOptDeglobalization) 4494 GlobalizationRFI.foreachUse(SCC, CreateAA); 4495 4496 // Create an ExecutionDomain AA for every function and a HeapToStack AA for 4497 // every function if there is a device kernel. 4498 if (!isOpenMPDevice(M)) 4499 return; 4500 4501 for (auto *F : SCC) { 4502 if (F->isDeclaration()) 4503 continue; 4504 4505 A.getOrCreateAAFor<AAExecutionDomain>(IRPosition::function(*F)); 4506 if (!DisableOpenMPOptDeglobalization) 4507 A.getOrCreateAAFor<AAHeapToStack>(IRPosition::function(*F)); 4508 4509 for (auto &I : instructions(*F)) { 4510 if (auto *LI = dyn_cast<LoadInst>(&I)) { 4511 bool UsedAssumedInformation = false; 4512 A.getAssumedSimplified(IRPosition::value(*LI), /* AA */ nullptr, 4513 UsedAssumedInformation); 4514 } 4515 } 4516 } 4517 } 4518 4519 const char AAICVTracker::ID = 0; 4520 const char AAKernelInfo::ID = 0; 4521 const char AAExecutionDomain::ID = 0; 4522 const char AAHeapToShared::ID = 0; 4523 const char AAFoldRuntimeCall::ID = 0; 4524 4525 AAICVTracker &AAICVTracker::createForPosition(const IRPosition &IRP, 4526 Attributor &A) { 4527 AAICVTracker *AA = nullptr; 4528 switch (IRP.getPositionKind()) { 4529 case IRPosition::IRP_INVALID: 4530 case IRPosition::IRP_FLOAT: 4531 case IRPosition::IRP_ARGUMENT: 4532 case IRPosition::IRP_CALL_SITE_ARGUMENT: 4533 llvm_unreachable("ICVTracker can only be created for function position!"); 4534 case IRPosition::IRP_RETURNED: 4535 AA = new (A.Allocator) AAICVTrackerFunctionReturned(IRP, A); 4536 break; 4537 case IRPosition::IRP_CALL_SITE_RETURNED: 4538 AA = new (A.Allocator) AAICVTrackerCallSiteReturned(IRP, A); 4539 break; 4540 case IRPosition::IRP_CALL_SITE: 4541 AA = new (A.Allocator) AAICVTrackerCallSite(IRP, A); 4542 break; 4543 case IRPosition::IRP_FUNCTION: 4544 AA = new (A.Allocator) AAICVTrackerFunction(IRP, A); 4545 break; 4546 } 4547 4548 return *AA; 4549 } 4550 4551 AAExecutionDomain &AAExecutionDomain::createForPosition(const IRPosition &IRP, 4552 Attributor &A) { 4553 AAExecutionDomainFunction *AA = nullptr; 4554 switch (IRP.getPositionKind()) { 4555 case IRPosition::IRP_INVALID: 4556 case IRPosition::IRP_FLOAT: 4557 case IRPosition::IRP_ARGUMENT: 4558 case IRPosition::IRP_CALL_SITE_ARGUMENT: 4559 case IRPosition::IRP_RETURNED: 4560 case IRPosition::IRP_CALL_SITE_RETURNED: 4561 case IRPosition::IRP_CALL_SITE: 4562 llvm_unreachable( 4563 "AAExecutionDomain can only be created for function position!"); 4564 case IRPosition::IRP_FUNCTION: 4565 AA = new (A.Allocator) AAExecutionDomainFunction(IRP, A); 4566 break; 4567 } 4568 4569 return *AA; 4570 } 4571 4572 AAHeapToShared &AAHeapToShared::createForPosition(const IRPosition &IRP, 4573 Attributor &A) { 4574 AAHeapToSharedFunction *AA = 
nullptr; 4575 switch (IRP.getPositionKind()) { 4576 case IRPosition::IRP_INVALID: 4577 case IRPosition::IRP_FLOAT: 4578 case IRPosition::IRP_ARGUMENT: 4579 case IRPosition::IRP_CALL_SITE_ARGUMENT: 4580 case IRPosition::IRP_RETURNED: 4581 case IRPosition::IRP_CALL_SITE_RETURNED: 4582 case IRPosition::IRP_CALL_SITE: 4583 llvm_unreachable( 4584 "AAHeapToShared can only be created for function position!"); 4585 case IRPosition::IRP_FUNCTION: 4586 AA = new (A.Allocator) AAHeapToSharedFunction(IRP, A); 4587 break; 4588 } 4589 4590 return *AA; 4591 } 4592 4593 AAKernelInfo &AAKernelInfo::createForPosition(const IRPosition &IRP, 4594 Attributor &A) { 4595 AAKernelInfo *AA = nullptr; 4596 switch (IRP.getPositionKind()) { 4597 case IRPosition::IRP_INVALID: 4598 case IRPosition::IRP_FLOAT: 4599 case IRPosition::IRP_ARGUMENT: 4600 case IRPosition::IRP_RETURNED: 4601 case IRPosition::IRP_CALL_SITE_RETURNED: 4602 case IRPosition::IRP_CALL_SITE_ARGUMENT: 4603 llvm_unreachable("KernelInfo can only be created for function position!"); 4604 case IRPosition::IRP_CALL_SITE: 4605 AA = new (A.Allocator) AAKernelInfoCallSite(IRP, A); 4606 break; 4607 case IRPosition::IRP_FUNCTION: 4608 AA = new (A.Allocator) AAKernelInfoFunction(IRP, A); 4609 break; 4610 } 4611 4612 return *AA; 4613 } 4614 4615 AAFoldRuntimeCall &AAFoldRuntimeCall::createForPosition(const IRPosition &IRP, 4616 Attributor &A) { 4617 AAFoldRuntimeCall *AA = nullptr; 4618 switch (IRP.getPositionKind()) { 4619 case IRPosition::IRP_INVALID: 4620 case IRPosition::IRP_FLOAT: 4621 case IRPosition::IRP_ARGUMENT: 4622 case IRPosition::IRP_RETURNED: 4623 case IRPosition::IRP_FUNCTION: 4624 case IRPosition::IRP_CALL_SITE: 4625 case IRPosition::IRP_CALL_SITE_ARGUMENT: 4626 llvm_unreachable( 4627 "AAFoldRuntimeCall can only be created for call site position!"); 4627 case IRPosition::IRP_CALL_SITE_RETURNED: 4628 AA = new (A.Allocator) AAFoldRuntimeCallCallSiteReturned(IRP, A); 4629 break; 4630 } 4631 4632 return *AA; 4633 } 4634 4635 PreservedAnalyses OpenMPOptPass::run(Module &M, ModuleAnalysisManager &AM) { 4636 if (!containsOpenMP(M)) 4637 return PreservedAnalyses::all(); 4638 if (DisableOpenMPOptimizations) 4639 return PreservedAnalyses::all(); 4640 4641 FunctionAnalysisManager &FAM = 4642 AM.getResult<FunctionAnalysisManagerModuleProxy>(M).getManager(); 4643 KernelSet Kernels = getDeviceKernels(M); 4644 4645 auto IsCalled = [&](Function &F) { 4646 if (Kernels.contains(&F)) 4647 return true; 4648 for (const User *U : F.users()) 4649 if (!isa<BlockAddress>(U)) 4650 return true; 4651 return false; 4652 }; 4653 4654 auto EmitRemark = [&](Function &F) { 4655 auto &ORE = FAM.getResult<OptimizationRemarkEmitterAnalysis>(F); 4656 ORE.emit([&]() { 4657 OptimizationRemarkAnalysis ORA(DEBUG_TYPE, "OMP140", &F); 4658 return ORA << "Could not internalize function. " 4659 << "Some optimizations may not be possible. [OMP140]"; 4660 }); 4661 }; 4662 4663 // Create internal copies of each function if this is a kernel module. This 4664 // allows interprocedural passes to see every call edge.
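// The internalized copies receive local linkage, so all of their call sites
// are visible to the Attributor, while the original definitions remain
// available to external callers.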
4665 DenseMap<Function *, Function *> InternalizedMap; 4666 if (isOpenMPDevice(M)) { 4667 SmallPtrSet<Function *, 16> InternalizeFns; 4668 for (Function &F : M) 4669 if (!F.isDeclaration() && !Kernels.contains(&F) && IsCalled(F) && 4670 !DisableInternalization) { 4671 if (Attributor::isInternalizable(F)) { 4672 InternalizeFns.insert(&F); 4673 } else if (!F.hasLocalLinkage() && !F.hasFnAttribute(Attribute::Cold)) { 4674 EmitRemark(F); 4675 } 4676 } 4677 4678 Attributor::internalizeFunctions(InternalizeFns, InternalizedMap); 4679 } 4680 4681 // Look at every function in the Module unless it was internalized. 4682 SmallVector<Function *, 16> SCC; 4683 for (Function &F : M) 4684 if (!F.isDeclaration() && !InternalizedMap.lookup(&F)) 4685 SCC.push_back(&F); 4686 4687 if (SCC.empty()) 4688 return PreservedAnalyses::all(); 4689 4690 AnalysisGetter AG(FAM); 4691 4692 auto OREGetter = [&FAM](Function *F) -> OptimizationRemarkEmitter & { 4693 return FAM.getResult<OptimizationRemarkEmitterAnalysis>(*F); 4694 }; 4695 4696 BumpPtrAllocator Allocator; 4697 CallGraphUpdater CGUpdater; 4698 4699 SetVector<Function *> Functions(SCC.begin(), SCC.end()); 4700 OMPInformationCache InfoCache(M, AG, Allocator, /*CGSCC*/ Functions, Kernels); 4701 4702 unsigned MaxFixpointIterations = 4703 (isOpenMPDevice(M)) ? SetFixpointIterations : 32; 4704 Attributor A(Functions, InfoCache, CGUpdater, nullptr, true, false, 4705 MaxFixpointIterations, OREGetter, DEBUG_TYPE); 4706 4707 OpenMPOpt OMPOpt(SCC, CGUpdater, OREGetter, InfoCache, A); 4708 bool Changed = OMPOpt.run(true); 4709 4710 // Optionally inline device functions for potentially better performance. 4711 if (AlwaysInlineDeviceFunctions && isOpenMPDevice(M)) 4712 for (Function &F : M) 4713 if (!F.isDeclaration() && !Kernels.contains(&F) && 4714 !F.hasFnAttribute(Attribute::NoInline)) 4715 F.addFnAttr(Attribute::AlwaysInline); 4716 4717 if (PrintModuleAfterOptimizations) 4718 LLVM_DEBUG(dbgs() << TAG << "Module after OpenMPOpt Module Pass:\n" << M); 4719 4720 if (Changed) 4721 return PreservedAnalyses::none(); 4722 4723 return PreservedAnalyses::all(); 4724 } 4725 4726 PreservedAnalyses OpenMPOptCGSCCPass::run(LazyCallGraph::SCC &C, 4727 CGSCCAnalysisManager &AM, 4728 LazyCallGraph &CG, 4729 CGSCCUpdateResult &UR) { 4730 if (!containsOpenMP(*C.begin()->getFunction().getParent())) 4731 return PreservedAnalyses::all(); 4732 if (DisableOpenMPOptimizations) 4733 return PreservedAnalyses::all(); 4734 4735 SmallVector<Function *, 16> SCC; 4736 // If there are kernels in the module, we have to run on all SCC's. 4737 for (LazyCallGraph::Node &N : C) { 4738 Function *Fn = &N.getFunction(); 4739 SCC.push_back(Fn); 4740 } 4741 4742 if (SCC.empty()) 4743 return PreservedAnalyses::all(); 4744 4745 Module &M = *C.begin()->getFunction().getParent(); 4746 4747 KernelSet Kernels = getDeviceKernels(M); 4748 4749 FunctionAnalysisManager &FAM = 4750 AM.getResult<FunctionAnalysisManagerCGSCCProxy>(C, CG).getManager(); 4751 4752 AnalysisGetter AG(FAM); 4753 4754 auto OREGetter = [&FAM](Function *F) -> OptimizationRemarkEmitter & { 4755 return FAM.getResult<OptimizationRemarkEmitterAnalysis>(*F); 4756 }; 4757 4758 BumpPtrAllocator Allocator; 4759 CallGraphUpdater CGUpdater; 4760 CGUpdater.initialize(CG, C, AM, UR); 4761 4762 SetVector<Function *> Functions(SCC.begin(), SCC.end()); 4763 OMPInformationCache InfoCache(*(Functions.back()->getParent()), AG, Allocator, 4764 /*CGSCC*/ Functions, Kernels); 4765 4766 unsigned MaxFixpointIterations = 4767 (isOpenMPDevice(M)) ? 
PreservedAnalyses OpenMPOptCGSCCPass::run(LazyCallGraph::SCC &C,
                                          CGSCCAnalysisManager &AM,
                                          LazyCallGraph &CG,
                                          CGSCCUpdateResult &UR) {
  if (!containsOpenMP(*C.begin()->getFunction().getParent()))
    return PreservedAnalyses::all();
  if (DisableOpenMPOptimizations)
    return PreservedAnalyses::all();

  SmallVector<Function *, 16> SCC;
  // If there are kernels in the module, we have to run on all SCCs.
  for (LazyCallGraph::Node &N : C) {
    Function *Fn = &N.getFunction();
    SCC.push_back(Fn);
  }

  if (SCC.empty())
    return PreservedAnalyses::all();

  Module &M = *C.begin()->getFunction().getParent();

  KernelSet Kernels = getDeviceKernels(M);

  FunctionAnalysisManager &FAM =
      AM.getResult<FunctionAnalysisManagerCGSCCProxy>(C, CG).getManager();

  AnalysisGetter AG(FAM);

  auto OREGetter = [&FAM](Function *F) -> OptimizationRemarkEmitter & {
    return FAM.getResult<OptimizationRemarkEmitterAnalysis>(*F);
  };

  BumpPtrAllocator Allocator;
  CallGraphUpdater CGUpdater;
  CGUpdater.initialize(CG, C, AM, UR);

  SetVector<Function *> Functions(SCC.begin(), SCC.end());
  OMPInformationCache InfoCache(*(Functions.back()->getParent()), AG, Allocator,
                                /*CGSCC*/ Functions, Kernels);

  unsigned MaxFixpointIterations =
      (isOpenMPDevice(M)) ? SetFixpointIterations : 32;
  Attributor A(Functions, InfoCache, CGUpdater, nullptr, false, true,
               MaxFixpointIterations, OREGetter, DEBUG_TYPE);

  OpenMPOpt OMPOpt(SCC, CGUpdater, OREGetter, InfoCache, A);
  bool Changed = OMPOpt.run(/*IsModulePass=*/false);

  if (PrintModuleAfterOptimizations)
    LLVM_DEBUG(dbgs() << TAG << "Module after OpenMPOpt CGSCC Pass:\n" << M);

  if (Changed)
    return PreservedAnalyses::none();

  return PreservedAnalyses::all();
}

namespace {

struct OpenMPOptCGSCCLegacyPass : public CallGraphSCCPass {
  CallGraphUpdater CGUpdater;
  static char ID;

  OpenMPOptCGSCCLegacyPass() : CallGraphSCCPass(ID) {
    initializeOpenMPOptCGSCCLegacyPassPass(*PassRegistry::getPassRegistry());
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    CallGraphSCCPass::getAnalysisUsage(AU);
  }

  bool runOnSCC(CallGraphSCC &CGSCC) override {
    if (!containsOpenMP(CGSCC.getCallGraph().getModule()))
      return false;
    if (DisableOpenMPOptimizations || skipSCC(CGSCC))
      return false;

    SmallVector<Function *, 16> SCC;
    // If there are kernels in the module, we have to run on all SCCs.
    for (CallGraphNode *CGN : CGSCC) {
      Function *Fn = CGN->getFunction();
      if (!Fn || Fn->isDeclaration())
        continue;
      SCC.push_back(Fn);
    }

    if (SCC.empty())
      return false;

    Module &M = CGSCC.getCallGraph().getModule();
    KernelSet Kernels = getDeviceKernels(M);

    CallGraph &CG = getAnalysis<CallGraphWrapperPass>().getCallGraph();
    CGUpdater.initialize(CG, CGSCC);

    // Maintain a map of functions to avoid rebuilding the ORE.
    DenseMap<Function *, std::unique_ptr<OptimizationRemarkEmitter>> OREMap;
    auto OREGetter = [&OREMap](Function *F) -> OptimizationRemarkEmitter & {
      std::unique_ptr<OptimizationRemarkEmitter> &ORE = OREMap[F];
      if (!ORE)
        ORE = std::make_unique<OptimizationRemarkEmitter>(F);
      return *ORE;
    };

    AnalysisGetter AG;
    SetVector<Function *> Functions(SCC.begin(), SCC.end());
    BumpPtrAllocator Allocator;
    OMPInformationCache InfoCache(*(Functions.back()->getParent()), AG,
                                  Allocator,
                                  /*CGSCC*/ Functions, Kernels);

    unsigned MaxFixpointIterations =
        (isOpenMPDevice(M)) ? SetFixpointIterations : 32;
    Attributor A(Functions, InfoCache, CGUpdater, nullptr, false, true,
                 MaxFixpointIterations, OREGetter, DEBUG_TYPE);

    OpenMPOpt OMPOpt(SCC, CGUpdater, OREGetter, InfoCache, A);
    bool Result = OMPOpt.run(/*IsModulePass=*/false);

    if (PrintModuleAfterOptimizations)
      LLVM_DEBUG(dbgs() << TAG << "Module after OpenMPOpt CGSCC Pass:\n" << M);

    return Result;
  }

  bool doFinalization(CallGraph &CG) override { return CGUpdater.finalize(); }
};

} // end anonymous namespace
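// For reference, the three helpers below inspect module-level metadata that
// the frontend emits for device code. A device module typically contains
// (illustrative names and version values only):
//
//   !nvvm.annotations = !{!0}
//   !0 = !{void ()* @__omp_offloading_main_l1, !"kernel", i32 1}
//
//   !llvm.module.flags = !{!1, !2}
//   !1 = !{i32 7, !"openmp", i32 50}
//   !2 = !{i32 7, !"openmp-device", i32 50}
//
// getDeviceKernels collects the functions annotated with "kernel", while
// containsOpenMP and isOpenMPDevice test the "openmp" and "openmp-device"
// module flags, respectively.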
KernelSet llvm::omp::getDeviceKernels(Module &M) {
  // TODO: Create a more cross-platform way of determining device kernels.
  NamedMDNode *MD = M.getNamedMetadata("nvvm.annotations");
  KernelSet Kernels;

  if (!MD)
    return Kernels;

  for (auto *Op : MD->operands()) {
    if (Op->getNumOperands() < 2)
      continue;
    MDString *KindID = dyn_cast<MDString>(Op->getOperand(1));
    if (!KindID || KindID->getString() != "kernel")
      continue;

    Function *KernelFn =
        mdconst::dyn_extract_or_null<Function>(Op->getOperand(0));
    if (!KernelFn)
      continue;

    ++NumOpenMPTargetRegionKernels;

    Kernels.insert(KernelFn);
  }

  return Kernels;
}

bool llvm::omp::containsOpenMP(Module &M) {
  Metadata *MD = M.getModuleFlag("openmp");
  if (!MD)
    return false;

  return true;
}

bool llvm::omp::isOpenMPDevice(Module &M) {
  Metadata *MD = M.getModuleFlag("openmp-device");
  if (!MD)
    return false;

  return true;
}

char OpenMPOptCGSCCLegacyPass::ID = 0;

INITIALIZE_PASS_BEGIN(OpenMPOptCGSCCLegacyPass, "openmp-opt-cgscc",
                      "OpenMP specific optimizations", false, false)
INITIALIZE_PASS_DEPENDENCY(CallGraphWrapperPass)
INITIALIZE_PASS_END(OpenMPOptCGSCCLegacyPass, "openmp-opt-cgscc",
                    "OpenMP specific optimizations", false, false)

Pass *llvm::createOpenMPOptCGSCCLegacyPass() {
  return new OpenMPOptCGSCCLegacyPass();
}
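// For reference: the INITIALIZE_PASS machinery above registers the legacy
// pass under the command-line name "openmp-opt-cgscc". A sketch of running
// it in isolation with the legacy pass manager:
//   opt -enable-new-pm=0 -openmp-opt-cgscc -S input.ll
// (on older opt binaries where the legacy pass manager is still the default,
// the -enable-new-pm=0 flag can be omitted).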