//===-- IPO/OpenMPOpt.cpp - Collection of OpenMP specific optimizations ---===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// OpenMP specific optimizations:
//
// - Deduplication of runtime calls, e.g., omp_get_thread_num.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/IPO/OpenMPOpt.h"

#include "llvm/ADT/EnumeratedArray.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/CallGraph.h"
#include "llvm/Analysis/CallGraphSCCPass.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Frontend/OpenMP/OMPConstants.h"
#include "llvm/Frontend/OpenMP/OMPIRBuilder.h"
#include "llvm/InitializePasses.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Transforms/IPO.h"
#include "llvm/Transforms/IPO/Attributor.h"
#include "llvm/Transforms/Utils/CallGraphUpdater.h"

using namespace llvm;
using namespace omp;

#define DEBUG_TYPE "openmp-opt"

static cl::opt<bool> DisableOpenMPOptimizations(
    "openmp-opt-disable", cl::ZeroOrMore,
    cl::desc("Disable OpenMP specific optimizations."), cl::Hidden,
    cl::init(false));

static cl::opt<bool> PrintICVValues("openmp-print-icv-values", cl::init(false),
                                    cl::Hidden);
static cl::opt<bool> PrintOpenMPKernels("openmp-print-gpu-kernels",
                                        cl::init(false), cl::Hidden);

static cl::opt<bool> HideMemoryTransferLatency(
    "openmp-hide-memory-transfer-latency",
    cl::desc("[WIP] Tries to hide the latency of host to device memory"
             " transfers"),
    cl::Hidden, cl::init(false));

STATISTIC(NumOpenMPRuntimeCallsDeduplicated,
          "Number of OpenMP runtime calls deduplicated");
STATISTIC(NumOpenMPParallelRegionsDeleted,
          "Number of OpenMP parallel regions deleted");
STATISTIC(NumOpenMPRuntimeFunctionsIdentified,
          "Number of OpenMP runtime functions identified");
STATISTIC(NumOpenMPRuntimeFunctionUsesIdentified,
          "Number of OpenMP runtime function uses identified");
STATISTIC(NumOpenMPTargetRegionKernels,
          "Number of OpenMP target region entry points (=kernels) identified");
STATISTIC(
    NumOpenMPParallelRegionsReplacedInGPUStateMachine,
    "Number of OpenMP parallel regions replaced with ID in GPU state machines");

#if !defined(NDEBUG)
static constexpr auto TAG = "[" DEBUG_TYPE "]";
#endif

namespace {

struct AAICVTracker;

/// OpenMP specific information. For now, stores RFIs and ICVs also needed for
/// Attributor runs.
struct OMPInformationCache : public InformationCache {
  OMPInformationCache(Module &M, AnalysisGetter &AG,
                      BumpPtrAllocator &Allocator, SetVector<Function *> &CGSCC,
                      SmallPtrSetImpl<Kernel> &Kernels)
      : InformationCache(M, AG, Allocator, &CGSCC), OMPBuilder(M),
        Kernels(Kernels) {
    OMPBuilder.initialize();
    initializeRuntimeFunctions();
    initializeInternalControlVars();
  }

  /// Generic information that describes an internal control variable.
  struct InternalControlVarInfo {
    /// The kind, as described by the InternalControlVar enum.
    InternalControlVar Kind;

    /// The name of the ICV.
    StringRef Name;

    /// Environment variable associated with this ICV.
    StringRef EnvVarName;

    /// Initial value kind.
    ICVInitValue InitKind;

    /// Initial value.
    ConstantInt *InitValue;

    /// Setter RTL function associated with this ICV.
    RuntimeFunction Setter;

    /// Getter RTL function associated with this ICV.
    RuntimeFunction Getter;

    /// RTL function corresponding to the override clause of this ICV.
    RuntimeFunction Clause;
  };

  /// Generic information that describes a runtime function.
  struct RuntimeFunctionInfo {

    /// The kind, as described by the RuntimeFunction enum.
    RuntimeFunction Kind;

    /// The name of the function.
    StringRef Name;

    /// Flag to indicate a variadic function.
    bool IsVarArg;

    /// The return type of the function.
    Type *ReturnType;

    /// The argument types of the function.
    SmallVector<Type *, 8> ArgumentTypes;

    /// The declaration if available.
    Function *Declaration = nullptr;

    /// Uses of this runtime function per function containing the use.
    using UseVector = SmallVector<Use *, 16>;

    /// Clear the UsesMap for this runtime function.
    void clearUsesMap() { UsesMap.clear(); }

    /// Boolean conversion that is true if the runtime function was found.
    operator bool() const { return Declaration; }

    /// Return the vector of uses in function \p F.
    UseVector &getOrCreateUseVector(Function *F) {
      std::shared_ptr<UseVector> &UV = UsesMap[F];
      if (!UV)
        UV = std::make_shared<UseVector>();
      return *UV;
    }

    /// Return the vector of uses in function \p F or `nullptr` if there are
    /// none.
    const UseVector *getUseVector(Function &F) const {
      auto I = UsesMap.find(&F);
      if (I != UsesMap.end())
        return I->second.get();
      return nullptr;
    }

    /// Return how many functions contain uses of this runtime function.
    size_t getNumFunctionsWithUses() const { return UsesMap.size(); }

    /// Return the number of arguments (or the minimal number for variadic
    /// functions).
    size_t getNumArgs() const { return ArgumentTypes.size(); }

    /// Run the callback \p CB on each use and forget the use if the result is
    /// true. The callback will be fed the function in which the use was
    /// encountered as second argument.
    void foreachUse(SmallVectorImpl<Function *> &SCC,
                    function_ref<bool(Use &, Function &)> CB) {
      for (Function *F : SCC)
        foreachUse(CB, F);
    }

    /// Run the callback \p CB on each use within the function \p F and forget
    /// the use if the result is true.
    void foreachUse(function_ref<bool(Use &, Function &)> CB, Function *F) {
      SmallVector<unsigned, 8> ToBeDeleted;

      unsigned Idx = 0;
      UseVector &UV = getOrCreateUseVector(F);

      for (Use *U : UV) {
        if (CB(*U, *F))
          ToBeDeleted.push_back(Idx);
        ++Idx;
      }

      // Remove the to-be-deleted indices in reverse order as prior
      // modifications will not modify the smaller indices.
      while (!ToBeDeleted.empty()) {
        unsigned Idx = ToBeDeleted.pop_back_val();
        UV[Idx] = UV.back();
        UV.pop_back();
      }
    }

  private:
    /// Map from functions to all uses of this runtime function contained in
    /// them.
    DenseMap<Function *, std::shared_ptr<UseVector>> UsesMap;
  };

  /// An OpenMP-IR-Builder instance.
  OpenMPIRBuilder OMPBuilder;

  /// Map from runtime function kind to the runtime function description.
  EnumeratedArray<RuntimeFunctionInfo, RuntimeFunction,
                  RuntimeFunction::OMPRTL___last>
      RFIs;

  /// Map from ICV kind to the ICV description.
  EnumeratedArray<InternalControlVarInfo, InternalControlVar,
                  InternalControlVar::ICV___last>
      ICVs;

  /// Helper to initialize all internal control variable information for those
  /// defined in OMPKinds.def.
  void initializeInternalControlVars() {
#define ICV_RT_SET(_Name, RTL)                                                 \
  {                                                                            \
    auto &ICV = ICVs[_Name];                                                   \
    ICV.Setter = RTL;                                                          \
  }
#define ICV_RT_GET(Name, RTL)                                                  \
  {                                                                            \
    auto &ICV = ICVs[Name];                                                    \
    ICV.Getter = RTL;                                                          \
  }
#define ICV_DATA_ENV(Enum, _Name, _EnvVarName, Init)                           \
  {                                                                            \
    auto &ICV = ICVs[Enum];                                                    \
    ICV.Name = _Name;                                                          \
    ICV.Kind = Enum;                                                           \
    ICV.InitKind = Init;                                                       \
    ICV.EnvVarName = _EnvVarName;                                              \
    switch (ICV.InitKind) {                                                    \
    case ICV_IMPLEMENTATION_DEFINED:                                           \
      ICV.InitValue = nullptr;                                                 \
      break;                                                                   \
    case ICV_ZERO:                                                             \
      ICV.InitValue = ConstantInt::get(                                        \
          Type::getInt32Ty(OMPBuilder.Int32->getContext()), 0);                \
      break;                                                                   \
    case ICV_FALSE:                                                            \
      ICV.InitValue = ConstantInt::getFalse(OMPBuilder.Int1->getContext());    \
      break;                                                                   \
    case ICV_LAST:                                                             \
      break;                                                                   \
    }                                                                          \
  }
#include "llvm/Frontend/OpenMP/OMPKinds.def"
  }

  /// Returns true if the function declaration \p F matches the runtime
  /// function types, that is, return type \p RTFRetType, and argument types
  /// \p RTFArgTypes.
  static bool declMatchesRTFTypes(Function *F, Type *RTFRetType,
                                  SmallVector<Type *, 8> &RTFArgTypes) {
    // TODO: We should output information to the user (under debug output
    //       and via remarks).

    if (!F)
      return false;
    if (F->getReturnType() != RTFRetType)
      return false;
    if (F->arg_size() != RTFArgTypes.size())
      return false;

    auto RTFTyIt = RTFArgTypes.begin();
    for (Argument &Arg : F->args()) {
      if (Arg.getType() != *RTFTyIt)
        return false;

      ++RTFTyIt;
    }

    return true;
  }

  // Helper to collect all uses of the declaration in the UsesMap.
  unsigned collectUses(RuntimeFunctionInfo &RFI, bool CollectStats = true) {
    unsigned NumUses = 0;
    if (!RFI.Declaration)
      return NumUses;
    OMPBuilder.addAttributes(RFI.Kind, *RFI.Declaration);

    if (CollectStats) {
      NumOpenMPRuntimeFunctionsIdentified += 1;
      NumOpenMPRuntimeFunctionUsesIdentified += RFI.Declaration->getNumUses();
    }

    // TODO: We directly convert uses into proper calls and unknown uses.
    for (Use &U : RFI.Declaration->uses()) {
      if (Instruction *UserI = dyn_cast<Instruction>(U.getUser())) {
        if (ModuleSlice.count(UserI->getFunction())) {
          RFI.getOrCreateUseVector(UserI->getFunction()).push_back(&U);
          ++NumUses;
        }
      } else {
        RFI.getOrCreateUseVector(nullptr).push_back(&U);
        ++NumUses;
      }
    }
    return NumUses;
  }

  // Helper function to recollect uses of all runtime functions.
  void recollectUses() {
    for (int Idx = 0; Idx < RFIs.size(); ++Idx) {
      auto &RFI = RFIs[static_cast<RuntimeFunction>(Idx)];
      RFI.clearUsesMap();
      collectUses(RFI, /*CollectStats*/ false);
    }
  }

  /// Helper to initialize all runtime function information for those defined
  /// in OMPKinds.def.
  void initializeRuntimeFunctions() {
    Module &M = *((*ModuleSlice.begin())->getParent());

    // Helper macros for handling __VA_ARGS__ in OMP_RTL.
#define OMP_TYPE(VarName, ...)                                                 \
  Type *VarName = OMPBuilder.VarName;                                          \
  (void)VarName;

#define OMP_ARRAY_TYPE(VarName, ...)                                           \
  ArrayType *VarName##Ty = OMPBuilder.VarName##Ty;                             \
  (void)VarName##Ty;                                                           \
  PointerType *VarName##PtrTy = OMPBuilder.VarName##PtrTy;                     \
  (void)VarName##PtrTy;

#define OMP_FUNCTION_TYPE(VarName, ...)                                        \
  FunctionType *VarName = OMPBuilder.VarName;                                  \
  (void)VarName;                                                               \
  PointerType *VarName##Ptr = OMPBuilder.VarName##Ptr;                         \
  (void)VarName##Ptr;

#define OMP_STRUCT_TYPE(VarName, ...)                                          \
  StructType *VarName = OMPBuilder.VarName;                                    \
  (void)VarName;                                                               \
  PointerType *VarName##Ptr = OMPBuilder.VarName##Ptr;                         \
  (void)VarName##Ptr;

#define OMP_RTL(_Enum, _Name, _IsVarArg, _ReturnType, ...)                     \
  {                                                                            \
    SmallVector<Type *, 8> ArgsTypes({__VA_ARGS__});                           \
    Function *F = M.getFunction(_Name);                                        \
    if (declMatchesRTFTypes(F, OMPBuilder._ReturnType, ArgsTypes)) {           \
      auto &RFI = RFIs[_Enum];                                                 \
      RFI.Kind = _Enum;                                                        \
      RFI.Name = _Name;                                                        \
      RFI.IsVarArg = _IsVarArg;                                                \
      RFI.ReturnType = OMPBuilder._ReturnType;                                 \
      RFI.ArgumentTypes = std::move(ArgsTypes);                                \
      RFI.Declaration = F;                                                     \
      unsigned NumUses = collectUses(RFI);                                     \
      (void)NumUses;                                                           \
      LLVM_DEBUG({                                                             \
        dbgs() << TAG << RFI.Name << (RFI.Declaration ? "" : " not")           \
               << " found\n";                                                  \
        if (RFI.Declaration)                                                   \
          dbgs() << TAG << "-> got " << NumUses << " uses in "                 \
                 << RFI.getNumFunctionsWithUses()                              \
                 << " different functions.\n";                                 \
      });                                                                      \
    }                                                                          \
  }
#include "llvm/Frontend/OpenMP/OMPKinds.def"

    // TODO: We should attach the attributes defined in OMPKinds.def.
  }

  /// Collection of known kernels (\see Kernel) in the module.
  SmallPtrSetImpl<Kernel> &Kernels;
};

struct OpenMPOpt {

  using OptimizationRemarkGetter =
      function_ref<OptimizationRemarkEmitter &(Function *)>;

  OpenMPOpt(SmallVectorImpl<Function *> &SCC, CallGraphUpdater &CGUpdater,
            OptimizationRemarkGetter OREGetter,
            OMPInformationCache &OMPInfoCache, Attributor &A)
      : M(*(*SCC.begin())->getParent()), SCC(SCC), CGUpdater(CGUpdater),
        OREGetter(OREGetter), OMPInfoCache(OMPInfoCache), A(A) {}

  /// Run all OpenMP optimizations on the underlying SCC/ModuleSlice.
  bool run() {
    if (SCC.empty())
      return false;

    bool Changed = false;

    LLVM_DEBUG(dbgs() << TAG << "Run on SCC with " << SCC.size()
                      << " functions in a slice with "
                      << OMPInfoCache.ModuleSlice.size() << " functions\n");

    if (PrintICVValues)
      printICVs();
    if (PrintOpenMPKernels)
      printKernels();

    Changed |= rewriteDeviceCodeStateMachine();

    Changed |= runAttributor();

    // Recollect uses, in case Attributor deleted any.
    OMPInfoCache.recollectUses();

    Changed |= deduplicateRuntimeCalls();
    Changed |= deleteParallelRegions();
    if (HideMemoryTransferLatency)
      Changed |= hideMemTransfersLatency();

    return Changed;
  }

  /// Print initial ICV values for testing.
  /// FIXME: This should be done from the Attributor once it is added.
  void printICVs() const {
    InternalControlVar ICVs[] = {ICV_nthreads, ICV_active_levels, ICV_cancel};

    for (Function *F : OMPInfoCache.ModuleSlice) {
      for (auto ICV : ICVs) {
        auto ICVInfo = OMPInfoCache.ICVs[ICV];
        auto Remark = [&](OptimizationRemark OR) {
          return OR << "OpenMP ICV " << ore::NV("OpenMPICV", ICVInfo.Name)
                    << " Value: "
                    << (ICVInfo.InitValue
                            ? ICVInfo.InitValue->getValue().toString(10, true)
                            : "IMPLEMENTATION_DEFINED");
        };

        emitRemarkOnFunction(F, "OpenMPICVTracker", Remark);
      }
    }
  }

  /// Print OpenMP GPU kernels for testing.
  void printKernels() const {
    for (Function *F : SCC) {
      if (!OMPInfoCache.Kernels.count(F))
        continue;

      auto Remark = [&](OptimizationRemark OR) {
        return OR << "OpenMP GPU kernel "
                  << ore::NV("OpenMPGPUKernel", F->getName()) << "\n";
      };

      emitRemarkOnFunction(F, "OpenMPGPU", Remark);
    }
  }

  /// Return the call if \p U is a callee use in a regular call. If \p RFI is
  /// given, it has to be the callee; otherwise nullptr is returned.
  static CallInst *getCallIfRegularCall(
      Use &U, OMPInformationCache::RuntimeFunctionInfo *RFI = nullptr) {
    CallInst *CI = dyn_cast<CallInst>(U.getUser());
    if (CI && CI->isCallee(&U) && !CI->hasOperandBundles() &&
        (!RFI || CI->getCalledFunction() == RFI->Declaration))
      return CI;
    return nullptr;
  }

  /// Return the call if \p V is a regular call. If \p RFI is given, it has to
  /// be the callee; otherwise nullptr is returned.
  static CallInst *getCallIfRegularCall(
      Value &V, OMPInformationCache::RuntimeFunctionInfo *RFI = nullptr) {
    CallInst *CI = dyn_cast<CallInst>(&V);
    if (CI && !CI->hasOperandBundles() &&
        (!RFI || CI->getCalledFunction() == RFI->Declaration))
      return CI;
    return nullptr;
  }

private:
  /// Try to delete parallel regions if possible.
  bool deleteParallelRegions() {
    const unsigned CallbackCalleeOperand = 2;

    OMPInformationCache::RuntimeFunctionInfo &RFI =
        OMPInfoCache.RFIs[OMPRTL___kmpc_fork_call];

    if (!RFI.Declaration)
      return false;

    bool Changed = false;
    auto DeleteCallCB = [&](Use &U, Function &) {
      CallInst *CI = getCallIfRegularCall(U);
      if (!CI)
        return false;
      auto *Fn = dyn_cast<Function>(
          CI->getArgOperand(CallbackCalleeOperand)->stripPointerCasts());
      if (!Fn)
        return false;
      if (!Fn->onlyReadsMemory())
        return false;
      if (!Fn->hasFnAttribute(Attribute::WillReturn))
        return false;

      LLVM_DEBUG(dbgs() << TAG << "Delete read-only parallel region in "
                        << CI->getCaller()->getName() << "\n");

      auto Remark = [&](OptimizationRemark OR) {
        return OR << "Parallel region in "
                  << ore::NV("OpenMPParallelDelete", CI->getCaller()->getName())
                  << " deleted";
      };
      emitRemark<OptimizationRemark>(CI, "OpenMPParallelRegionDeletion",
                                     Remark);

      CGUpdater.removeCallSite(*CI);
      CI->eraseFromParent();
      Changed = true;
      ++NumOpenMPParallelRegionsDeleted;
      return true;
    };

    RFI.foreachUse(SCC, DeleteCallCB);

    return Changed;
  }

  /// Try to eliminate runtime calls by reusing existing ones.
  bool deduplicateRuntimeCalls() {
    bool Changed = false;

    RuntimeFunction DeduplicableRuntimeCallIDs[] = {
        OMPRTL_omp_get_num_threads,
        OMPRTL_omp_in_parallel,
        OMPRTL_omp_get_cancellation,
        OMPRTL_omp_get_thread_limit,
        OMPRTL_omp_get_supported_active_levels,
        OMPRTL_omp_get_level,
        OMPRTL_omp_get_ancestor_thread_num,
        OMPRTL_omp_get_team_size,
        OMPRTL_omp_get_active_level,
        OMPRTL_omp_in_final,
        OMPRTL_omp_get_proc_bind,
        OMPRTL_omp_get_num_places,
        OMPRTL_omp_get_num_procs,
        OMPRTL_omp_get_place_num,
        OMPRTL_omp_get_partition_num_places,
        OMPRTL_omp_get_partition_place_nums};

    // Global-tid is handled separately.
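    // A sketch of the special case (hand-written IR, not from a regression
    // test): if an internal helper is only ever passed a global thread ID,
    //   %gtid = call i32 @__kmpc_global_thread_num(%struct.ident_t* @loc)
    //   call void @helper(i32 %gtid)
    // then __kmpc_global_thread_num calls inside @helper can be replaced by
    // the corresponding argument rather than by a call hoisted into the
    // entry block.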
    SmallSetVector<Value *, 16> GTIdArgs;
    collectGlobalThreadIdArguments(GTIdArgs);
    LLVM_DEBUG(dbgs() << TAG << "Found " << GTIdArgs.size()
                      << " global thread ID arguments\n");

    for (Function *F : SCC) {
      for (auto DeduplicableRuntimeCallID : DeduplicableRuntimeCallIDs)
        Changed |= deduplicateRuntimeCalls(
            *F, OMPInfoCache.RFIs[DeduplicableRuntimeCallID]);

      // __kmpc_global_thread_num is special as we can replace it with an
      // argument in enough cases to make it worth trying.
      Value *GTIdArg = nullptr;
      for (Argument &Arg : F->args())
        if (GTIdArgs.count(&Arg)) {
          GTIdArg = &Arg;
          break;
        }
      Changed |= deduplicateRuntimeCalls(
          *F, OMPInfoCache.RFIs[OMPRTL___kmpc_global_thread_num], GTIdArg);
    }

    return Changed;
  }

  /// Tries to hide the latency of runtime calls that involve host to
  /// device memory transfers by splitting them into their "issue" and "wait"
  /// versions. The "issue" is moved upwards as much as possible. The "wait" is
  /// moved downwards as much as possible. The "issue" issues the memory
  /// transfer asynchronously, returning a handle. The "wait" waits on the
  /// returned handle for the memory transfer to finish.
  bool hideMemTransfersLatency() {
    auto &RFI = OMPInfoCache.RFIs[OMPRTL___tgt_target_data_begin_mapper];
    bool Changed = false;
    auto SplitMemTransfers = [&](Use &U, Function &Decl) {
      auto *RTCall = getCallIfRegularCall(U, &RFI);
      if (!RTCall)
        return false;

      // TODO: Check if the call can be moved upwards.
      bool WasSplit = false;
      Instruction *WaitMovementPoint = canBeMovedDownwards(*RTCall);
      if (WaitMovementPoint)
        WasSplit = splitTargetDataBeginRTC(*RTCall, *WaitMovementPoint);

      Changed |= WasSplit;
      return WasSplit;
    };
    RFI.foreachUse(SCC, SplitMemTransfers);

    return Changed;
  }

  /// Returns the instruction where the "wait" counterpart of \p RuntimeCall
  /// can be moved. Returns nullptr if the movement is not possible, or not
  /// worth it.
  Instruction *canBeMovedDownwards(CallInst &RuntimeCall) {
    // FIXME: This traverses only the BasicBlock where RuntimeCall is.
    //        Make it traverse the CFG.

    Instruction *CurrentI = &RuntimeCall;
    bool IsWorthIt = false;
    while ((CurrentI = CurrentI->getNextNode())) {

      // TODO: Once we detect the regions to be offloaded we should use the
      //       alias analysis manager to check if CurrentI may modify one of
      //       the offloaded regions.
      if (CurrentI->mayHaveSideEffects() || CurrentI->mayReadFromMemory()) {
        if (IsWorthIt)
          return CurrentI;

        return nullptr;
      }

      // FIXME: For now, moving the wait over anything without side effects
      //        is considered worth it.
      IsWorthIt = true;
    }

    // Return the end of the BasicBlock.
    return RuntimeCall.getParent()->getTerminator();
  }

  /// Splits \p RuntimeCall into its "issue" and "wait" counterparts.
  bool splitTargetDataBeginRTC(CallInst &RuntimeCall,
                               Instruction &WaitMovementPoint) {
    auto &IRBuilder = OMPInfoCache.OMPBuilder;
    // Add the "issue" runtime call declaration:
    // declare %struct.tgt_async_info @__tgt_target_data_begin_issue(i64, i32,
    //   i8**, i8**, i64*, i64*)
    FunctionCallee IssueDecl = IRBuilder.getOrCreateRuntimeFunction(
        M, OMPRTL___tgt_target_data_begin_mapper_issue);

    // Change the RuntimeCall call site for its asynchronous version.
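    // The intended end state, sketched by hand with abbreviated argument
    // lists:
    //   call void @__tgt_target_data_begin_mapper(i64 %device_id, ...)
    // becomes
    //   %handle = call %struct.__tgt_async_info
    //       @__tgt_target_data_begin_mapper_issue(i64 %device_id, ...)
    //   ... ; instructions the transfer cannot conflict with
    //   call void @__tgt_target_data_begin_mapper_wait(i64 %device_id,
    //                                                  %handle)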
    SmallVector<Value *, 8> Args;
    for (auto &Arg : RuntimeCall.args())
      Args.push_back(Arg.get());

    CallInst *IssueCallsite =
        CallInst::Create(IssueDecl, Args, "handle", &RuntimeCall);
    RuntimeCall.eraseFromParent();

    // Add the "wait" runtime call declaration:
    // declare void @__tgt_target_data_begin_wait(i64, %struct.__tgt_async_info)
    FunctionCallee WaitDecl = IRBuilder.getOrCreateRuntimeFunction(
        M, OMPRTL___tgt_target_data_begin_mapper_wait);

    // Add the call site to WaitDecl.
    Value *WaitParams[2] = {
        IssueCallsite->getArgOperand(0), // device_id.
        IssueCallsite                    // returned handle.
    };
    CallInst::Create(WaitDecl, WaitParams, /*NameStr=*/"", &WaitMovementPoint);

    return true;
  }

  static Value *combinedIdentStruct(Value *CurrentIdent, Value *NextIdent,
                                    bool GlobalOnly, bool &SingleChoice) {
    if (CurrentIdent == NextIdent)
      return CurrentIdent;

    // TODO: Figure out how to actually combine multiple debug locations. For
    //       now we just keep an existing one if there is a single choice.
    if (!GlobalOnly || isa<GlobalValue>(NextIdent)) {
      SingleChoice = !CurrentIdent;
      return NextIdent;
    }
    return nullptr;
  }

  /// Return a `struct ident_t*` value that represents the ones used in the
  /// calls of \p RFI inside of \p F. If \p GlobalOnly is true, we will not
  /// return a local `struct ident_t*`. For now, if we cannot find a suitable
  /// return value we create one from scratch. We also do not yet combine
  /// information, e.g., the source locations, see combinedIdentStruct.
  Value *
  getCombinedIdentFromCallUsesIn(OMPInformationCache::RuntimeFunctionInfo &RFI,
                                 Function &F, bool GlobalOnly) {
    bool SingleChoice = true;
    Value *Ident = nullptr;
    auto CombineIdentStruct = [&](Use &U, Function &Caller) {
      CallInst *CI = getCallIfRegularCall(U, &RFI);
      if (!CI || &F != &Caller)
        return false;
      Ident = combinedIdentStruct(Ident, CI->getArgOperand(0),
                                  /* GlobalOnly */ true, SingleChoice);
      return false;
    };
    RFI.foreachUse(SCC, CombineIdentStruct);

    if (!Ident || !SingleChoice) {
      // The IRBuilder uses the insertion block to get to the module, this is
      // unfortunate but we work around it for now.
      if (!OMPInfoCache.OMPBuilder.getInsertionPoint().getBlock())
        OMPInfoCache.OMPBuilder.updateToLocation(OpenMPIRBuilder::InsertPointTy(
            &F.getEntryBlock(), F.getEntryBlock().begin()));
      // Create a fallback location if none was found.
      // TODO: Use the debug locations of the calls instead.
      Constant *Loc = OMPInfoCache.OMPBuilder.getOrCreateDefaultSrcLocStr();
      Ident = OMPInfoCache.OMPBuilder.getOrCreateIdent(Loc);
    }
    return Ident;
  }

  /// Try to eliminate calls of \p RFI in \p F by reusing an existing one or
  /// \p ReplVal if given.
  bool deduplicateRuntimeCalls(Function &F,
                               OMPInformationCache::RuntimeFunctionInfo &RFI,
                               Value *ReplVal = nullptr) {
    auto *UV = RFI.getUseVector(F);
    if (!UV || UV->size() + (ReplVal != nullptr) < 2)
      return false;

    LLVM_DEBUG(
        dbgs() << TAG << "Deduplicate " << UV->size() << " uses of " << RFI.Name
               << (ReplVal ? " with an existing value\n" : "\n"));

    assert((!ReplVal || (isa<Argument>(ReplVal) &&
                         cast<Argument>(ReplVal)->getParent() == &F)) &&
           "Unexpected replacement value!");

    // TODO: Use dominance to find a good position instead.
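    // A call can only be moved to the entry block if its operands are valid
    // there: we conservatively require that, apart from a leading ident_t
    // argument (which is replaced with a global one below), no argument is an
    // instruction.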
    auto CanBeMoved = [this](CallBase &CB) {
      unsigned NumArgs = CB.getNumArgOperands();
      if (NumArgs == 0)
        return true;
      if (CB.getArgOperand(0)->getType() != OMPInfoCache.OMPBuilder.IdentPtr)
        return false;
      for (unsigned u = 1; u < NumArgs; ++u)
        if (isa<Instruction>(CB.getArgOperand(u)))
          return false;
      return true;
    };

    if (!ReplVal) {
      for (Use *U : *UV)
        if (CallInst *CI = getCallIfRegularCall(*U, &RFI)) {
          if (!CanBeMoved(*CI))
            continue;

          auto Remark = [&](OptimizationRemark OR) {
            auto NewLoc = &*F.getEntryBlock().getFirstInsertionPt();
            return OR << "OpenMP runtime call "
                      << ore::NV("OpenMPOptRuntime", RFI.Name) << " moved to "
                      << ore::NV("OpenMPRuntimeMoves", NewLoc->getDebugLoc());
          };
          emitRemark<OptimizationRemark>(CI, "OpenMPRuntimeCodeMotion", Remark);

          CI->moveBefore(&*F.getEntryBlock().getFirstInsertionPt());
          ReplVal = CI;
          break;
        }
      if (!ReplVal)
        return false;
    }

    // If we use a call as a replacement value we need to make sure the ident
    // is valid at the new location. For now we just pick a global one, either
    // existing and used by one of the calls, or created from scratch.
    if (CallBase *CI = dyn_cast<CallBase>(ReplVal)) {
      if (CI->getNumArgOperands() > 0 &&
          CI->getArgOperand(0)->getType() == OMPInfoCache.OMPBuilder.IdentPtr) {
        Value *Ident = getCombinedIdentFromCallUsesIn(RFI, F,
                                                      /* GlobalOnly */ true);
        CI->setArgOperand(0, Ident);
      }
    }

    bool Changed = false;
    auto ReplaceAndDeleteCB = [&](Use &U, Function &Caller) {
      CallInst *CI = getCallIfRegularCall(U, &RFI);
      if (!CI || CI == ReplVal || &F != &Caller)
        return false;
      assert(CI->getCaller() == &F && "Unexpected call!");

      auto Remark = [&](OptimizationRemark OR) {
        return OR << "OpenMP runtime call "
                  << ore::NV("OpenMPOptRuntime", RFI.Name) << " deduplicated";
      };
      emitRemark<OptimizationRemark>(CI, "OpenMPRuntimeDeduplicated", Remark);

      CGUpdater.removeCallSite(*CI);
      CI->replaceAllUsesWith(ReplVal);
      CI->eraseFromParent();
      ++NumOpenMPRuntimeCallsDeduplicated;
      Changed = true;
      return true;
    };
    RFI.foreachUse(SCC, ReplaceAndDeleteCB);

    return Changed;
  }

  /// Collect arguments that represent the global thread id in \p GTIdArgs.
  void collectGlobalThreadIdArguments(SmallSetVector<Value *, 16> &GTIdArgs) {
    // TODO: Below we basically perform a fixpoint iteration with a pessimistic
    //       initialization. We could define an AbstractAttribute instead and
    //       run the Attributor here once it can be run as an SCC pass.

    // Helper to check the argument \p ArgNo at all call sites of \p F for
    // a GTId.
    auto CallArgOpIsGTId = [&](Function &F, unsigned ArgNo, CallInst &RefCI) {
      if (!F.hasLocalLinkage())
        return false;
      for (Use &U : F.uses()) {
        if (CallInst *CI = getCallIfRegularCall(U)) {
          Value *ArgOp = CI->getArgOperand(ArgNo);
          if (CI == &RefCI || GTIdArgs.count(ArgOp) ||
              getCallIfRegularCall(
                  *ArgOp, &OMPInfoCache.RFIs[OMPRTL___kmpc_global_thread_num]))
            continue;
        }
        return false;
      }
      return true;
    };

    // Helper to identify uses of a GTId as GTId arguments.
    auto AddUserArgs = [&](Value &GTId) {
      for (Use &U : GTId.uses())
        if (CallInst *CI = dyn_cast<CallInst>(U.getUser()))
          if (CI->isArgOperand(&U))
            if (Function *Callee = CI->getCalledFunction())
              if (CallArgOpIsGTId(*Callee, U.getOperandNo(), *CI))
                GTIdArgs.insert(Callee->getArg(U.getOperandNo()));
    };

    // The argument users of __kmpc_global_thread_num calls are GTIds.
    OMPInformationCache::RuntimeFunctionInfo &GlobThreadNumRFI =
        OMPInfoCache.RFIs[OMPRTL___kmpc_global_thread_num];

    GlobThreadNumRFI.foreachUse(SCC, [&](Use &U, Function &F) {
      if (CallInst *CI = getCallIfRegularCall(U, &GlobThreadNumRFI))
        AddUserArgs(*CI);
      return false;
    });

    // Transitively search for more arguments by looking at the users of the
    // ones we know already. During the search the GTIdArgs vector is extended
    // so we cannot cache the size nor can we use a range based for.
    for (unsigned u = 0; u < GTIdArgs.size(); ++u)
      AddUserArgs(*GTIdArgs[u]);
  }

  /// Kernel (=GPU) optimizations and utility functions
  ///
  ///{{

  /// Check if \p F is a kernel, hence an entry point for target offloading.
  bool isKernel(Function &F) { return OMPInfoCache.Kernels.count(&F); }

  /// Cache to remember the unique kernel for a function.
  DenseMap<Function *, Optional<Kernel>> UniqueKernelMap;

  /// Find the unique kernel that will execute \p F, if any.
  Kernel getUniqueKernelFor(Function &F);

  /// Find the unique kernel that will execute \p I, if any.
  Kernel getUniqueKernelFor(Instruction &I) {
    return getUniqueKernelFor(*I.getFunction());
  }

  /// Rewrite the device (=GPU) code state machine created in non-SPMD mode in
  /// the cases where we can avoid taking the address of a function.
  bool rewriteDeviceCodeStateMachine();

  ///
  ///}}

  /// Emit a remark generically.
  ///
  /// This template function can be used to generically emit a remark. The
  /// RemarkKind should be one of the following:
  ///   - OptimizationRemark to indicate a successful optimization attempt
  ///   - OptimizationRemarkMissed to report a failed optimization attempt
  ///   - OptimizationRemarkAnalysis to provide additional information about an
  ///     optimization attempt
  ///
  /// The remark is built using a callback function provided by the caller that
  /// takes a RemarkKind as input and returns a RemarkKind.
  template <typename RemarkKind,
            typename RemarkCallBack = function_ref<RemarkKind(RemarkKind &&)>>
  void emitRemark(Instruction *Inst, StringRef RemarkName,
                  RemarkCallBack &&RemarkCB) const {
    Function *F = Inst->getParent()->getParent();
    auto &ORE = OREGetter(F);

    ORE.emit(
        [&]() { return RemarkCB(RemarkKind(DEBUG_TYPE, RemarkName, Inst)); });
  }

  /// Emit a remark on a function. Since only OptimizationRemark supports this,
  /// it cannot be made generic.
  void
  emitRemarkOnFunction(Function *F, StringRef RemarkName,
                       function_ref<OptimizationRemark(OptimizationRemark &&)>
                           &&RemarkCB) const {
    auto &ORE = OREGetter(F);

    ORE.emit([&]() {
      return RemarkCB(OptimizationRemark(DEBUG_TYPE, RemarkName, F));
    });
  }

  /// The underlying module.
  Module &M;

  /// The SCC we are operating on.
  SmallVectorImpl<Function *> &SCC;

  /// Callback to update the call graph, the first argument is a removed call,
  /// the second an optional replacement call.
  CallGraphUpdater &CGUpdater;

  /// Callback to get an OptimizationRemarkEmitter from a Function *.
  OptimizationRemarkGetter OREGetter;

  /// OpenMP-specific information cache. Also used for Attributor runs.
  OMPInformationCache &OMPInfoCache;

  /// Attributor instance.
  Attributor &A;

  /// Helper function to run the Attributor on the SCC.
  bool runAttributor() {
    if (SCC.empty())
      return false;

    registerAAs();

    ChangeStatus Changed = A.run();

    LLVM_DEBUG(dbgs() << "[Attributor] Done with " << SCC.size()
                      << " functions, result: " << Changed << ".\n");

    return Changed == ChangeStatus::CHANGED;
  }

  /// Populate the Attributor with abstract attribute opportunities in the
  /// function.
  void registerAAs() {
    if (SCC.empty())
      return;

    // Create a call site AA for all getters.
    for (int Idx = 0; Idx < OMPInfoCache.ICVs.size() - 1; ++Idx) {
      auto ICVInfo = OMPInfoCache.ICVs[static_cast<InternalControlVar>(Idx)];

      auto &GetterRFI = OMPInfoCache.RFIs[ICVInfo.Getter];

      auto CreateAA = [&](Use &U, Function &Caller) {
        CallInst *CI = OpenMPOpt::getCallIfRegularCall(U, &GetterRFI);
        if (!CI)
          return false;

        auto &CB = cast<CallBase>(*CI);

        IRPosition CBPos = IRPosition::callsite_function(CB);
        A.getOrCreateAAFor<AAICVTracker>(CBPos);
        return false;
      };

      GetterRFI.foreachUse(SCC, CreateAA);
    }
  }
};

Kernel OpenMPOpt::getUniqueKernelFor(Function &F) {
  if (!OMPInfoCache.ModuleSlice.count(&F))
    return nullptr;

  // Use a scope to keep the lifetime of the CachedKernel short.
  {
    Optional<Kernel> &CachedKernel = UniqueKernelMap[&F];
    if (CachedKernel)
      return *CachedKernel;

    // TODO: We should use an AA to create an (optimistic and callback
    //       call-aware) call graph. For now we stick to simple patterns that
    //       are less powerful, basically the worst fixpoint.
    if (isKernel(F)) {
      CachedKernel = Kernel(&F);
      return *CachedKernel;
    }

    CachedKernel = nullptr;
    if (!F.hasLocalLinkage())
      return nullptr;
  }

  auto GetUniqueKernelForUse = [&](const Use &U) -> Kernel {
    if (auto *Cmp = dyn_cast<ICmpInst>(U.getUser())) {
      // Allow use in equality comparisons.
      if (Cmp->isEquality())
        return getUniqueKernelFor(*Cmp);
      return nullptr;
    }
    if (auto *CB = dyn_cast<CallBase>(U.getUser())) {
      // Allow direct calls.
      if (CB->isCallee(&U))
        return getUniqueKernelFor(*CB);
      // Allow the use in __kmpc_kernel_prepare_parallel calls.
      if (Function *Callee = CB->getCalledFunction())
        if (Callee->getName() == "__kmpc_kernel_prepare_parallel")
          return getUniqueKernelFor(*CB);
      return nullptr;
    }
    // Disallow every other use.
    return nullptr;
  };

  // TODO: In the future we want to track more than just a unique kernel.
  SmallPtrSet<Kernel, 2> PotentialKernels;
  OMPInformationCache::foreachUse(F, [&](const Use &U) {
    PotentialKernels.insert(GetUniqueKernelForUse(U));
  });

  Kernel K = nullptr;
  if (PotentialKernels.size() == 1)
    K = *PotentialKernels.begin();

  // Cache the result.
  UniqueKernelMap[&F] = K;

  return K;
}

bool OpenMPOpt::rewriteDeviceCodeStateMachine() {
  OMPInformationCache::RuntimeFunctionInfo &KernelPrepareParallelRFI =
      OMPInfoCache.RFIs[OMPRTL___kmpc_kernel_prepare_parallel];

  bool Changed = false;
  if (!KernelPrepareParallelRFI)
    return Changed;

  for (Function *F : SCC) {

    // Check if the function is used in a __kmpc_kernel_prepare_parallel call
    // at all.
    bool UnknownUse = false;
    bool KernelPrepareUse = false;
    unsigned NumDirectCalls = 0;

    SmallVector<Use *, 2> ToBeReplacedStateMachineUses;
    OMPInformationCache::foreachUse(*F, [&](Use &U) {
      if (auto *CB = dyn_cast<CallBase>(U.getUser()))
        if (CB->isCallee(&U)) {
          ++NumDirectCalls;
          return;
        }

      if (isa<ICmpInst>(U.getUser())) {
        ToBeReplacedStateMachineUses.push_back(&U);
        return;
      }
      if (!KernelPrepareUse && OpenMPOpt::getCallIfRegularCall(
                                   *U.getUser(), &KernelPrepareParallelRFI)) {
        KernelPrepareUse = true;
        ToBeReplacedStateMachineUses.push_back(&U);
        return;
      }
      UnknownUse = true;
    });

    // Do not emit a remark if we haven't seen a __kmpc_kernel_prepare_parallel
    // use.
    if (!KernelPrepareUse)
      continue;

    {
      auto Remark = [&](OptimizationRemark OR) {
        return OR << "Found a parallel region that is called in a target "
                     "region but not part of a combined target construct nor "
                     "nested inside a target construct without intermediate "
                     "code. This can lead to excessive register usage for "
                     "unrelated target regions in the same translation unit "
                     "due to spurious call edges assumed by ptxas.";
      };
      emitRemarkOnFunction(F, "OpenMPParallelRegionInNonSPMD", Remark);
    }

    // If this ever hits, we should investigate.
    // TODO: Checking the number of uses is not a necessary restriction and
    //       should be lifted.
    if (UnknownUse || NumDirectCalls != 1 ||
        ToBeReplacedStateMachineUses.size() != 2) {
      {
        auto Remark = [&](OptimizationRemark OR) {
          return OR << "Parallel region is used in "
                    << (UnknownUse ? "unknown" : "unexpected")
                    << " ways; will not attempt to rewrite the state machine.";
        };
        emitRemarkOnFunction(F, "OpenMPParallelRegionInNonSPMD", Remark);
      }
      continue;
    }

    // Even if we have __kmpc_kernel_prepare_parallel calls, we (for now) give
    // up if the function is not called from a unique kernel.
    Kernel K = getUniqueKernelFor(*F);
    if (!K) {
      {
        auto Remark = [&](OptimizationRemark OR) {
          return OR << "Parallel region is not known to be called from a "
                       "unique single target region, maybe the surrounding "
                       "function has external linkage; will not attempt to "
                       "rewrite the state machine use.";
        };
        emitRemarkOnFunction(F, "OpenMPParallelRegionInMultipleKernels",
                             Remark);
      }
      continue;
    }

    // We now know F is a parallel body function called only from the kernel K.
    // We also identified the state machine uses in which we replace the
    // function pointer by a new global symbol for identification purposes.
    // This ensures only direct calls to the function are left.
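    // Sketch of the rewrite (hand-written and simplified IR): a state machine
    // use such as
    //   %is_par = icmp eq i8* %work_fn, bitcast (void ()* @par_fn to i8*)
    // becomes a comparison against the new private global instead:
    //   %is_par = icmp eq i8* %work_fn, @par_fn.ID
    // so that @par_fn is no longer address-taken and ptxas does not have to
    // assume call edges from unrelated kernels.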

    {
      auto RemarkParallelRegion = [&](OptimizationRemark OR) {
        return OR << "Specialize parallel region that is only reached from a "
                     "single target region to avoid spurious call edges and "
                     "excessive register usage in other target regions. "
                     "(parallel region ID: "
                  << ore::NV("OpenMPParallelRegion", F->getName())
                  << ", kernel ID: "
                  << ore::NV("OpenMPTargetRegion", K->getName()) << ")";
      };
      emitRemarkOnFunction(F, "OpenMPParallelRegionInNonSPMD",
                           RemarkParallelRegion);
      auto RemarkKernel = [&](OptimizationRemark OR) {
        return OR << "Target region containing the parallel region that is "
                     "specialized. (parallel region ID: "
                  << ore::NV("OpenMPParallelRegion", F->getName())
                  << ", kernel ID: "
                  << ore::NV("OpenMPTargetRegion", K->getName()) << ")";
      };
      emitRemarkOnFunction(K, "OpenMPParallelRegionInNonSPMD", RemarkKernel);
    }

    Module &M = *F->getParent();
    Type *Int8Ty = Type::getInt8Ty(M.getContext());

    auto *ID = new GlobalVariable(
        M, Int8Ty, /* isConstant */ true, GlobalValue::PrivateLinkage,
        UndefValue::get(Int8Ty), F->getName() + ".ID");

    for (Use *U : ToBeReplacedStateMachineUses)
      U->set(ConstantExpr::getBitCast(ID, U->get()->getType()));

    ++NumOpenMPParallelRegionsReplacedInGPUStateMachine;

    Changed = true;
  }

  return Changed;
}

/// Abstract Attribute for tracking ICV values.
struct AAICVTracker : public StateWrapper<BooleanState, AbstractAttribute> {
  using Base = StateWrapper<BooleanState, AbstractAttribute>;
  AAICVTracker(const IRPosition &IRP, Attributor &A) : Base(IRP) {}

  void initialize(Attributor &A) override {
    Function *F = getAnchorScope();
    if (!F || !A.isFunctionIPOAmendable(*F))
      indicatePessimisticFixpoint();
  }

  /// Returns true if the value is assumed to be tracked.
  bool isAssumedTracked() const { return getAssumed(); }

  /// Returns true if the value is known to be tracked.
  bool isKnownTracked() const { return getAssumed(); }

  /// Create an abstract attribute view for the position \p IRP.
  static AAICVTracker &createForPosition(const IRPosition &IRP, Attributor &A);

  /// Return the value with which \p I can be replaced for the specific \p ICV.
  virtual Optional<Value *> getReplacementValue(InternalControlVar ICV,
                                                const Instruction *I,
                                                Attributor &A) const {
    return None;
  }

  /// Return an assumed unique ICV value if a single candidate is found. If
  /// there cannot be one, return a nullptr. If it is not clear yet, return
  /// the Optional::NoneType.
  virtual Optional<Value *>
  getUniqueReplacementValue(InternalControlVar ICV) const = 0;

  // Currently only nthreads is being tracked.
  // This array will only grow with time.
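  // (For nthreads, the setter/getter pair registered in OMPKinds.def is
  // omp_set_num_threads / omp_get_max_threads.)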
  InternalControlVar TrackableICVs[1] = {ICV_nthreads};

  /// See AbstractAttribute::getName()
  const std::string getName() const override { return "AAICVTracker"; }

  /// See AbstractAttribute::getIdAddr()
  const char *getIdAddr() const override { return &ID; }

  /// This function should return true if the type of the \p AA is
  /// AAICVTracker.
  static bool classof(const AbstractAttribute *AA) {
    return (AA->getIdAddr() == &ID);
  }

  static const char ID;
};

struct AAICVTrackerFunction : public AAICVTracker {
  AAICVTrackerFunction(const IRPosition &IRP, Attributor &A)
      : AAICVTracker(IRP, A) {}

  // FIXME: come up with a better string.
  const std::string getAsStr() const override { return "ICVTrackerFunction"; }

  // FIXME: come up with some stats.
  void trackStatistics() const override {}

  /// We don't manifest anything for this AA.
  ChangeStatus manifest(Attributor &A) override {
    return ChangeStatus::UNCHANGED;
  }

  // Map of ICVs to their values at specific program points.
  EnumeratedArray<DenseMap<Instruction *, Value *>, InternalControlVar,
                  InternalControlVar::ICV___last>
      ICVReplacementValuesMap;

  ChangeStatus updateImpl(Attributor &A) override {
    ChangeStatus HasChanged = ChangeStatus::UNCHANGED;

    Function *F = getAnchorScope();

    auto &OMPInfoCache = static_cast<OMPInformationCache &>(A.getInfoCache());

    for (InternalControlVar ICV : TrackableICVs) {
      auto &SetterRFI = OMPInfoCache.RFIs[OMPInfoCache.ICVs[ICV].Setter];

      auto &ValuesMap = ICVReplacementValuesMap[ICV];
      auto TrackValues = [&](Use &U, Function &) {
        CallInst *CI = OpenMPOpt::getCallIfRegularCall(U);
        if (!CI)
          return false;

        // FIXME: handle setters with more than one argument.
        /// Track new value.
        if (ValuesMap.insert(std::make_pair(CI, CI->getArgOperand(0))).second)
          HasChanged = ChangeStatus::CHANGED;

        return false;
      };

      auto CallCheck = [&](Instruction &I) {
        Optional<Value *> ReplVal = getValueForCall(A, &I, ICV);
        if (ReplVal.hasValue() &&
            ValuesMap.insert(std::make_pair(&I, *ReplVal)).second)
          HasChanged = ChangeStatus::CHANGED;

        return true;
      };

      // Track all changes of an ICV.
      SetterRFI.foreachUse(TrackValues, F);

      A.checkForAllInstructions(CallCheck, *this, {Instruction::Call},
                                /* CheckBBLivenessOnly */ true);

      /// TODO: Figure out a way to avoid adding an entry in
      ///       ICVReplacementValuesMap.
      Instruction *Entry = &F->getEntryBlock().front();
      if (HasChanged == ChangeStatus::CHANGED && !ValuesMap.count(Entry))
        ValuesMap.insert(std::make_pair(Entry, nullptr));
    }

    return HasChanged;
  }

  /// Helper to check if \p I is a call and get the value for it if it is
  /// unique.
  Optional<Value *> getValueForCall(Attributor &A, const Instruction *I,
                                    InternalControlVar &ICV) const {

    const auto *CB = dyn_cast<CallBase>(I);
    if (!CB)
      return None;

    auto &OMPInfoCache = static_cast<OMPInformationCache &>(A.getInfoCache());
    auto &GetterRFI = OMPInfoCache.RFIs[OMPInfoCache.ICVs[ICV].Getter];
    auto &SetterRFI = OMPInfoCache.RFIs[OMPInfoCache.ICVs[ICV].Setter];
    Function *CalledFunction = CB->getCalledFunction();

    if (CalledFunction == GetterRFI.Declaration)
      return None;
    if (CalledFunction == SetterRFI.Declaration) {
      if (ICVReplacementValuesMap[ICV].count(I))
        return ICVReplacementValuesMap[ICV].lookup(I);

      return nullptr;
    }

    // Since we don't know, assume it changes the ICV.
    if (CalledFunction->isDeclaration())
      return nullptr;

    const auto &ICVTrackingAA =
        A.getAAFor<AAICVTracker>(*this, IRPosition::callsite_returned(*CB));

    if (ICVTrackingAA.isAssumedTracked())
      return ICVTrackingAA.getUniqueReplacementValue(ICV);

    // If we don't know, assume it changes.
    return nullptr;
  }

  // We don't check the unique value for a function, so return None.
  Optional<Value *>
  getUniqueReplacementValue(InternalControlVar ICV) const override {
    return None;
  }

  /// Return the value with which \p I can be replaced for the specific \p ICV.
  Optional<Value *> getReplacementValue(InternalControlVar ICV,
                                        const Instruction *I,
                                        Attributor &A) const override {
    const auto &ValuesMap = ICVReplacementValuesMap[ICV];
    if (ValuesMap.count(I))
      return ValuesMap.lookup(I);

    SmallVector<const Instruction *, 16> Worklist;
    SmallPtrSet<const Instruction *, 16> Visited;
    Worklist.push_back(I);

    Optional<Value *> ReplVal;

    while (!Worklist.empty()) {
      const Instruction *CurrInst = Worklist.pop_back_val();
      if (!Visited.insert(CurrInst).second)
        continue;

      const BasicBlock *CurrBB = CurrInst->getParent();

      // Go up and look for all potential setters/calls that might change the
      // ICV.
      while ((CurrInst = CurrInst->getPrevNode())) {
        if (ValuesMap.count(CurrInst)) {
          Optional<Value *> NewReplVal = ValuesMap.lookup(CurrInst);
          // Unknown value, track new.
          if (!ReplVal.hasValue()) {
            ReplVal = NewReplVal;
            break;
          }

          // If we found a new value, we can't know the ICV value anymore.
          if (NewReplVal.hasValue())
            if (ReplVal != NewReplVal)
              return nullptr;

          break;
        }

        Optional<Value *> NewReplVal = getValueForCall(A, CurrInst, ICV);
        if (!NewReplVal.hasValue())
          continue;

        // Unknown value, track new.
        if (!ReplVal.hasValue()) {
          ReplVal = NewReplVal;
          break;
        }

        // We found a new value, we can't know the ICV value anymore.
        if (ReplVal != NewReplVal)
          return nullptr;
      }

      // If we are in the same BB and we have a value, we are done.
      if (CurrBB == I->getParent() && ReplVal.hasValue())
        return ReplVal;

      // Go through all predecessors and add terminators for analysis.
      for (const BasicBlock *Pred : predecessors(CurrBB))
        if (const Instruction *Terminator = Pred->getTerminator())
          Worklist.push_back(Terminator);
    }

    return ReplVal;
  }
};

struct AAICVTrackerFunctionReturned : AAICVTracker {
  AAICVTrackerFunctionReturned(const IRPosition &IRP, Attributor &A)
      : AAICVTracker(IRP, A) {}

  // FIXME: come up with a better string.
  const std::string getAsStr() const override {
    return "ICVTrackerFunctionReturned";
  }

  // FIXME: come up with some stats.
  void trackStatistics() const override {}

  /// We don't manifest anything for this AA.
  ChangeStatus manifest(Attributor &A) override {
    return ChangeStatus::UNCHANGED;
  }

  // Map of ICVs to their values at specific program points.
  EnumeratedArray<Optional<Value *>, InternalControlVar,
                  InternalControlVar::ICV___last>
      ICVReplacementValuesMap;

  /// Return the value with which \p I can be replaced for the specific \p ICV.
  Optional<Value *>
  getUniqueReplacementValue(InternalControlVar ICV) const override {
    return ICVReplacementValuesMap[ICV];
  }

  ChangeStatus updateImpl(Attributor &A) override {
    ChangeStatus Changed = ChangeStatus::UNCHANGED;
    const auto &ICVTrackingAA = A.getAAFor<AAICVTracker>(
        *this, IRPosition::function(*getAnchorScope()));

    if (!ICVTrackingAA.isAssumedTracked())
      return indicatePessimisticFixpoint();

    for (InternalControlVar ICV : TrackableICVs) {
      Optional<Value *> &ReplVal = ICVReplacementValuesMap[ICV];
      Optional<Value *> UniqueICVValue;

      auto CheckReturnInst = [&](Instruction &I) {
        Optional<Value *> NewReplVal =
            ICVTrackingAA.getReplacementValue(ICV, &I, A);

        // If we found a second ICV value there is no unique returned value.
        if (UniqueICVValue.hasValue() && UniqueICVValue != NewReplVal)
          return false;

        UniqueICVValue = NewReplVal;

        return true;
      };

      if (!A.checkForAllInstructions(CheckReturnInst, *this, {Instruction::Ret},
                                     /* CheckBBLivenessOnly */ true))
        UniqueICVValue = nullptr;

      if (UniqueICVValue == ReplVal)
        continue;

      ReplVal = UniqueICVValue;
      Changed = ChangeStatus::CHANGED;
    }

    return Changed;
  }
};

struct AAICVTrackerCallSite : AAICVTracker {
  AAICVTrackerCallSite(const IRPosition &IRP, Attributor &A)
      : AAICVTracker(IRP, A) {}

  void initialize(Attributor &A) override {
    Function *F = getAnchorScope();
    if (!F || !A.isFunctionIPOAmendable(*F))
      indicatePessimisticFixpoint();

    // We only initialize this AA for getters, so we need to know which ICV it
    // gets.
    auto &OMPInfoCache = static_cast<OMPInformationCache &>(A.getInfoCache());
    for (InternalControlVar ICV : TrackableICVs) {
      auto ICVInfo = OMPInfoCache.ICVs[ICV];
      auto &Getter = OMPInfoCache.RFIs[ICVInfo.Getter];
      if (Getter.Declaration == getAssociatedFunction()) {
        AssociatedICV = ICVInfo.Kind;
        return;
      }
    }

    /// Unknown ICV.
    indicatePessimisticFixpoint();
  }

  ChangeStatus manifest(Attributor &A) override {
    if (!ReplVal.hasValue() || !ReplVal.getValue())
      return ChangeStatus::UNCHANGED;

    A.changeValueAfterManifest(*getCtxI(), **ReplVal);
    A.deleteAfterManifest(*getCtxI());

    return ChangeStatus::CHANGED;
  }

  // FIXME: come up with a better string.
  const std::string getAsStr() const override { return "ICVTrackerCallSite"; }

  // FIXME: come up with some stats.
  void trackStatistics() const override {}

  InternalControlVar AssociatedICV;
  Optional<Value *> ReplVal;

  ChangeStatus updateImpl(Attributor &A) override {
    const auto &ICVTrackingAA = A.getAAFor<AAICVTracker>(
        *this, IRPosition::function(*getAnchorScope()));

    // We don't have any information, so we assume it changes the ICV.
    if (!ICVTrackingAA.isAssumedTracked())
      return indicatePessimisticFixpoint();

    Optional<Value *> NewReplVal =
        ICVTrackingAA.getReplacementValue(AssociatedICV, getCtxI(), A);

    if (ReplVal == NewReplVal)
      return ChangeStatus::UNCHANGED;

    ReplVal = NewReplVal;
    return ChangeStatus::CHANGED;
  }

  // Return the value with which the associated value can be replaced for the
  // specific \p ICV.
  Optional<Value *>
  getUniqueReplacementValue(InternalControlVar ICV) const override {
    return ReplVal;
  }
};

struct AAICVTrackerCallSiteReturned : AAICVTracker {
  AAICVTrackerCallSiteReturned(const IRPosition &IRP, Attributor &A)
      : AAICVTracker(IRP, A) {}

  // FIXME: come up with a better string.
  const std::string getAsStr() const override {
    return "ICVTrackerCallSiteReturned";
  }

  // FIXME: come up with some stats.
  void trackStatistics() const override {}

  /// We don't manifest anything for this AA.
  ChangeStatus manifest(Attributor &A) override {
    return ChangeStatus::UNCHANGED;
  }

  // Map of ICVs to their values at specific program points.
  EnumeratedArray<Optional<Value *>, InternalControlVar,
                  InternalControlVar::ICV___last>
      ICVReplacementValuesMap;

  /// Return the value with which the associated value can be replaced for the
  /// specific \p ICV.
  Optional<Value *>
  getUniqueReplacementValue(InternalControlVar ICV) const override {
    return ICVReplacementValuesMap[ICV];
  }

  ChangeStatus updateImpl(Attributor &A) override {
    ChangeStatus Changed = ChangeStatus::UNCHANGED;
    const auto &ICVTrackingAA = A.getAAFor<AAICVTracker>(
        *this, IRPosition::returned(*getAssociatedFunction()));

    // We don't have any information, so we assume it changes the ICV.
    if (!ICVTrackingAA.isAssumedTracked())
      return indicatePessimisticFixpoint();

    for (InternalControlVar ICV : TrackableICVs) {
      Optional<Value *> &ReplVal = ICVReplacementValuesMap[ICV];
      Optional<Value *> NewReplVal =
          ICVTrackingAA.getUniqueReplacementValue(ICV);

      if (ReplVal == NewReplVal)
        continue;

      ReplVal = NewReplVal;
      Changed = ChangeStatus::CHANGED;
    }
    return Changed;
  }
};
} // namespace

const char AAICVTracker::ID = 0;

AAICVTracker &AAICVTracker::createForPosition(const IRPosition &IRP,
                                              Attributor &A) {
  AAICVTracker *AA = nullptr;
  switch (IRP.getPositionKind()) {
  case IRPosition::IRP_INVALID:
  case IRPosition::IRP_FLOAT:
  case IRPosition::IRP_ARGUMENT:
  case IRPosition::IRP_CALL_SITE_ARGUMENT:
    llvm_unreachable("ICVTracker can only be created for function position!");
  case IRPosition::IRP_RETURNED:
    AA = new (A.Allocator) AAICVTrackerFunctionReturned(IRP, A);
    break;
  case IRPosition::IRP_CALL_SITE_RETURNED:
    AA = new (A.Allocator) AAICVTrackerCallSiteReturned(IRP, A);
    break;
  case IRPosition::IRP_CALL_SITE:
    AA = new (A.Allocator) AAICVTrackerCallSite(IRP, A);
    break;
  case IRPosition::IRP_FUNCTION:
    AA = new (A.Allocator) AAICVTrackerFunction(IRP, A);
    break;
  }

  return *AA;
}

PreservedAnalyses OpenMPOptPass::run(LazyCallGraph::SCC &C,
                                     CGSCCAnalysisManager &AM,
                                     LazyCallGraph &CG, CGSCCUpdateResult &UR) {
  if (!containsOpenMP(*C.begin()->getFunction().getParent(), OMPInModule))
    return PreservedAnalyses::all();

  if (DisableOpenMPOptimizations)
    return PreservedAnalyses::all();

  SmallVector<Function *, 16> SCC;
  // If there are kernels in the module, we have to run on all SCCs.
  bool SCCIsInteresting = !OMPInModule.getKernels().empty();
  for (LazyCallGraph::Node &N : C) {
    Function *Fn = &N.getFunction();
    SCC.push_back(Fn);

    // Do we already know that the SCC contains kernels,
    // or that OpenMP functions are called from this SCC?
    if (SCCIsInteresting)
      continue;
    // If not, let's check that.
    SCCIsInteresting |= OMPInModule.containsOMPRuntimeCalls(Fn);
  }

  if (!SCCIsInteresting || SCC.empty())
    return PreservedAnalyses::all();

  FunctionAnalysisManager &FAM =
      AM.getResult<FunctionAnalysisManagerCGSCCProxy>(C, CG).getManager();

  AnalysisGetter AG(FAM);

  auto OREGetter = [&FAM](Function *F) -> OptimizationRemarkEmitter & {
    return FAM.getResult<OptimizationRemarkEmitterAnalysis>(*F);
  };

  CallGraphUpdater CGUpdater;
  CGUpdater.initialize(CG, C, AM, UR);

  SetVector<Function *> Functions(SCC.begin(), SCC.end());
  BumpPtrAllocator Allocator;
  OMPInformationCache InfoCache(*(Functions.back()->getParent()), AG, Allocator,
                                /*CGSCC*/ Functions, OMPInModule.getKernels());

  Attributor A(Functions, InfoCache, CGUpdater);

  OpenMPOpt OMPOpt(SCC, CGUpdater, OREGetter, InfoCache, A);
  bool Changed = OMPOpt.run();
  if (Changed)
    return PreservedAnalyses::none();

  return PreservedAnalyses::all();
}

namespace {

struct OpenMPOptLegacyPass : public CallGraphSCCPass {
  CallGraphUpdater CGUpdater;
  OpenMPInModule OMPInModule;
  static char ID;

  OpenMPOptLegacyPass() : CallGraphSCCPass(ID) {
    initializeOpenMPOptLegacyPassPass(*PassRegistry::getPassRegistry());
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    CallGraphSCCPass::getAnalysisUsage(AU);
  }

  bool doInitialization(CallGraph &CG) override {
    // Disable the pass if there is no OpenMP (runtime call) in the module.
    containsOpenMP(CG.getModule(), OMPInModule);
    return false;
  }

  bool runOnSCC(CallGraphSCC &CGSCC) override {
    if (!containsOpenMP(CGSCC.getCallGraph().getModule(), OMPInModule))
      return false;
    if (DisableOpenMPOptimizations || skipSCC(CGSCC))
      return false;

    SmallVector<Function *, 16> SCC;
    // If there are kernels in the module, we have to run on all SCCs.
    bool SCCIsInteresting = !OMPInModule.getKernels().empty();
    for (CallGraphNode *CGN : CGSCC) {
      Function *Fn = CGN->getFunction();
      if (!Fn || Fn->isDeclaration())
        continue;
      SCC.push_back(Fn);

      // Do we already know that the SCC contains kernels,
      // or that OpenMP functions are called from this SCC?
      if (SCCIsInteresting)
        continue;
      // If not, let's check that.
      SCCIsInteresting |= OMPInModule.containsOMPRuntimeCalls(Fn);
    }

    if (!SCCIsInteresting || SCC.empty())
      return false;

    CallGraph &CG = getAnalysis<CallGraphWrapperPass>().getCallGraph();
    CGUpdater.initialize(CG, CGSCC);

    // Maintain a map of functions to avoid rebuilding the ORE.
    DenseMap<Function *, std::unique_ptr<OptimizationRemarkEmitter>> OREMap;
    auto OREGetter = [&OREMap](Function *F) -> OptimizationRemarkEmitter & {
      std::unique_ptr<OptimizationRemarkEmitter> &ORE = OREMap[F];
      if (!ORE)
        ORE = std::make_unique<OptimizationRemarkEmitter>(F);
      return *ORE;
    };

    AnalysisGetter AG;
    SetVector<Function *> Functions(SCC.begin(), SCC.end());
    BumpPtrAllocator Allocator;
    OMPInformationCache InfoCache(
        *(Functions.back()->getParent()), AG, Allocator,
        /*CGSCC*/ Functions, OMPInModule.getKernels());

    Attributor A(Functions, InfoCache, CGUpdater);

    OpenMPOpt OMPOpt(SCC, CGUpdater, OREGetter, InfoCache, A);
    return OMPOpt.run();
  }

  bool doFinalization(CallGraph &CG) override { return CGUpdater.finalize(); }
};

} // end anonymous namespace

void OpenMPInModule::identifyKernels(Module &M) {

  NamedMDNode *MD = M.getOrInsertNamedMetadata("nvvm.annotations");
  if (!MD)
    return;

  for (auto *Op : MD->operands()) {
    if (Op->getNumOperands() < 2)
      continue;
    MDString *KindID = dyn_cast<MDString>(Op->getOperand(1));
    if (!KindID || KindID->getString() != "kernel")
      continue;

    Function *KernelFn =
        mdconst::dyn_extract_or_null<Function>(Op->getOperand(0));
    if (!KernelFn)
      continue;

    ++NumOpenMPTargetRegionKernels;

    Kernels.insert(KernelFn);
  }
}

bool llvm::omp::containsOpenMP(Module &M, OpenMPInModule &OMPInModule) {
  if (OMPInModule.isKnown())
    return OMPInModule;

  auto RecordFunctionsContainingUsesOf = [&](Function *F) {
    for (User *U : F->users())
      if (auto *I = dyn_cast<Instruction>(U))
        OMPInModule.FuncsWithOMPRuntimeCalls.insert(I->getFunction());
  };

  // MSVC doesn't like long if-else chains for some reason and instead just
  // issues an error. Work around it.
  do {
#define OMP_RTL(_Enum, _Name, ...)                                             \
  if (Function *F = M.getFunction(_Name)) {                                    \
    RecordFunctionsContainingUsesOf(F);                                        \
    OMPInModule = true;                                                        \
  }
#include "llvm/Frontend/OpenMP/OMPKinds.def"
  } while (false);

  // Identify kernels once. TODO: We should split the OMPInformationCache into
  // a module and an SCC part. The kernel information, among other things,
  // could go into the module part.
  if (OMPInModule.isKnown() && OMPInModule) {
    OMPInModule.identifyKernels(M);
    return true;
  }

  return OMPInModule = false;
}

char OpenMPOptLegacyPass::ID = 0;

INITIALIZE_PASS_BEGIN(OpenMPOptLegacyPass, "openmpopt",
                      "OpenMP specific optimizations", false, false)
INITIALIZE_PASS_DEPENDENCY(CallGraphWrapperPass)
INITIALIZE_PASS_END(OpenMPOptLegacyPass, "openmpopt",
                    "OpenMP specific optimizations", false, false)

Pass *llvm::createOpenMPOptLegacyPass() { return new OpenMPOptLegacyPass(); }