//===-- IPO/OpenMPOpt.cpp - Collection of OpenMP specific optimizations ---===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// OpenMP specific optimizations:
//
// - Deduplication of runtime calls, e.g., omp_get_thread_num.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/IPO/OpenMPOpt.h"

#include "llvm/ADT/EnumeratedArray.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/CallGraph.h"
#include "llvm/Analysis/CallGraphSCCPass.h"
#include "llvm/Frontend/OpenMP/OMPConstants.h"
#include "llvm/Frontend/OpenMP/OMPIRBuilder.h"
#include "llvm/IR/CallSite.h"
#include "llvm/InitializePasses.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Transforms/IPO.h"
#include "llvm/Transforms/Utils/CallGraphUpdater.h"

using namespace llvm;
using namespace omp;
using namespace types;

#define DEBUG_TYPE "openmp-opt"

static cl::opt<bool> DisableOpenMPOptimizations(
    "openmp-opt-disable", cl::ZeroOrMore,
    cl::desc("Disable OpenMP specific optimizations."), cl::Hidden,
    cl::init(false));

STATISTIC(NumOpenMPRuntimeCallsDeduplicated,
          "Number of OpenMP runtime calls deduplicated");
STATISTIC(NumOpenMPRuntimeFunctionsIdentified,
          "Number of OpenMP runtime functions identified");
STATISTIC(NumOpenMPRuntimeFunctionUsesIdentified,
          "Number of OpenMP runtime function uses identified");

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
static constexpr auto TAG = "[" DEBUG_TYPE "]";
#endif

namespace {
struct OpenMPOpt {

  OpenMPOpt(SmallPtrSetImpl<Function *> &SCC,
            SmallPtrSetImpl<Function *> &ModuleSlice,
            CallGraphUpdater &CGUpdater)
      : M(*(*SCC.begin())->getParent()), SCC(SCC), ModuleSlice(ModuleSlice),
        OMPBuilder(M), CGUpdater(CGUpdater) {
    initializeTypes(M);
    initializeRuntimeFunctions();
    OMPBuilder.initialize();
  }

  /// Generic information that describes a runtime function.
  struct RuntimeFunctionInfo {
    /// The kind, as described by the RuntimeFunction enum.
    RuntimeFunction Kind;

    /// The name of the function.
    StringRef Name;

    /// Flag to indicate a variadic function.
    bool IsVarArg;

    /// The return type of the function.
    Type *ReturnType;

    /// The argument types of the function.
    SmallVector<Type *, 8> ArgumentTypes;

    /// The declaration if available.
    Function *Declaration;

    /// Uses of this runtime function per function containing the use.
    DenseMap<Function *, SmallPtrSet<Use *, 16>> UsesMap;

    /// Return the number of arguments (or the minimal number for variadic
    /// functions).
    size_t getNumArgs() const { return ArgumentTypes.size(); }

    /// Run the callback \p CB on each use and forget the use if the result is
    /// true. The callback will be fed the function in which the use was
    /// encountered as the second argument.
    void foreachUse(function_ref<bool(Use &, Function &)> CB) {
      SmallVector<Use *, 8> ToBeDeleted;
      for (auto &It : UsesMap) {
        ToBeDeleted.clear();
        for (Use *U : It.second)
          if (CB(*U, *It.first))
            ToBeDeleted.push_back(U);
        for (Use *U : ToBeDeleted)
          It.second.erase(U);
      }
    }
  };

  /// Run all OpenMP optimizations on the underlying SCC/ModuleSlice.
  bool run() {
    bool Changed = false;

    LLVM_DEBUG(dbgs() << TAG << "Run on SCC with " << SCC.size()
                      << " functions in a slice with " << ModuleSlice.size()
                      << " functions\n");

    Changed |= deduplicateRuntimeCalls();
    Changed |= deleteParallelRegions();

    return Changed;
  }

private:
  /// Try to delete parallel regions if possible.
  bool deleteParallelRegions() {
    const unsigned CallbackCalleeOperand = 2;

    RuntimeFunctionInfo &RFI = RFIs[OMPRTL___kmpc_fork_call];
    if (!RFI.Declaration)
      return false;

    bool Changed = false;
    auto DeleteCallCB = [&](Use &U, Function &) {
      CallInst *CI = getCallIfRegularCall(U);
      if (!CI)
        return false;
      auto *Fn = dyn_cast<Function>(
          CI->getArgOperand(CallbackCalleeOperand)->stripPointerCasts());
      if (!Fn)
        return false;
      if (!Fn->onlyReadsMemory())
        return false;
      if (!Fn->hasFnAttribute(Attribute::WillReturn))
        return false;

      LLVM_DEBUG(dbgs() << TAG << "Delete read-only parallel region in "
                        << CI->getCaller()->getName() << "\n");
      CGUpdater.removeCallSite(*CI);
      CI->eraseFromParent();
      Changed = true;
      return true;
    };

    RFI.foreachUse(DeleteCallCB);

    return Changed;
  }

  /// Try to eliminate runtime calls by reusing existing ones.
  bool deduplicateRuntimeCalls() {
    bool Changed = false;

    RuntimeFunction DeduplicableRuntimeCallIDs[] = {
        OMPRTL_omp_get_num_threads,
        OMPRTL_omp_in_parallel,
        OMPRTL_omp_get_cancellation,
        OMPRTL_omp_get_thread_limit,
        OMPRTL_omp_get_supported_active_levels,
        OMPRTL_omp_get_level,
        OMPRTL_omp_get_ancestor_thread_num,
        OMPRTL_omp_get_team_size,
        OMPRTL_omp_get_active_level,
        OMPRTL_omp_in_final,
        OMPRTL_omp_get_proc_bind,
        OMPRTL_omp_get_num_places,
        OMPRTL_omp_get_num_procs,
        OMPRTL_omp_get_place_num,
        OMPRTL_omp_get_partition_num_places,
        OMPRTL_omp_get_partition_place_nums};

    // The global thread ID (gtid) is handled separately.
    SmallSetVector<Value *, 16> GTIdArgs;
    collectGlobalThreadIdArguments(GTIdArgs);
    LLVM_DEBUG(dbgs() << TAG << "Found " << GTIdArgs.size()
                      << " global thread ID arguments\n");

    for (Function *F : SCC) {
      for (auto DeduplicableRuntimeCallID : DeduplicableRuntimeCallIDs)
        Changed |=
            deduplicateRuntimeCalls(*F, RFIs[DeduplicableRuntimeCallID]);

      // __kmpc_global_thread_num is special as we can replace it with an
      // argument in enough cases to make it worth trying.
      Value *GTIdArg = nullptr;
      for (Argument &Arg : F->args())
        if (GTIdArgs.count(&Arg)) {
          GTIdArg = &Arg;
          break;
        }
      Changed |= deduplicateRuntimeCalls(
          *F, RFIs[OMPRTL___kmpc_global_thread_num], GTIdArg);
    }

    return Changed;
  }

  /// Try to eliminate calls of \p RFI in \p F by reusing an existing one or
  /// \p ReplVal if given.
  bool deduplicateRuntimeCalls(Function &F, RuntimeFunctionInfo &RFI,
                               Value *ReplVal = nullptr) {
    auto &Uses = RFI.UsesMap[&F];
    if (Uses.size() + (ReplVal != nullptr) < 2)
      return false;

    LLVM_DEBUG(dbgs() << TAG << "Deduplicate " << Uses.size() << " uses of "
                      << RFI.Name
                      << (ReplVal ? " with an existing value" : "") << "\n");
    assert((!ReplVal || (isa<Argument>(ReplVal) &&
                         cast<Argument>(ReplVal)->getParent() == &F)) &&
           "Unexpected replacement value!");
    if (!ReplVal) {
      for (Use *U : Uses)
        if (CallInst *CI = getCallIfRegularCall(*U, &RFI)) {
          CI->moveBefore(&*F.getEntryBlock().getFirstInsertionPt());
          ReplVal = CI;
          break;
        }
      if (!ReplVal)
        return false;
    }

    bool Changed = false;
    auto ReplaceAndDeleteCB = [&](Use &U, Function &Caller) {
      CallInst *CI = getCallIfRegularCall(U, &RFI);
      if (!CI || CI == ReplVal || &F != &Caller)
        return false;
      assert(CI->getCaller() == &F && "Unexpected call!");
      CGUpdater.removeCallSite(*CI);
      CI->replaceAllUsesWith(ReplVal);
      CI->eraseFromParent();
      ++NumOpenMPRuntimeCallsDeduplicated;
      Changed = true;
      return true;
    };
    RFI.foreachUse(ReplaceAndDeleteCB);

    return Changed;
  }

  /// Collect arguments that represent the global thread id in \p GTIdArgs.
  void collectGlobalThreadIdArguments(SmallSetVector<Value *, 16> &GTIdArgs) {
    // TODO: Below we basically perform a fixpoint iteration with a pessimistic
    //       initialization. We could define an AbstractAttribute instead and
    //       run the Attributor here once it can be run as an SCC pass.

    // Helper to check the argument \p ArgNo at all call sites of \p F for
    // a GTId.
    auto CallArgOpIsGTId = [&](Function &F, unsigned ArgNo, CallInst &RefCI) {
      if (!F.hasLocalLinkage())
        return false;
      for (Use &U : F.uses()) {
        if (CallInst *CI = getCallIfRegularCall(U)) {
          Value *ArgOp = CI->getArgOperand(ArgNo);
          if (CI == &RefCI || GTIdArgs.count(ArgOp) ||
              getCallIfRegularCall(*ArgOp,
                                   &RFIs[OMPRTL___kmpc_global_thread_num]))
            continue;
        }
        return false;
      }
      return true;
    };

    // Helper to identify uses of a GTId as GTId arguments.
    auto AddUserArgs = [&](Value &GTId) {
      for (Use &U : GTId.uses())
        if (CallInst *CI = dyn_cast<CallInst>(U.getUser()))
          if (CI->isArgOperand(&U))
            if (Function *Callee = CI->getCalledFunction())
              if (CallArgOpIsGTId(*Callee, U.getOperandNo(), *CI))
                GTIdArgs.insert(Callee->getArg(U.getOperandNo()));
    };

    // The argument users of __kmpc_global_thread_num calls are GTIds.
    RuntimeFunctionInfo &GlobThreadNumRFI =
        RFIs[OMPRTL___kmpc_global_thread_num];
    for (auto &It : GlobThreadNumRFI.UsesMap)
      for (Use *U : It.second)
        if (CallInst *CI = getCallIfRegularCall(*U, &GlobThreadNumRFI))
          AddUserArgs(*CI);

    // Transitively search for more arguments by looking at the users of the
    // ones we know already. During the search the GTIdArgs vector is extended
    // so we cannot cache the size nor can we use a range based for.
    for (unsigned u = 0; u < GTIdArgs.size(); ++u)
      AddUserArgs(*GTIdArgs[u]);
  }

  /// Return the call if \p U is a callee use in a regular call. If \p RFI is
  /// given it has to be the callee or a nullptr is returned.
  CallInst *getCallIfRegularCall(Use &U, RuntimeFunctionInfo *RFI = nullptr) {
    CallInst *CI = dyn_cast<CallInst>(U.getUser());
    if (CI && CI->isCallee(&U) && !CI->hasOperandBundles() &&
        (!RFI || CI->getCalledFunction() == RFI->Declaration))
      return CI;
    return nullptr;
  }

  /// Return the call if \p V is a regular call. If \p RFI is given it has to
  /// be the callee or a nullptr is returned.
  CallInst *getCallIfRegularCall(Value &V, RuntimeFunctionInfo *RFI = nullptr) {
    CallInst *CI = dyn_cast<CallInst>(&V);
    if (CI && !CI->hasOperandBundles() &&
        (!RFI || CI->getCalledFunction() == RFI->Declaration))
      return CI;
    return nullptr;
  }

  /// Helper to initialize all runtime function information for those defined
  /// in OpenMPKinds.def.
  void initializeRuntimeFunctions() {
    // Helper to collect all uses of the declaration in the UsesMap.
    auto CollectUses = [&](RuntimeFunctionInfo &RFI) {
      unsigned NumUses = 0;
      if (!RFI.Declaration)
        return NumUses;
      OMPBuilder.addAttributes(RFI.Kind, *RFI.Declaration);

      NumOpenMPRuntimeFunctionsIdentified += 1;
      NumOpenMPRuntimeFunctionUsesIdentified += RFI.Declaration->getNumUses();

      // TODO: We directly convert uses into proper calls and unknown uses.
      for (Use &U : RFI.Declaration->uses()) {
        if (Instruction *UserI = dyn_cast<Instruction>(U.getUser())) {
          if (ModuleSlice.count(UserI->getFunction())) {
            RFI.UsesMap[UserI->getFunction()].insert(&U);
            ++NumUses;
          }
        } else {
          RFI.UsesMap[nullptr].insert(&U);
          ++NumUses;
        }
      }
      return NumUses;
    };

#define OMP_RTL(_Enum, _Name, _IsVarArg, _ReturnType, ...)                     \
  {                                                                            \
    auto &RFI = RFIs[_Enum];                                                   \
    RFI.Kind = _Enum;                                                          \
    RFI.Name = _Name;                                                          \
    RFI.IsVarArg = _IsVarArg;                                                  \
    RFI.ReturnType = _ReturnType;                                              \
    RFI.ArgumentTypes = SmallVector<Type *, 8>({__VA_ARGS__});                 \
    RFI.Declaration = M.getFunction(_Name);                                    \
    unsigned NumUses = CollectUses(RFI);                                       \
    (void)NumUses;                                                             \
    LLVM_DEBUG({                                                               \
      dbgs() << TAG << RFI.Name << (RFI.Declaration ? "" : " not")             \
             << " found\n";                                                    \
      if (RFI.Declaration)                                                     \
        dbgs() << TAG << "-> got " << NumUses << " uses in "                   \
               << RFI.UsesMap.size() << " different functions.\n";             \
    });                                                                        \
  }
#include "llvm/Frontend/OpenMP/OMPKinds.def"

    // TODO: We should validate the declaration against the types we expect.
  }

  /// The underlying module.
  Module &M;

  /// The SCC we are operating on.
  SmallPtrSetImpl<Function *> &SCC;

  /// The slice of the module we are allowed to look at.
  SmallPtrSetImpl<Function *> &ModuleSlice;

  /// An OpenMP-IR-Builder instance.
  OpenMPIRBuilder OMPBuilder;

  /// Callback to update the call graph, the first argument is a removed call,
  /// the second an optional replacement call.
  CallGraphUpdater &CGUpdater;

  /// Map from runtime function kind to the runtime function description.
  EnumeratedArray<RuntimeFunctionInfo, RuntimeFunction,
                  RuntimeFunction::OMPRTL___last>
      RFIs;
};
} // namespace

PreservedAnalyses OpenMPOptPass::run(LazyCallGraph::SCC &C,
                                     CGSCCAnalysisManager &AM,
                                     LazyCallGraph &CG, CGSCCUpdateResult &UR) {
  if (!containsOpenMP(*C.begin()->getFunction().getParent(), OMPInModule))
    return PreservedAnalyses::all();

  if (DisableOpenMPOptimizations)
    return PreservedAnalyses::all();

  SmallPtrSet<Function *, 16> SCC;
  for (LazyCallGraph::Node &N : C)
    SCC.insert(&N.getFunction());

  if (SCC.empty())
    return PreservedAnalyses::all();

  CallGraphUpdater CGUpdater;
  CGUpdater.initialize(CG, C, AM, UR);
  // TODO: Compute the module slice we are allowed to look at.
  OpenMPOpt OMPOpt(SCC, SCC, CGUpdater);
  bool Changed = OMPOpt.run();
  (void)Changed;
  return PreservedAnalyses::all();
}

namespace {

struct OpenMPOptLegacyPass : public CallGraphSCCPass {
  CallGraphUpdater CGUpdater;
  OpenMPInModule OMPInModule;
  static char ID;

  OpenMPOptLegacyPass() : CallGraphSCCPass(ID) {
    initializeOpenMPOptLegacyPassPass(*PassRegistry::getPassRegistry());
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    CallGraphSCCPass::getAnalysisUsage(AU);
  }

  bool doInitialization(CallGraph &CG) override {
    // Disable the pass if there is no OpenMP (runtime call) in the module.
    containsOpenMP(CG.getModule(), OMPInModule);
    return false;
  }

  bool runOnSCC(CallGraphSCC &CGSCC) override {
    if (!containsOpenMP(CGSCC.getCallGraph().getModule(), OMPInModule))
      return false;
    if (DisableOpenMPOptimizations || skipSCC(CGSCC))
      return false;

    SmallPtrSet<Function *, 16> SCC;
    for (CallGraphNode *CGN : CGSCC)
      if (Function *Fn = CGN->getFunction())
        if (!Fn->isDeclaration())
          SCC.insert(Fn);

    if (SCC.empty())
      return false;

    CallGraph &CG = getAnalysis<CallGraphWrapperPass>().getCallGraph();
    CGUpdater.initialize(CG, CGSCC);

    // TODO: Compute the module slice we are allowed to look at.
    OpenMPOpt OMPOpt(SCC, SCC, CGUpdater);
    return OMPOpt.run();
  }

  bool doFinalization(CallGraph &CG) override { return CGUpdater.finalize(); }
};

} // end anonymous namespace

bool llvm::omp::containsOpenMP(Module &M, OpenMPInModule &OMPInModule) {
  if (OMPInModule.isKnown())
    return OMPInModule;

#define OMP_RTL(_Enum, _Name, ...)                                             \
  if (M.getFunction(_Name))                                                    \
    return OMPInModule = true;
#include "llvm/Frontend/OpenMP/OMPKinds.def"
  return OMPInModule = false;
}

char OpenMPOptLegacyPass::ID = 0;

INITIALIZE_PASS_BEGIN(OpenMPOptLegacyPass, "openmpopt",
                      "OpenMP specific optimizations", false, false)
INITIALIZE_PASS_DEPENDENCY(CallGraphWrapperPass)
INITIALIZE_PASS_END(OpenMPOptLegacyPass, "openmpopt",
                    "OpenMP specific optimizations", false, false)

Pass *llvm::createOpenMPOptLegacyPass() { return new OpenMPOptLegacyPass(); }