1 //===- OpenMPIRBuilder.cpp - Builder for LLVM-IR for OpenMP directives ----===// 2 // 3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4 // See https://llvm.org/LICENSE.txt for license information. 5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 6 // 7 //===----------------------------------------------------------------------===// 8 /// \file 9 /// 10 /// This file implements the OpenMPIRBuilder class, which is used as a 11 /// convenient way to create LLVM instructions for OpenMP directives. 12 /// 13 //===----------------------------------------------------------------------===// 14 15 #include "llvm/Frontend/OpenMP/OMPIRBuilder.h" 16 #include "llvm/ADT/SmallSet.h" 17 #include "llvm/ADT/StringRef.h" 18 #include "llvm/ADT/Triple.h" 19 #include "llvm/Analysis/AssumptionCache.h" 20 #include "llvm/Analysis/CodeMetrics.h" 21 #include "llvm/Analysis/LoopInfo.h" 22 #include "llvm/Analysis/OptimizationRemarkEmitter.h" 23 #include "llvm/Analysis/ScalarEvolution.h" 24 #include "llvm/Analysis/TargetLibraryInfo.h" 25 #include "llvm/IR/CFG.h" 26 #include "llvm/IR/Constants.h" 27 #include "llvm/IR/DebugInfo.h" 28 #include "llvm/IR/GlobalVariable.h" 29 #include "llvm/IR/IRBuilder.h" 30 #include "llvm/IR/MDBuilder.h" 31 #include "llvm/IR/PassManager.h" 32 #include "llvm/IR/Value.h" 33 #include "llvm/MC/TargetRegistry.h" 34 #include "llvm/Support/CommandLine.h" 35 #include "llvm/Support/Error.h" 36 #include "llvm/Target/TargetMachine.h" 37 #include "llvm/Target/TargetOptions.h" 38 #include "llvm/Transforms/Utils/BasicBlockUtils.h" 39 #include "llvm/Transforms/Utils/CodeExtractor.h" 40 #include "llvm/Transforms/Utils/LoopPeel.h" 41 #include "llvm/Transforms/Utils/ModuleUtils.h" 42 #include "llvm/Transforms/Utils/UnrollLoop.h" 43 44 #include <cstdint> 45 #include <sstream> 46 47 #define DEBUG_TYPE "openmp-ir-builder" 48 49 using namespace llvm; 50 using namespace omp; 51 52 static cl::opt<bool> 53 
OptimisticAttributes("openmp-ir-builder-optimistic-attributes", cl::Hidden, 54 cl::desc("Use optimistic attributes describing " 55 "'as-if' properties of runtime calls."), 56 cl::init(false)); 57 58 static cl::opt<double> UnrollThresholdFactor( 59 "openmp-ir-builder-unroll-threshold-factor", cl::Hidden, 60 cl::desc("Factor for the unroll threshold to account for code " 61 "simplifications still taking place"), 62 cl::init(1.5)); 63 64 #ifndef NDEBUG 65 /// Return whether IP1 and IP2 are ambiguous, i.e. that inserting instructions 66 /// at position IP1 may change the meaning of IP2 or vice-versa. This is because 67 /// an InsertPoint stores the instruction before something is inserted. For 68 /// instance, if both point to the same instruction, two IRBuilders alternating 69 /// creating instruction will cause the instructions to be interleaved. 70 static bool isConflictIP(IRBuilder<>::InsertPoint IP1, 71 IRBuilder<>::InsertPoint IP2) { 72 if (!IP1.isSet() || !IP2.isSet()) 73 return false; 74 return IP1.getBlock() == IP2.getBlock() && IP1.getPoint() == IP2.getPoint(); 75 } 76 #endif 77 78 /// Make \p Source branch to \p Target. 79 /// 80 /// Handles two situations: 81 /// * \p Source already has an unconditional branch. 82 /// * \p Source is a degenerate block (no terminator because the BB is 83 /// the current head of the IR construction). 84 static void redirectTo(BasicBlock *Source, BasicBlock *Target, DebugLoc DL) { 85 if (Instruction *Term = Source->getTerminator()) { 86 auto *Br = cast<BranchInst>(Term); 87 assert(!Br->isConditional() && 88 "BB's terminator must be an unconditional branch (or degenerate)"); 89 BasicBlock *Succ = Br->getSuccessor(0); 90 Succ->removePredecessor(Source, /*KeepOneInputPHIs=*/true); 91 Br->setSuccessor(0, Target); 92 return; 93 } 94 95 auto *NewBr = BranchInst::Create(Target, Source); 96 NewBr->setDebugLoc(DL); 97 } 98 99 /// Move the instruction after an InsertPoint to the beginning of another 100 /// BasicBlock. 
///
/// The instructions after \p IP are moved to the beginning of \p New which must
/// not have any PHINodes. If \p CreateBranch is true, a branch instruction to
/// \p New will be added such that there is no semantic change. Otherwise, the
/// \p IP insert block remains degenerate and it is up to the caller to insert a
/// terminator.
static void spliceBB(OpenMPIRBuilder::InsertPointTy IP, BasicBlock *New,
                     bool CreateBranch) {
  assert(New->getFirstInsertionPt() == New->begin() &&
         "Target BB must not have PHI nodes");

  // Move instructions to new block. Everything from the insert point to the
  // end of the old block moves over; the old block may be left without a
  // terminator (degenerate) if CreateBranch is false.
  BasicBlock *Old = IP.getBlock();
  New->getInstList().splice(New->begin(), Old->getInstList(), IP.getPoint(),
                            Old->end());

  if (CreateBranch)
    BranchInst::Create(New, Old);
}

/// Splice a BasicBlock at an IRBuilder's current insertion point. Its new
/// insert location will stick to after the instruction before the insertion
/// point (instead of moving with the instruction the InsertPoint stores
/// internally).
static void spliceBB(IRBuilder<> &Builder, BasicBlock *New, bool CreateBranch) {
  DebugLoc DebugLoc = Builder.getCurrentDebugLocation();
  BasicBlock *Old = Builder.GetInsertBlock();

  spliceBB(Builder.saveIP(), New, CreateBranch);
  // Re-anchor the builder at the end of the old block so that it does not
  // follow the spliced instructions into \p New.
  if (CreateBranch)
    Builder.SetInsertPoint(Old->getTerminator());
  else
    Builder.SetInsertPoint(Old);

  // SetInsertPoint also updates the Builder's debug location, but we want to
  // keep the one the Builder was configured to use.
  Builder.SetCurrentDebugLocation(DebugLoc);
}

/// Split a BasicBlock at an InsertPoint, even if the block is degenerate
/// (missing the terminator).
///
/// llvm::SplitBasicBlock and BasicBlock::splitBasicBlock require a well-formed
/// BasicBlock. \p Name is used for the new successor block.
If \p CreateBranch
/// is true, a branch to the new successor will be created such that
/// semantically there is no change; otherwise the block of the insertion point
/// remains degenerate and it is the caller's responsibility to insert a
/// terminator. Returns the new successor block.
static BasicBlock *splitBB(OpenMPIRBuilder::InsertPointTy IP, bool CreateBranch,
                           llvm::Twine Name = {}) {
  BasicBlock *Old = IP.getBlock();
  // The new block inherits the old block's name unless an explicit name was
  // given, and is placed directly after the old block.
  BasicBlock *New = BasicBlock::Create(
      Old->getContext(), Name.isTriviallyEmpty() ? Old->getName() : Name,
      Old->getParent(), Old->getNextNode());
  spliceBB(IP, New, CreateBranch);
  // PHI nodes in the successors now receive their values from \p New.
  New->replaceSuccessorsPhiUsesWith(Old, New);
  return New;
}

/// Split a BasicBlock at \p Builder's insertion point, even if the block is
/// degenerate (missing the terminator). Its new insert location will stick to
/// after the instruction before the insertion point (instead of moving with the
/// instruction the InsertPoint stores internally).
static BasicBlock *splitBB(IRBuilder<> &Builder, bool CreateBranch,
                           llvm::Twine Name = {}) {
  DebugLoc DebugLoc = Builder.getCurrentDebugLocation();
  BasicBlock *New = splitBB(Builder.saveIP(), CreateBranch, Name);
  if (CreateBranch)
    Builder.SetInsertPoint(Builder.GetInsertBlock()->getTerminator());
  else
    Builder.SetInsertPoint(Builder.GetInsertBlock());
  // SetInsertPoint also updates the Builder's debug location, but we want to
  // keep the one the Builder was configured to use.
  Builder.SetCurrentDebugLocation(DebugLoc);
  return New;
}

/// Merge the attribute sets declared for \p FnID in OMPKinds.def into the
/// declaration \p Fn.
void OpenMPIRBuilder::addAttributes(omp::RuntimeFunction FnID, Function &Fn) {
  LLVMContext &Ctx = Fn.getContext();

  // Get the function's current attributes.
auto Attrs = Fn.getAttributes();
  auto FnAttrs = Attrs.getFnAttrs();
  auto RetAttrs = Attrs.getRetAttrs();
  // Collect the existing per-argument attribute sets so the new sets below can
  // be merged into them rather than replacing them.
  SmallVector<AttributeSet, 4> ArgAttrs;
  for (size_t ArgNo = 0; ArgNo < Fn.arg_size(); ++ArgNo)
    ArgAttrs.emplace_back(Attrs.getParamAttrs(ArgNo));

// Materialize the named attribute sets declared in OMPKinds.def as locals.
#define OMP_ATTRS_SET(VarName, AttrSet) AttributeSet VarName = AttrSet;
#include "llvm/Frontend/OpenMP/OMPKinds.def"

  // Add attributes to the function declaration.
  switch (FnID) {
#define OMP_RTL_ATTRS(Enum, FnAttrSet, RetAttrSet, ArgAttrSets)                \
  case Enum:                                                                   \
    FnAttrs = FnAttrs.addAttributes(Ctx, FnAttrSet);                           \
    RetAttrs = RetAttrs.addAttributes(Ctx, RetAttrSet);                        \
    for (size_t ArgNo = 0; ArgNo < ArgAttrSets.size(); ++ArgNo)                \
      ArgAttrs[ArgNo] =                                                        \
          ArgAttrs[ArgNo].addAttributes(Ctx, ArgAttrSets[ArgNo]);              \
    Fn.setAttributes(AttributeList::get(Ctx, FnAttrs, RetAttrs, ArgAttrs));    \
    break;
#include "llvm/Frontend/OpenMP/OMPKinds.def"
  default:
    // Attributes are optional.
    break;
  }
}

FunctionCallee
OpenMPIRBuilder::getOrCreateRuntimeFunction(Module &M, RuntimeFunction FnID) {
  FunctionType *FnTy = nullptr;
  Function *Fn = nullptr;

  // Try to find the declaration in the module first.
  switch (FnID) {
#define OMP_RTL(Enum, Str, IsVarArg, ReturnType, ...)                          \
  case Enum:                                                                   \
    FnTy = FunctionType::get(ReturnType, ArrayRef<Type *>{__VA_ARGS__},        \
                             IsVarArg);                                        \
    Fn = M.getFunction(Str);                                                   \
    break;
#include "llvm/Frontend/OpenMP/OMPKinds.def"
  }

  if (!Fn) {
    // Create a new declaration if we need one.
    switch (FnID) {
#define OMP_RTL(Enum, Str, ...)
\
  case Enum:                                                                   \
    Fn = Function::Create(FnTy, GlobalValue::ExternalLinkage, Str, M);         \
    break;
#include "llvm/Frontend/OpenMP/OMPKinds.def"
    }

    // Add information if the runtime function takes a callback function
    if (FnID == OMPRTL___kmpc_fork_call || FnID == OMPRTL___kmpc_fork_teams) {
      if (!Fn->hasMetadata(LLVMContext::MD_callback)) {
        LLVMContext &Ctx = Fn->getContext();
        MDBuilder MDB(Ctx);
        // Annotate the callback behavior of the runtime function:
        //  - The callback callee is argument number 2 (microtask).
        //  - The first two arguments of the callback callee are unknown (-1).
        //  - All variadic arguments to the runtime function are passed to the
        //    callback callee.
        Fn->addMetadata(
            LLVMContext::MD_callback,
            *MDNode::get(Ctx, {MDB.createCallbackEncoding(
                                  2, {-1, -1}, /* VarArgsArePassed */ true)}));
      }
    }

    LLVM_DEBUG(dbgs() << "Created OpenMP runtime function " << Fn->getName()
                      << " with type " << *Fn->getFunctionType() << "\n");
    addAttributes(FnID, *Fn);

  } else {
    LLVM_DEBUG(dbgs() << "Found OpenMP runtime function " << Fn->getName()
                      << " with type " << *Fn->getFunctionType() << "\n");
  }

  assert(Fn && "Failed to create OpenMP runtime function");

  // Cast the function to the expected type if necessary
  Constant *C = ConstantExpr::getBitCast(Fn, FnTy->getPointerTo());
  return {FnTy, C};
}

Function *OpenMPIRBuilder::getOrCreateRuntimeFunctionPtr(RuntimeFunction FnID) {
  FunctionCallee RTLFn = getOrCreateRuntimeFunction(M, FnID);
  // The callee may be a bitcast constant expression if the declared type did
  // not match the expected one; callers of this helper need a plain Function.
  auto *Fn = dyn_cast<llvm::Function>(RTLFn.getCallee());
  assert(Fn && "Failed to create OpenMP runtime function pointer");
  return Fn;
}

void OpenMPIRBuilder::initialize() { initializeTypes(M); }

// Outline all pending regions (or only those belonging to \p Fn) that were
// recorded in OutlineInfos while building directives.
void OpenMPIRBuilder::finalize(Function *Fn) {
  SmallPtrSet<BasicBlock *, 32> ParallelRegionBlockSet;
  SmallVector<BasicBlock *, 32> Blocks;
  SmallVector<OutlineInfo,
16> DeferredOutlines;
  for (OutlineInfo &OI : OutlineInfos) {
    // Skip functions that have not finalized yet; may happen with nested
    // function generation.
    if (Fn && OI.getFunction() != Fn) {
      DeferredOutlines.push_back(OI);
      continue;
    }

    // Reuse the scratch containers across iterations.
    ParallelRegionBlockSet.clear();
    Blocks.clear();
    OI.collectBlocks(ParallelRegionBlockSet, Blocks);

    Function *OuterFn = OI.getFunction();
    CodeExtractorAnalysisCache CEAC(*OuterFn);
    CodeExtractor Extractor(Blocks, /* DominatorTree */ nullptr,
                            /* AggregateArgs */ true,
                            /* BlockFrequencyInfo */ nullptr,
                            /* BranchProbabilityInfo */ nullptr,
                            /* AssumptionCache */ nullptr,
                            /* AllowVarArgs */ true,
                            /* AllowAlloca */ true,
                            /* Suffix */ ".omp_par");

    LLVM_DEBUG(dbgs() << "Before outlining: " << *OuterFn << "\n");
    LLVM_DEBUG(dbgs() << "Entry " << OI.EntryBB->getName()
                      << " Exit: " << OI.ExitBB->getName() << "\n");
    assert(Extractor.isEligible() &&
           "Expected OpenMP outlining to be possible!");

    // Values the caller wants passed as individual arguments instead of via
    // the aggregate argument structure.
    for (auto *V : OI.ExcludeArgsFromAggregate)
      Extractor.excludeArgFromAggregate(V);

    Function *OutlinedFn = Extractor.extractCodeRegion(CEAC);

    LLVM_DEBUG(dbgs() << "After outlining: " << *OuterFn << "\n");
    LLVM_DEBUG(dbgs() << " Outlined function: " << *OutlinedFn << "\n");
    assert(OutlinedFn->getReturnType()->isVoidTy() &&
           "OpenMP outlined functions should not return a value!");

    // For compatibility with the clang CG we move the outlined function after
    // the one with the parallel region.
    OutlinedFn->removeFromParent();
    M.getFunctionList().insertAfter(OuterFn->getIterator(), OutlinedFn);

    // Remove the artificial entry introduced by the extractor right away, we
    // made our own entry block after all.
{
      BasicBlock &ArtificialEntry = OutlinedFn->getEntryBlock();
      assert(ArtificialEntry.getUniqueSuccessor() == OI.EntryBB);
      assert(OI.EntryBB->getUniquePredecessor() == &ArtificialEntry);
      // Move instructions from the to-be-deleted ArtificialEntry to the entry
      // basic block of the parallel region. CodeExtractor generates
      // instructions to unwrap the aggregate argument and may sink
      // allocas/bitcasts for values that are solely used in the outlined region
      // and do not escape.
      assert(!ArtificialEntry.empty() &&
             "Expected instructions to add in the outlined region entry");
      // Iterate in reverse: each non-terminator is inserted at the front of
      // the entry block before the ones moved earlier, which preserves the
      // original relative order.
      for (BasicBlock::reverse_iterator It = ArtificialEntry.rbegin(),
                                        End = ArtificialEntry.rend();
           It != End;) {
        Instruction &I = *It;
        // Advance before moving I; moving invalidates the current position.
        It++;

        if (I.isTerminator())
          continue;

        I.moveBefore(*OI.EntryBB, OI.EntryBB->getFirstInsertionPt());
      }

      OI.EntryBB->moveBefore(&ArtificialEntry);
      ArtificialEntry.eraseFromParent();
    }
    assert(&OutlinedFn->getEntryBlock() == OI.EntryBB);
    assert(OutlinedFn && OutlinedFn->getNumUses() == 1);

    // Run a user callback, e.g. to add attributes.
    if (OI.PostOutlineCB)
      OI.PostOutlineCB(*OutlinedFn);
  }

  // Remove work items that have been completed.
OutlineInfos = std::move(DeferredOutlines);
}

OpenMPIRBuilder::~OpenMPIRBuilder() {
  assert(OutlineInfos.empty() && "There must be no outstanding outlinings");
}

GlobalValue *OpenMPIRBuilder::createGlobalFlag(unsigned Value, StringRef Name) {
  IntegerType *I32Ty = Type::getInt32Ty(M.getContext());
  // WeakODR linkage with hidden visibility: multiple TUs may emit the flag,
  // only one definition survives, and it is not exported from the image.
  auto *GV =
      new GlobalVariable(M, I32Ty,
                         /* isConstant = */ true, GlobalValue::WeakODRLinkage,
                         ConstantInt::get(I32Ty, Value), Name);
  GV->setVisibility(GlobalValue::HiddenVisibility);

  return GV;
}

Constant *OpenMPIRBuilder::getOrCreateIdent(Constant *SrcLocStr,
                                            uint32_t SrcLocStrSize,
                                            IdentFlag LocFlags,
                                            unsigned Reserve2Flags) {
  // Enable "C-mode".
  LocFlags |= OMP_IDENT_FLAG_KMPC;

  // The cache key packs the flags and the reserve2 bits into one integer,
  // paired with the source-location string constant.
  Constant *&Ident =
      IdentMap[{SrcLocStr, uint64_t(LocFlags) << 31 | Reserve2Flags}];
  if (!Ident) {
    // Fields of the ident_t struct: zero, flags, reserve2, size of the
    // source-location string, and the string itself.
    Constant *I32Null = ConstantInt::getNullValue(Int32);
    Constant *IdentData[] = {I32Null,
                             ConstantInt::get(Int32, uint32_t(LocFlags)),
                             ConstantInt::get(Int32, Reserve2Flags),
                             ConstantInt::get(Int32, SrcLocStrSize), SrcLocStr};
    Constant *Initializer =
        ConstantStruct::get(OpenMPIRBuilder::Ident, IdentData);

    // Look for existing encoding of the location + flags, not needed but
    // minimizes the difference to the existing solution while we transition.
for (GlobalVariable &GV : M.getGlobalList())
      if (GV.getValueType() == OpenMPIRBuilder::Ident && GV.hasInitializer())
        if (GV.getInitializer() == Initializer)
          Ident = &GV;

    if (!Ident) {
      // No structurally identical ident exists yet: emit a new private
      // constant global in the default globals address space.
      auto *GV = new GlobalVariable(
          M, OpenMPIRBuilder::Ident,
          /* isConstant = */ true, GlobalValue::PrivateLinkage, Initializer, "",
          nullptr, GlobalValue::NotThreadLocal,
          M.getDataLayout().getDefaultGlobalsAddressSpace());
      GV->setUnnamedAddr(GlobalValue::UnnamedAddr::Global);
      GV->setAlignment(Align(8));
      Ident = GV;
    }
  }

  // The cached global may live in a non-default address space; cast to the
  // expected ident pointer type either way.
  return ConstantExpr::getPointerBitCastOrAddrSpaceCast(Ident, IdentPtr);
}

Constant *OpenMPIRBuilder::getOrCreateSrcLocStr(StringRef LocStr,
                                                uint32_t &SrcLocStrSize) {
  SrcLocStrSize = LocStr.size();
  Constant *&SrcLocStr = SrcLocStrMap[LocStr];
  if (!SrcLocStr) {
    Constant *Initializer =
        ConstantDataArray::getString(M.getContext(), LocStr);

    // Look for existing encoding of the location, not needed but minimizes the
    // difference to the existing solution while we transition.
for (GlobalVariable &GV : M.getGlobalList())
      if (GV.isConstant() && GV.hasInitializer() &&
          GV.getInitializer() == Initializer)
        return SrcLocStr = ConstantExpr::getPointerCast(&GV, Int8Ptr);

    SrcLocStr = Builder.CreateGlobalStringPtr(LocStr, /* Name */ "",
                                              /* AddressSpace */ 0, &M);
  }
  return SrcLocStr;
}

Constant *OpenMPIRBuilder::getOrCreateSrcLocStr(StringRef FunctionName,
                                                StringRef FileName,
                                                unsigned Line, unsigned Column,
                                                uint32_t &SrcLocStrSize) {
  // Build the ";file;function;line;column;;" encoding used for ident_t
  // source-location strings.
  SmallString<128> Buffer;
  Buffer.push_back(';');
  Buffer.append(FileName);
  Buffer.push_back(';');
  Buffer.append(FunctionName);
  Buffer.push_back(';');
  Buffer.append(std::to_string(Line));
  Buffer.push_back(';');
  Buffer.append(std::to_string(Column));
  Buffer.push_back(';');
  Buffer.push_back(';');
  return getOrCreateSrcLocStr(Buffer.str(), SrcLocStrSize);
}

Constant *
OpenMPIRBuilder::getOrCreateDefaultSrcLocStr(uint32_t &SrcLocStrSize) {
  StringRef UnknownLoc = ";unknown;unknown;0;0;;";
  return getOrCreateSrcLocStr(UnknownLoc, SrcLocStrSize);
}

Constant *OpenMPIRBuilder::getOrCreateSrcLocStr(DebugLoc DL,
                                                uint32_t &SrcLocStrSize,
                                                Function *F) {
  DILocation *DIL = DL.get();
  // Without debug info there is nothing better than the default string.
  if (!DIL)
    return getOrCreateDefaultSrcLocStr(SrcLocStrSize);
  // Prefer the embedded source of the debug file; fall back to the module
  // name.
  StringRef FileName = M.getName();
  if (DIFile *DIF = DIL->getFile())
    if (Optional<StringRef> Source = DIF->getSource())
      FileName = *Source;
  // Prefer the subprogram name; fall back to the LLVM function name.
  StringRef Function = DIL->getScope()->getSubprogram()->getName();
  if (Function.empty() && F)
    Function = F->getName();
  return getOrCreateSrcLocStr(Function, FileName, DIL->getLine(),
                              DIL->getColumn(), SrcLocStrSize);
}

Constant *OpenMPIRBuilder::getOrCreateSrcLocStr(const LocationDescription &Loc,
                                                uint32_t &SrcLocStrSize) {
  return getOrCreateSrcLocStr(Loc.DL, SrcLocStrSize,
                              Loc.IP.getBlock()->getParent());
}

Value
*OpenMPIRBuilder::getOrCreateThreadID(Value *Ident) {
  return Builder.CreateCall(
      getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_global_thread_num), Ident,
      "omp_global_thread_num");
}

OpenMPIRBuilder::InsertPointTy
OpenMPIRBuilder::createBarrier(const LocationDescription &Loc, Directive DK,
                               bool ForceSimpleCall, bool CheckCancelFlag) {
  // Nothing to emit if the location cannot be materialized.
  if (!updateToLocation(Loc))
    return Loc.IP;
  return emitBarrierImpl(Loc, DK, ForceSimpleCall, CheckCancelFlag);
}

OpenMPIRBuilder::InsertPointTy
OpenMPIRBuilder::emitBarrierImpl(const LocationDescription &Loc, Directive Kind,
                                 bool ForceSimpleCall, bool CheckCancelFlag) {
  // Build call __kmpc_cancel_barrier(loc, thread_id) or
  // __kmpc_barrier(loc, thread_id);

  // The ident flags record which construct the (implicit) barrier belongs to.
  IdentFlag BarrierLocFlags;
  switch (Kind) {
  case OMPD_for:
    BarrierLocFlags = OMP_IDENT_FLAG_BARRIER_IMPL_FOR;
    break;
  case OMPD_sections:
    BarrierLocFlags = OMP_IDENT_FLAG_BARRIER_IMPL_SECTIONS;
    break;
  case OMPD_single:
    BarrierLocFlags = OMP_IDENT_FLAG_BARRIER_IMPL_SINGLE;
    break;
  case OMPD_barrier:
    BarrierLocFlags = OMP_IDENT_FLAG_BARRIER_EXPL;
    break;
  default:
    BarrierLocFlags = OMP_IDENT_FLAG_BARRIER_IMPL;
    break;
  }

  uint32_t SrcLocStrSize;
  Constant *SrcLocStr = getOrCreateSrcLocStr(Loc, SrcLocStrSize);
  // Note that two ident constants are used: one carrying the barrier flags for
  // the call itself and a plain one for querying the thread id.
  Value *Args[] = {
      getOrCreateIdent(SrcLocStr, SrcLocStrSize, BarrierLocFlags),
      getOrCreateThreadID(getOrCreateIdent(SrcLocStr, SrcLocStrSize))};

  // If we are in a cancellable parallel region, barriers are cancellation
  // points.
  // TODO: Check why we would force simple calls or to ignore the cancel flag.
  bool UseCancelBarrier =
      !ForceSimpleCall && isLastFinalizationInfoCancellable(OMPD_parallel);

  Value *Result =
      Builder.CreateCall(getOrCreateRuntimeFunctionPtr(
                             UseCancelBarrier ?
OMPRTL___kmpc_cancel_barrier
                                              : OMPRTL___kmpc_barrier),
                         Args);

  // The cancel barrier's result is non-zero when cancellation was requested;
  // in that case branch to the finalization/exit code.
  if (UseCancelBarrier && CheckCancelFlag)
    emitCancelationCheckImpl(Result, OMPD_parallel);

  return Builder.saveIP();
}

OpenMPIRBuilder::InsertPointTy
OpenMPIRBuilder::createCancel(const LocationDescription &Loc,
                              Value *IfCondition,
                              omp::Directive CanceledDirective) {
  if (!updateToLocation(Loc))
    return Loc.IP;

  // LLVM utilities like blocks with terminators.
  auto *UI = Builder.CreateUnreachable();

  // With an "if" clause only the "then" side performs the cancellation; the
  // "else" side falls through.
  Instruction *ThenTI = UI, *ElseTI = nullptr;
  if (IfCondition)
    SplitBlockAndInsertIfThenElse(IfCondition, UI, &ThenTI, &ElseTI);
  Builder.SetInsertPoint(ThenTI);

  // Map the canceled directive to the runtime's cancel-kind constant.
  Value *CancelKind = nullptr;
  switch (CanceledDirective) {
#define OMP_CANCEL_KIND(Enum, Str, DirectiveEnum, Value)                       \
  case DirectiveEnum:                                                          \
    CancelKind = Builder.getInt32(Value);                                      \
    break;
#include "llvm/Frontend/OpenMP/OMPKinds.def"
  default:
    llvm_unreachable("Unknown cancel kind!");
  }

  uint32_t SrcLocStrSize;
  Constant *SrcLocStr = getOrCreateSrcLocStr(Loc, SrcLocStrSize);
  Value *Ident = getOrCreateIdent(SrcLocStr, SrcLocStrSize);
  Value *Args[] = {Ident, getOrCreateThreadID(Ident), CancelKind};
  Value *Result = Builder.CreateCall(
      getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_cancel), Args);
  auto ExitCB = [this, CanceledDirective, Loc](InsertPointTy IP) {
    if (CanceledDirective == OMPD_parallel) {
      IRBuilder<>::InsertPointGuard IPG(Builder);
      Builder.restoreIP(IP);
      // When canceling a parallel region, emit a (simple, unchecked) barrier
      // on the exit path before leaving the region.
      createBarrier(LocationDescription(Builder.saveIP(), Loc.DL),
                    omp::Directive::OMPD_unknown, /* ForceSimpleCall */ false,
                    /* CheckCancelFlag */ false);
    }
  };

  // The actual cancel logic is shared with others, e.g., cancel_barriers.
  emitCancelationCheckImpl(Result, CanceledDirective, ExitCB);

  // Update the insertion point and remove the terminator we introduced.
Builder.SetInsertPoint(UI->getParent());
  UI->eraseFromParent();

  return Builder.saveIP();
}

void OpenMPIRBuilder::emitCancelationCheckImpl(Value *CancelFlag,
                                               omp::Directive CanceledDirective,
                                               FinalizeCallbackTy ExitCB) {
  assert(isLastFinalizationInfoCancellable(CanceledDirective) &&
         "Unexpected cancellation!");

  // For a cancel barrier we create two new blocks.
  BasicBlock *BB = Builder.GetInsertBlock();
  BasicBlock *NonCancellationBlock;
  if (Builder.GetInsertPoint() == BB->end()) {
    // TODO: This branch will not be needed once we moved to the
    // OpenMPIRBuilder codegen completely.
    NonCancellationBlock = BasicBlock::Create(
        BB->getContext(), BB->getName() + ".cont", BB->getParent());
  } else {
    // Split off everything after the insertion point; the conditional branch
    // created below replaces the old fall-through terminator.
    NonCancellationBlock = SplitBlock(BB, &*Builder.GetInsertPoint());
    BB->getTerminator()->eraseFromParent();
    Builder.SetInsertPoint(BB);
  }
  BasicBlock *CancellationBlock = BasicBlock::Create(
      BB->getContext(), BB->getName() + ".cncl", BB->getParent());

  // Jump to them based on the return value. A zero cancel flag means "not
  // canceled" and continues in the non-cancellation block.
  Value *Cmp = Builder.CreateIsNull(CancelFlag);
  Builder.CreateCondBr(Cmp, NonCancellationBlock, CancellationBlock,
                       /* TODO weight */ nullptr, nullptr);

  // From the cancellation block we finalize all variables and go to the
  // post finalization block that is known to the FiniCB callback.
  Builder.SetInsertPoint(CancellationBlock);
  if (ExitCB)
    ExitCB(Builder.saveIP());
  auto &FI = FinalizationStack.back();
  FI.FiniCB(Builder.saveIP());

  // The continuation block is where code generation continues.
640 Builder.SetInsertPoint(NonCancellationBlock, NonCancellationBlock->begin()); 641 } 642 643 IRBuilder<>::InsertPoint OpenMPIRBuilder::createParallel( 644 const LocationDescription &Loc, InsertPointTy OuterAllocaIP, 645 BodyGenCallbackTy BodyGenCB, PrivatizeCallbackTy PrivCB, 646 FinalizeCallbackTy FiniCB, Value *IfCondition, Value *NumThreads, 647 omp::ProcBindKind ProcBind, bool IsCancellable) { 648 assert(!isConflictIP(Loc.IP, OuterAllocaIP) && "IPs must not be ambiguous"); 649 650 if (!updateToLocation(Loc)) 651 return Loc.IP; 652 653 uint32_t SrcLocStrSize; 654 Constant *SrcLocStr = getOrCreateSrcLocStr(Loc, SrcLocStrSize); 655 Value *Ident = getOrCreateIdent(SrcLocStr, SrcLocStrSize); 656 Value *ThreadID = getOrCreateThreadID(Ident); 657 658 if (NumThreads) { 659 // Build call __kmpc_push_num_threads(&Ident, global_tid, num_threads) 660 Value *Args[] = { 661 Ident, ThreadID, 662 Builder.CreateIntCast(NumThreads, Int32, /*isSigned*/ false)}; 663 Builder.CreateCall( 664 getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_push_num_threads), Args); 665 } 666 667 if (ProcBind != OMP_PROC_BIND_default) { 668 // Build call __kmpc_push_proc_bind(&Ident, global_tid, proc_bind) 669 Value *Args[] = { 670 Ident, ThreadID, 671 ConstantInt::get(Int32, unsigned(ProcBind), /*isSigned=*/true)}; 672 Builder.CreateCall( 673 getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_push_proc_bind), Args); 674 } 675 676 BasicBlock *InsertBB = Builder.GetInsertBlock(); 677 Function *OuterFn = InsertBB->getParent(); 678 679 // Save the outer alloca block because the insertion iterator may get 680 // invalidated and we still need this later. 681 BasicBlock *OuterAllocaBlock = OuterAllocaIP.getBlock(); 682 683 // Vector to remember instructions we used only during the modeling but which 684 // we want to delete at the end. 
685 SmallVector<Instruction *, 4> ToBeDeleted; 686 687 // Change the location to the outer alloca insertion point to create and 688 // initialize the allocas we pass into the parallel region. 689 Builder.restoreIP(OuterAllocaIP); 690 AllocaInst *TIDAddr = Builder.CreateAlloca(Int32, nullptr, "tid.addr"); 691 AllocaInst *ZeroAddr = Builder.CreateAlloca(Int32, nullptr, "zero.addr"); 692 693 // If there is an if condition we actually use the TIDAddr and ZeroAddr in the 694 // program, otherwise we only need them for modeling purposes to get the 695 // associated arguments in the outlined function. In the former case, 696 // initialize the allocas properly, in the latter case, delete them later. 697 if (IfCondition) { 698 Builder.CreateStore(Constant::getNullValue(Int32), TIDAddr); 699 Builder.CreateStore(Constant::getNullValue(Int32), ZeroAddr); 700 } else { 701 ToBeDeleted.push_back(TIDAddr); 702 ToBeDeleted.push_back(ZeroAddr); 703 } 704 705 // Create an artificial insertion point that will also ensure the blocks we 706 // are about to split are not degenerated. 707 auto *UI = new UnreachableInst(Builder.getContext(), InsertBB); 708 709 Instruction *ThenTI = UI, *ElseTI = nullptr; 710 if (IfCondition) 711 SplitBlockAndInsertIfThenElse(IfCondition, UI, &ThenTI, &ElseTI); 712 713 BasicBlock *ThenBB = ThenTI->getParent(); 714 BasicBlock *PRegEntryBB = ThenBB->splitBasicBlock(ThenTI, "omp.par.entry"); 715 BasicBlock *PRegBodyBB = 716 PRegEntryBB->splitBasicBlock(ThenTI, "omp.par.region"); 717 BasicBlock *PRegPreFiniBB = 718 PRegBodyBB->splitBasicBlock(ThenTI, "omp.par.pre_finalize"); 719 BasicBlock *PRegExitBB = 720 PRegPreFiniBB->splitBasicBlock(ThenTI, "omp.par.exit"); 721 722 auto FiniCBWrapper = [&](InsertPointTy IP) { 723 // Hide "open-ended" blocks from the given FiniCB by setting the right jump 724 // target to the region exit block. 
725 if (IP.getBlock()->end() == IP.getPoint()) { 726 IRBuilder<>::InsertPointGuard IPG(Builder); 727 Builder.restoreIP(IP); 728 Instruction *I = Builder.CreateBr(PRegExitBB); 729 IP = InsertPointTy(I->getParent(), I->getIterator()); 730 } 731 assert(IP.getBlock()->getTerminator()->getNumSuccessors() == 1 && 732 IP.getBlock()->getTerminator()->getSuccessor(0) == PRegExitBB && 733 "Unexpected insertion point for finalization call!"); 734 return FiniCB(IP); 735 }; 736 737 FinalizationStack.push_back({FiniCBWrapper, OMPD_parallel, IsCancellable}); 738 739 // Generate the privatization allocas in the block that will become the entry 740 // of the outlined function. 741 Builder.SetInsertPoint(PRegEntryBB->getTerminator()); 742 InsertPointTy InnerAllocaIP = Builder.saveIP(); 743 744 AllocaInst *PrivTIDAddr = 745 Builder.CreateAlloca(Int32, nullptr, "tid.addr.local"); 746 Instruction *PrivTID = Builder.CreateLoad(Int32, PrivTIDAddr, "tid"); 747 748 // Add some fake uses for OpenMP provided arguments. 749 ToBeDeleted.push_back(Builder.CreateLoad(Int32, TIDAddr, "tid.addr.use")); 750 Instruction *ZeroAddrUse = 751 Builder.CreateLoad(Int32, ZeroAddr, "zero.addr.use"); 752 ToBeDeleted.push_back(ZeroAddrUse); 753 754 // ThenBB 755 // | 756 // V 757 // PRegionEntryBB <- Privatization allocas are placed here. 758 // | 759 // V 760 // PRegionBodyBB <- BodeGen is invoked here. 761 // | 762 // V 763 // PRegPreFiniBB <- The block we will start finalization from. 764 // | 765 // V 766 // PRegionExitBB <- A common exit to simplify block collection. 767 // 768 769 LLVM_DEBUG(dbgs() << "Before body codegen: " << *OuterFn << "\n"); 770 771 // Let the caller create the body. 
772 assert(BodyGenCB && "Expected body generation callback!"); 773 InsertPointTy CodeGenIP(PRegBodyBB, PRegBodyBB->begin()); 774 BodyGenCB(InnerAllocaIP, CodeGenIP, *PRegPreFiniBB); 775 776 LLVM_DEBUG(dbgs() << "After body codegen: " << *OuterFn << "\n"); 777 778 FunctionCallee RTLFn = getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_fork_call); 779 if (auto *F = dyn_cast<llvm::Function>(RTLFn.getCallee())) { 780 if (!F->hasMetadata(llvm::LLVMContext::MD_callback)) { 781 llvm::LLVMContext &Ctx = F->getContext(); 782 MDBuilder MDB(Ctx); 783 // Annotate the callback behavior of the __kmpc_fork_call: 784 // - The callback callee is argument number 2 (microtask). 785 // - The first two arguments of the callback callee are unknown (-1). 786 // - All variadic arguments to the __kmpc_fork_call are passed to the 787 // callback callee. 788 F->addMetadata( 789 llvm::LLVMContext::MD_callback, 790 *llvm::MDNode::get( 791 Ctx, {MDB.createCallbackEncoding(2, {-1, -1}, 792 /* VarArgsArePassed */ true)})); 793 } 794 } 795 796 OutlineInfo OI; 797 OI.PostOutlineCB = [=](Function &OutlinedFn) { 798 // Add some known attributes. 
799 OutlinedFn.addParamAttr(0, Attribute::NoAlias); 800 OutlinedFn.addParamAttr(1, Attribute::NoAlias); 801 OutlinedFn.addFnAttr(Attribute::NoUnwind); 802 OutlinedFn.addFnAttr(Attribute::NoRecurse); 803 804 assert(OutlinedFn.arg_size() >= 2 && 805 "Expected at least tid and bounded tid as arguments"); 806 unsigned NumCapturedVars = 807 OutlinedFn.arg_size() - /* tid & bounded tid */ 2; 808 809 CallInst *CI = cast<CallInst>(OutlinedFn.user_back()); 810 CI->getParent()->setName("omp_parallel"); 811 Builder.SetInsertPoint(CI); 812 813 // Build call __kmpc_fork_call(Ident, n, microtask, var1, .., varn); 814 Value *ForkCallArgs[] = { 815 Ident, Builder.getInt32(NumCapturedVars), 816 Builder.CreateBitCast(&OutlinedFn, ParallelTaskPtr)}; 817 818 SmallVector<Value *, 16> RealArgs; 819 RealArgs.append(std::begin(ForkCallArgs), std::end(ForkCallArgs)); 820 RealArgs.append(CI->arg_begin() + /* tid & bound tid */ 2, CI->arg_end()); 821 822 Builder.CreateCall(RTLFn, RealArgs); 823 824 LLVM_DEBUG(dbgs() << "With fork_call placed: " 825 << *Builder.GetInsertBlock()->getParent() << "\n"); 826 827 InsertPointTy ExitIP(PRegExitBB, PRegExitBB->end()); 828 829 // Initialize the local TID stack location with the argument value. 830 Builder.SetInsertPoint(PrivTID); 831 Function::arg_iterator OutlinedAI = OutlinedFn.arg_begin(); 832 Builder.CreateStore(Builder.CreateLoad(Int32, OutlinedAI), PrivTIDAddr); 833 834 // If no "if" clause was present we do not need the call created during 835 // outlining, otherwise we reuse it in the serialized parallel region. 836 if (!ElseTI) { 837 CI->eraseFromParent(); 838 } else { 839 840 // If an "if" clause was present we are now generating the serialized 841 // version into the "else" branch. 
842 Builder.SetInsertPoint(ElseTI); 843 844 // Build calls __kmpc_serialized_parallel(&Ident, GTid); 845 Value *SerializedParallelCallArgs[] = {Ident, ThreadID}; 846 Builder.CreateCall( 847 getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_serialized_parallel), 848 SerializedParallelCallArgs); 849 850 // OutlinedFn(>id, &zero, CapturedStruct); 851 CI->removeFromParent(); 852 Builder.Insert(CI); 853 854 // __kmpc_end_serialized_parallel(&Ident, GTid); 855 Value *EndArgs[] = {Ident, ThreadID}; 856 Builder.CreateCall( 857 getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_end_serialized_parallel), 858 EndArgs); 859 860 LLVM_DEBUG(dbgs() << "With serialized parallel region: " 861 << *Builder.GetInsertBlock()->getParent() << "\n"); 862 } 863 864 for (Instruction *I : ToBeDeleted) 865 I->eraseFromParent(); 866 }; 867 868 // Adjust the finalization stack, verify the adjustment, and call the 869 // finalize function a last time to finalize values between the pre-fini 870 // block and the exit block if we left the parallel "the normal way". 871 auto FiniInfo = FinalizationStack.pop_back_val(); 872 (void)FiniInfo; 873 assert(FiniInfo.DK == OMPD_parallel && 874 "Unexpected finalization stack state!"); 875 876 Instruction *PRegPreFiniTI = PRegPreFiniBB->getTerminator(); 877 878 InsertPointTy PreFiniIP(PRegPreFiniBB, PRegPreFiniTI->getIterator()); 879 FiniCB(PreFiniIP); 880 881 OI.EntryBB = PRegEntryBB; 882 OI.ExitBB = PRegExitBB; 883 884 SmallPtrSet<BasicBlock *, 32> ParallelRegionBlockSet; 885 SmallVector<BasicBlock *, 32> Blocks; 886 OI.collectBlocks(ParallelRegionBlockSet, Blocks); 887 888 // Ensure a single exit node for the outlined region by creating one. 889 // We might have multiple incoming edges to the exit now due to finalizations, 890 // e.g., cancel calls that cause the control flow to leave the region. 
891 BasicBlock *PRegOutlinedExitBB = PRegExitBB; 892 PRegExitBB = SplitBlock(PRegExitBB, &*PRegExitBB->getFirstInsertionPt()); 893 PRegOutlinedExitBB->setName("omp.par.outlined.exit"); 894 Blocks.push_back(PRegOutlinedExitBB); 895 896 CodeExtractorAnalysisCache CEAC(*OuterFn); 897 CodeExtractor Extractor(Blocks, /* DominatorTree */ nullptr, 898 /* AggregateArgs */ false, 899 /* BlockFrequencyInfo */ nullptr, 900 /* BranchProbabilityInfo */ nullptr, 901 /* AssumptionCache */ nullptr, 902 /* AllowVarArgs */ true, 903 /* AllowAlloca */ true, 904 /* Suffix */ ".omp_par"); 905 906 // Find inputs to, outputs from the code region. 907 BasicBlock *CommonExit = nullptr; 908 SetVector<Value *> Inputs, Outputs, SinkingCands, HoistingCands; 909 Extractor.findAllocas(CEAC, SinkingCands, HoistingCands, CommonExit); 910 Extractor.findInputsOutputs(Inputs, Outputs, SinkingCands); 911 912 LLVM_DEBUG(dbgs() << "Before privatization: " << *OuterFn << "\n"); 913 914 FunctionCallee TIDRTLFn = 915 getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_global_thread_num); 916 917 auto PrivHelper = [&](Value &V) { 918 if (&V == TIDAddr || &V == ZeroAddr) { 919 OI.ExcludeArgsFromAggregate.push_back(&V); 920 return; 921 } 922 923 SetVector<Use *> Uses; 924 for (Use &U : V.uses()) 925 if (auto *UserI = dyn_cast<Instruction>(U.getUser())) 926 if (ParallelRegionBlockSet.count(UserI->getParent())) 927 Uses.insert(&U); 928 929 // __kmpc_fork_call expects extra arguments as pointers. If the input 930 // already has a pointer type, everything is fine. Otherwise, store the 931 // value onto stack and load it back inside the to-be-outlined region. This 932 // will ensure only the pointer will be passed to the function. 933 // FIXME: if there are more than 15 trailing arguments, they must be 934 // additionally packed in a struct. 
935 Value *Inner = &V; 936 if (!V.getType()->isPointerTy()) { 937 IRBuilder<>::InsertPointGuard Guard(Builder); 938 LLVM_DEBUG(llvm::dbgs() << "Forwarding input as pointer: " << V << "\n"); 939 940 Builder.restoreIP(OuterAllocaIP); 941 Value *Ptr = 942 Builder.CreateAlloca(V.getType(), nullptr, V.getName() + ".reloaded"); 943 944 // Store to stack at end of the block that currently branches to the entry 945 // block of the to-be-outlined region. 946 Builder.SetInsertPoint(InsertBB, 947 InsertBB->getTerminator()->getIterator()); 948 Builder.CreateStore(&V, Ptr); 949 950 // Load back next to allocations in the to-be-outlined region. 951 Builder.restoreIP(InnerAllocaIP); 952 Inner = Builder.CreateLoad(V.getType(), Ptr); 953 } 954 955 Value *ReplacementValue = nullptr; 956 CallInst *CI = dyn_cast<CallInst>(&V); 957 if (CI && CI->getCalledFunction() == TIDRTLFn.getCallee()) { 958 ReplacementValue = PrivTID; 959 } else { 960 Builder.restoreIP( 961 PrivCB(InnerAllocaIP, Builder.saveIP(), V, *Inner, ReplacementValue)); 962 assert(ReplacementValue && 963 "Expected copy/create callback to set replacement value!"); 964 if (ReplacementValue == &V) 965 return; 966 } 967 968 for (Use *UPtr : Uses) 969 UPtr->set(ReplacementValue); 970 }; 971 972 // Reset the inner alloca insertion as it will be used for loading the values 973 // wrapped into pointers before passing them into the to-be-outlined region. 974 // Configure it to insert immediately after the fake use of zero address so 975 // that they are available in the generated body and so that the 976 // OpenMP-related values (thread ID and zero address pointers) remain leading 977 // in the argument list. 978 InnerAllocaIP = IRBuilder<>::InsertPoint( 979 ZeroAddrUse->getParent(), ZeroAddrUse->getNextNode()->getIterator()); 980 981 // Reset the outer alloca insertion point to the entry of the relevant block 982 // in case it was invalidated. 
  OuterAllocaIP = IRBuilder<>::InsertPoint(
      OuterAllocaBlock, OuterAllocaBlock->getFirstInsertionPt());

  // Forward every captured input into the to-be-outlined region, wrapping
  // non-pointer values in stack slots (see PrivHelper above).
  for (Value *Input : Inputs) {
    LLVM_DEBUG(dbgs() << "Captured input: " << *Input << "\n");
    PrivHelper(*Input);
  }
  LLVM_DEBUG({
    for (Value *Output : Outputs)
      LLVM_DEBUG(dbgs() << "Captured output: " << *Output << "\n");
  });
  // Outlined OpenMP regions communicate through memory only; a live-out SSA
  // value at this point indicates a bug in region construction.
  assert(Outputs.empty() &&
         "OpenMP outlining should not produce live-out values!");

  LLVM_DEBUG(dbgs() << "After privatization: " << *OuterFn << "\n");
  LLVM_DEBUG({
    for (auto *BB : Blocks)
      dbgs() << " PBR: " << BB->getName() << "\n";
  });

  // Register the outlined info. The actual outlining is deferred; it is
  // performed later when the collected OutlineInfos are processed.
  addOutlineInfo(std::move(OI));

  // The caller resumes at the end of the block holding the helper
  // instruction UI, which has served its purpose and is removed now.
  InsertPointTy AfterIP(UI->getParent(), UI->getParent()->end());
  UI->eraseFromParent();

  return AfterIP;
}

/// Emit a call to void __kmpc_flush(ident_t *loc) at the current insertion
/// point; the ident encodes the source location \p Loc.
void OpenMPIRBuilder::emitFlush(const LocationDescription &Loc) {
  // Build call void __kmpc_flush(ident_t *loc)
  uint32_t SrcLocStrSize;
  Constant *SrcLocStr = getOrCreateSrcLocStr(Loc, SrcLocStrSize);
  Value *Args[] = {getOrCreateIdent(SrcLocStr, SrcLocStrSize)};

  Builder.CreateCall(getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_flush), Args);
}

void OpenMPIRBuilder::createFlush(const LocationDescription &Loc) {
  // Silently do nothing if the location has no usable insertion point.
  if (!updateToLocation(Loc))
    return;
  emitFlush(Loc);
}

void OpenMPIRBuilder::emitTaskwaitImpl(const LocationDescription &Loc) {
  // Build call kmp_int32 __kmpc_omp_taskwait(ident_t *loc, kmp_int32
  // global_tid);
  uint32_t SrcLocStrSize;
  Constant *SrcLocStr = getOrCreateSrcLocStr(Loc, SrcLocStrSize);
  Value *Ident = getOrCreateIdent(SrcLocStr, SrcLocStrSize);
  Value *Args[] = {Ident, getOrCreateThreadID(Ident)};

  // Ignore return result until untied tasks are supported.
  Builder.CreateCall(getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_omp_taskwait),
                     Args);
}

void OpenMPIRBuilder::createTaskwait(const LocationDescription &Loc) {
  // Silently do nothing if the location has no usable insertion point.
  if (!updateToLocation(Loc))
    return;
  emitTaskwaitImpl(Loc);
}

void OpenMPIRBuilder::emitTaskyieldImpl(const LocationDescription &Loc) {
  // Build call __kmpc_omp_taskyield(loc, thread_id, 0);
  uint32_t SrcLocStrSize;
  Constant *SrcLocStr = getOrCreateSrcLocStr(Loc, SrcLocStrSize);
  Value *Ident = getOrCreateIdent(SrcLocStr, SrcLocStrSize);
  // The third argument ("end_part") is unused by the runtime and always 0.
  Constant *I32Null = ConstantInt::getNullValue(Int32);
  Value *Args[] = {Ident, getOrCreateThreadID(Ident), I32Null};

  Builder.CreateCall(getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_omp_taskyield),
                     Args);
}

void OpenMPIRBuilder::createTaskyield(const LocationDescription &Loc) {
  // Silently do nothing if the location has no usable insertion point.
  if (!updateToLocation(Loc))
    return;
  emitTaskyieldImpl(Loc);
}

OpenMPIRBuilder::InsertPointTy OpenMPIRBuilder::createSections(
    const LocationDescription &Loc, InsertPointTy AllocaIP,
    ArrayRef<StorableBodyGenCallbackTy> SectionCBs, PrivatizeCallbackTy PrivCB,
    FinalizeCallbackTy FiniCB, bool IsCancellable, bool IsNowait) {
  if (!updateToLocation(Loc))
    return Loc.IP;

  // Wrap the user finalization callback so it can also be invoked from a
  // cancellation block, which has no terminator yet.
  auto FiniCBWrapper = [&](InsertPointTy IP) {
    // If the insertion point is not at the block's end, it already points at
    // (before) a terminator; the callback can run as-is.
    if (IP.getBlock()->end() != IP.getPoint())
      return FiniCB(IP);
    // This must be done otherwise any nested constructs using FinalizeOMPRegion
    // will fail because that function requires the Finalization Basic Block to
    // have a terminator, which is already removed by EmitOMPRegionBody.
    // IP is currently at cancelation block.
    // We need to backtrack to the condition block to fetch
    // the exit block and create a branch from cancelation
    // to exit block.
    IRBuilder<>::InsertPointGuard IPG(Builder);
    Builder.restoreIP(IP);
    // NOTE(review): this assumes a fixed CFG shape produced by the sections
    // lowering (cancellation block -> single pred "case" block -> two levels
    // up to the condition block whose second successor is the exit); confirm
    // if the emission scheme changes.
    auto *CaseBB = IP.getBlock()->getSinglePredecessor();
    auto *CondBB = CaseBB->getSinglePredecessor()->getSinglePredecessor();
    auto *ExitBB = CondBB->getTerminator()->getSuccessor(1);
    Instruction *I = Builder.CreateBr(ExitBB);
    IP = InsertPointTy(I->getParent(), I->getIterator());
    return FiniCB(IP);
  };

  FinalizationStack.push_back({FiniCBWrapper, OMPD_sections, IsCancellable});

  // Each section is emitted as a switch case
  // Each finalization callback is handled from clang.EmitOMPSectionDirective()
  // -> OMP.createSection() which generates the IR for each section
  // Iterate through all sections and emit a switch construct:
  // switch (IV) {
  // case 0:
  //   <SectionStmt[0]>;
  //   break;
  // ...
  // case <NumSection> - 1:
  //   <SectionStmt[<NumSection> - 1]>;
  //   break;
  // }
  // ...
  // section_loop.after:
  // <FiniCB>;
  auto LoopBodyGenCB = [&](InsertPointTy CodeGenIP, Value *IndVar) {
    auto *CurFn = CodeGenIP.getBlock()->getParent();
    // Default switch destination: fall through to the loop increment.
    auto *ForIncBB = CodeGenIP.getBlock()->getSingleSuccessor();
    // Exit block of the enclosing canonical loop (second successor of the
    // loop condition's terminator); passed to each section for finalization.
    auto *ForExitBB = CodeGenIP.getBlock()
                          ->getSinglePredecessor()
                          ->getTerminator()
                          ->getSuccessor(1);
    SwitchInst *SwitchStmt = Builder.CreateSwitch(IndVar, ForIncBB);
    Builder.restoreIP(CodeGenIP);
    unsigned CaseNumber = 0;
    for (auto SectionCB : SectionCBs) {
      auto *CaseBB = BasicBlock::Create(M.getContext(),
                                        "omp_section_loop.body.case", CurFn);
      SwitchStmt->addCase(Builder.getInt32(CaseNumber), CaseBB);
      Builder.SetInsertPoint(CaseBB);
      SectionCB(InsertPointTy(), Builder.saveIP(), *ForExitBB);
      CaseNumber++;
    }
    // remove the existing terminator from body BB since there can be no
    // terminators after switch/case
    CodeGenIP.getBlock()->getTerminator()->eraseFromParent();
  };
  // Loop body ends here
  // LowerBound, UpperBound, and Stride for createCanonicalLoop: iterate the
  // section index from 0 to the number of sections (exclusive) in steps of 1.
  Type *I32Ty = Type::getInt32Ty(M.getContext());
  Value *LB = ConstantInt::get(I32Ty, 0);
  Value *UB = ConstantInt::get(I32Ty, SectionCBs.size());
  Value *ST = ConstantInt::get(I32Ty, 1);
  llvm::CanonicalLoopInfo *LoopInfo = createCanonicalLoop(
      Loc, LoopBodyGenCB, LB, UB, ST, true, false, AllocaIP, "section_loop");
  Builder.SetInsertPoint(AllocaIP.getBlock()->getTerminator());
  AllocaIP = Builder.saveIP();
  // Distribute the section-dispatch loop statically across the team; the
  // implicit barrier is elided when "nowait" was requested.
  InsertPointTy AfterIP =
      applyStaticWorkshareLoop(Loc.DL, LoopInfo, AllocaIP, !IsNowait);
  BasicBlock *LoopAfterBB = AfterIP.getBlock();
  Instruction *SplitPos = LoopAfterBB->getTerminator();
  // Ensure there is a terminator to split at; splitBasicBlock requires one.
  if (!isa_and_nonnull<BranchInst>(SplitPos))
    SplitPos = new UnreachableInst(Builder.getContext(), LoopAfterBB);
  // ExitBB after LoopAfterBB because LoopAfterBB is used for FinalizationCB,
  // which requires a BB with branch
  BasicBlock *ExitBB =
      LoopAfterBB->splitBasicBlock(SplitPos, "omp_sections.end");
  SplitPos->eraseFromParent();

  // Apply the finalization callback in LoopAfterBB
  auto FiniInfo = FinalizationStack.pop_back_val();
  assert(FiniInfo.DK == OMPD_sections &&
         "Unexpected finalization stack state!");
  Builder.SetInsertPoint(LoopAfterBB->getTerminator());
  FiniInfo.FiniCB(Builder.saveIP());
  Builder.SetInsertPoint(ExitBB);

  return Builder.saveIP();
}

OpenMPIRBuilder::InsertPointTy
OpenMPIRBuilder::createSection(const LocationDescription &Loc,
                               BodyGenCallbackTy BodyGenCB,
                               FinalizeCallbackTy FiniCB) {
  if (!updateToLocation(Loc))
    return Loc.IP;

  // See the analogous wrapper in createSections: make the finalization
  // callback usable from a terminator-less cancellation block.
  auto FiniCBWrapper = [&](InsertPointTy IP) {
    if (IP.getBlock()->end() != IP.getPoint())
      return FiniCB(IP);
    // This must be done otherwise any nested constructs using FinalizeOMPRegion
    // will fail because that function requires the Finalization Basic Block to
    // have a terminator, which is already removed by EmitOMPRegionBody.
    // IP is currently at cancelation block.
    // We need to backtrack to the condition block to fetch
    // the exit block and create a branch from cancelation
    // to exit block.
    IRBuilder<>::InsertPointGuard IPG(Builder);
    Builder.restoreIP(IP);
    // NOTE(review): assumes the section's "case" block is where codegen for
    // this section started (Loc.IP) and that two predecessor hops reach the
    // condition block whose second successor is the exit — confirm against
    // the CFG emitted by createSections.
    auto *CaseBB = Loc.IP.getBlock();
    auto *CondBB = CaseBB->getSinglePredecessor()->getSinglePredecessor();
    auto *ExitBB = CondBB->getTerminator()->getSuccessor(1);
    Instruction *I = Builder.CreateBr(ExitBB);
    IP = InsertPointTy(I->getParent(), I->getIterator());
    return FiniCB(IP);
  };

  Directive OMPD = Directive::OMPD_sections;
  // Since we are using Finalization Callback here, HasFinalize
  // and IsCancellable have to be true
  return EmitOMPInlinedRegion(OMPD, nullptr, nullptr, BodyGenCB, FiniCBWrapper,
                              /*Conditional*/ false, /*hasFinalize*/ true,
                              /*IsCancellable*/ true);
}

/// Create a function with a unique name and a "void (i8*, i8*)" signature in
/// the given module and return it.
// NOTE(review): this helper is only used in this file; consider marking it
// 'static' to give it internal linkage.
Function *getFreshReductionFunc(Module &M) {
  Type *VoidTy = Type::getVoidTy(M.getContext());
  Type *Int8PtrTy = Type::getInt8PtrTy(M.getContext());
  auto *FuncTy =
      FunctionType::get(VoidTy, {Int8PtrTy, Int8PtrTy}, /* IsVarArg */ false);
  // ".omp.reduction.func" is uniqued by Function::Create if it collides.
  return Function::Create(FuncTy, GlobalVariable::InternalLinkage,
                          M.getDataLayout().getDefaultGlobalsAddressSpace(),
                          ".omp.reduction.func", &M);
}

OpenMPIRBuilder::InsertPointTy OpenMPIRBuilder::createReductions(
    const LocationDescription &Loc, InsertPointTy AllocaIP,
    ArrayRef<ReductionInfo> ReductionInfos, bool IsNoWait) {
  // Validate the reduction descriptors up front; (void)RI silences the
  // unused-variable warning in NDEBUG builds where the asserts compile away.
  for (const ReductionInfo &RI : ReductionInfos) {
    (void)RI;
    assert(RI.Variable && "expected non-null variable");
    assert(RI.PrivateVariable && "expected non-null private variable");
    assert(RI.ReductionGen && "expected non-null reduction generator callback");
    assert(RI.Variable->getType() == RI.PrivateVariable->getType() &&
           "expected variables and their private equivalents to have the same "
           "type");
    assert(RI.Variable->getType()->isPointerTy() &&
           "expected variables to be pointers");
  }

  if (!updateToLocation(Loc))
    return InsertPointTy();

  // Split off everything after the insertion point into a continuation block;
  // the branch created by splitBasicBlock is removed because control flow to
  // the continuation is re-established via the reduction switch below.
  BasicBlock *InsertBlock = Loc.IP.getBlock();
  BasicBlock *ContinuationBlock =
      InsertBlock->splitBasicBlock(Loc.IP.getPoint(), "reduce.finalize");
  InsertBlock->getTerminator()->eraseFromParent();

  // Create and populate array of type-erased pointers to private reduction
  // values.
  unsigned NumReductions = ReductionInfos.size();
  Type *RedArrayTy = ArrayType::get(Builder.getInt8PtrTy(), NumReductions);
  // The array itself lives in the alloca block so it dominates all uses.
  Builder.restoreIP(AllocaIP);
  Value *RedArray = Builder.CreateAlloca(RedArrayTy, nullptr, "red.array");

  Builder.SetInsertPoint(InsertBlock, InsertBlock->end());

  // Store an i8* to each private reduction variable into the array; this is
  // the type-erased argument block handed to the runtime.
  for (auto En : enumerate(ReductionInfos)) {
    unsigned Index = En.index();
    const ReductionInfo &RI = En.value();
    Value *RedArrayElemPtr = Builder.CreateConstInBoundsGEP2_64(
        RedArrayTy, RedArray, 0, Index, "red.array.elem." + Twine(Index));
    Value *Casted =
        Builder.CreateBitCast(RI.PrivateVariable, Builder.getInt8PtrTy(),
                              "private.red.var." + Twine(Index) + ".casted");
    Builder.CreateStore(Casted, RedArrayElemPtr);
  }

  // Emit a call to the runtime function that orchestrates the reduction.
  // Declare the reduction function in the process.
  Function *Func = Builder.GetInsertBlock()->getParent();
  Module *Module = Func->getParent();
  Value *RedArrayPtr =
      Builder.CreateBitCast(RedArray, Builder.getInt8PtrTy(), "red.array.ptr");
  uint32_t SrcLocStrSize;
  Constant *SrcLocStr = getOrCreateSrcLocStr(Loc, SrcLocStrSize);
  // The atomic path is only viable if *every* reduction provides an atomic
  // generator; advertise that capability to the runtime via the ident flags.
  bool CanGenerateAtomic =
      llvm::all_of(ReductionInfos, [](const ReductionInfo &RI) {
        return RI.AtomicReductionGen;
      });
  Value *Ident = getOrCreateIdent(SrcLocStr, SrcLocStrSize,
                                  CanGenerateAtomic
                                      ? IdentFlag::OMP_IDENT_FLAG_ATOMIC_REDUCE
                                      : IdentFlag(0));
  Value *ThreadId = getOrCreateThreadID(Ident);
  Constant *NumVariables = Builder.getInt32(NumReductions);
  const DataLayout &DL = Module->getDataLayout();
  unsigned RedArrayByteSize = DL.getTypeStoreSize(RedArrayTy);
  Constant *RedArraySize = Builder.getInt64(RedArrayByteSize);
  Function *ReductionFunc = getFreshReductionFunc(*Module);
  Value *Lock = getOMPCriticalRegionLock(".reduction");
  Function *ReduceFunc = getOrCreateRuntimeFunctionPtr(
      IsNoWait ? RuntimeFunction::OMPRTL___kmpc_reduce_nowait
               : RuntimeFunction::OMPRTL___kmpc_reduce);
  CallInst *ReduceCall =
      Builder.CreateCall(ReduceFunc,
                         {Ident, ThreadId, NumVariables, RedArraySize,
                          RedArrayPtr, ReductionFunc, Lock},
                         "reduce");

  // Create final reduction entry blocks for the atomic and non-atomic case.
  // Emit IR that dispatches control flow to one of the blocks based on the
  // reduction supporting the atomic mode. __kmpc_reduce returns 1 for the
  // non-atomic path, 2 for the atomic path, and 0 when this thread does not
  // participate (falls through to the continuation).
  BasicBlock *NonAtomicRedBlock =
      BasicBlock::Create(Module->getContext(), "reduce.switch.nonatomic", Func);
  BasicBlock *AtomicRedBlock =
      BasicBlock::Create(Module->getContext(), "reduce.switch.atomic", Func);
  SwitchInst *Switch =
      Builder.CreateSwitch(ReduceCall, ContinuationBlock, /* NumCases */ 2);
  Switch->addCase(Builder.getInt32(1), NonAtomicRedBlock);
  Switch->addCase(Builder.getInt32(2), AtomicRedBlock);

  // Populate the non-atomic reduction using the elementwise reduction function.
  // This loads the elements from the global and private variables and reduces
  // them before storing back the result to the global variable.
  Builder.SetInsertPoint(NonAtomicRedBlock);
  for (auto En : enumerate(ReductionInfos)) {
    const ReductionInfo &RI = En.value();
    Type *ValueType = RI.ElementType;
    Value *RedValue = Builder.CreateLoad(ValueType, RI.Variable,
                                         "red.value." + Twine(En.index()));
    Value *PrivateRedValue =
        Builder.CreateLoad(ValueType, RI.PrivateVariable,
                           "red.private.value." + Twine(En.index()));
    Value *Reduced;
    Builder.restoreIP(
        RI.ReductionGen(Builder.saveIP(), RedValue, PrivateRedValue, Reduced));
    // A cleared insert block signals that the callback failed; bail out with
    // an unset insertion point.
    if (!Builder.GetInsertBlock())
      return InsertPointTy();
    Builder.CreateStore(Reduced, RI.Variable);
  }
  Function *EndReduceFunc = getOrCreateRuntimeFunctionPtr(
      IsNoWait ? RuntimeFunction::OMPRTL___kmpc_end_reduce_nowait
               : RuntimeFunction::OMPRTL___kmpc_end_reduce);
  Builder.CreateCall(EndReduceFunc, {Ident, ThreadId, Lock});
  Builder.CreateBr(ContinuationBlock);

  // Populate the atomic reduction using the atomic elementwise reduction
  // function. There are no loads/stores here because they will be happening
  // inside the atomic elementwise reduction.
  Builder.SetInsertPoint(AtomicRedBlock);
  if (CanGenerateAtomic) {
    for (const ReductionInfo &RI : ReductionInfos) {
      Builder.restoreIP(RI.AtomicReductionGen(Builder.saveIP(), RI.ElementType,
                                              RI.Variable, RI.PrivateVariable));
      if (!Builder.GetInsertBlock())
        return InsertPointTy();
    }
    Builder.CreateBr(ContinuationBlock);
  } else {
    // The runtime was told atomics are unavailable, so it must never select
    // this path.
    Builder.CreateUnreachable();
  }

  // Populate the outlined reduction function using the elementwise reduction
  // function. Partial values are extracted from the type-erased array of
  // pointers to private variables.
  // Body of the outlined ".omp.reduction.func(void *lhs, void *rhs)": for
  // each reduction, load both partials through the type-erased arrays, apply
  // the user reduction generator, and store the result back into the LHS slot.
  BasicBlock *ReductionFuncBlock =
      BasicBlock::Create(Module->getContext(), "", ReductionFunc);
  Builder.SetInsertPoint(ReductionFuncBlock);
  Value *LHSArrayPtr = Builder.CreateBitCast(ReductionFunc->getArg(0),
                                             RedArrayTy->getPointerTo());
  Value *RHSArrayPtr = Builder.CreateBitCast(ReductionFunc->getArg(1),
                                             RedArrayTy->getPointerTo());
  for (auto En : enumerate(ReductionInfos)) {
    const ReductionInfo &RI = En.value();
    Value *LHSI8PtrPtr = Builder.CreateConstInBoundsGEP2_64(
        RedArrayTy, LHSArrayPtr, 0, En.index());
    Value *LHSI8Ptr = Builder.CreateLoad(Builder.getInt8PtrTy(), LHSI8PtrPtr);
    Value *LHSPtr = Builder.CreateBitCast(LHSI8Ptr, RI.Variable->getType());
    Value *LHS = Builder.CreateLoad(RI.ElementType, LHSPtr);
    Value *RHSI8PtrPtr = Builder.CreateConstInBoundsGEP2_64(
        RedArrayTy, RHSArrayPtr, 0, En.index());
    Value *RHSI8Ptr = Builder.CreateLoad(Builder.getInt8PtrTy(), RHSI8PtrPtr);
    Value *RHSPtr =
        Builder.CreateBitCast(RHSI8Ptr, RI.PrivateVariable->getType());
    Value *RHS = Builder.CreateLoad(RI.ElementType, RHSPtr);
    Value *Reduced;
    Builder.restoreIP(RI.ReductionGen(Builder.saveIP(), LHS, RHS, Reduced));
    // Callback failure is signaled by a cleared insert block.
    if (!Builder.GetInsertBlock())
      return InsertPointTy();
    Builder.CreateStore(Reduced, LHSPtr);
  }
  Builder.CreateRetVoid();

  Builder.SetInsertPoint(ContinuationBlock);
  return Builder.saveIP();
}

OpenMPIRBuilder::InsertPointTy
OpenMPIRBuilder::createMaster(const LocationDescription &Loc,
                              BodyGenCallbackTy BodyGenCB,
                              FinalizeCallbackTy FiniCB) {

  if (!updateToLocation(Loc))
    return Loc.IP;

  Directive OMPD = Directive::OMPD_master;
  uint32_t SrcLocStrSize;
  Constant *SrcLocStr = getOrCreateSrcLocStr(Loc, SrcLocStrSize);
  Value *Ident = getOrCreateIdent(SrcLocStr, SrcLocStrSize);
  Value *ThreadId = getOrCreateThreadID(Ident);
  Value *Args[] = {Ident, ThreadId};

  // __kmpc_master returns non-zero only on the master thread; the region is
  // therefore emitted conditionally on the entry call's result.
  Function *EntryRTLFn = getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_master);
  Instruction *EntryCall = Builder.CreateCall(EntryRTLFn, Args);

  Function *ExitRTLFn = getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_end_master);
  Instruction *ExitCall = Builder.CreateCall(ExitRTLFn, Args);

  return EmitOMPInlinedRegion(OMPD, EntryCall, ExitCall, BodyGenCB, FiniCB,
                              /*Conditional*/ true, /*hasFinalize*/ true);
}

OpenMPIRBuilder::InsertPointTy
OpenMPIRBuilder::createMasked(const LocationDescription &Loc,
                              BodyGenCallbackTy BodyGenCB,
                              FinalizeCallbackTy FiniCB, Value *Filter) {
  if (!updateToLocation(Loc))
    return Loc.IP;

  Directive OMPD = Directive::OMPD_masked;
  uint32_t SrcLocStrSize;
  Constant *SrcLocStr = getOrCreateSrcLocStr(Loc, SrcLocStrSize);
  Value *Ident = getOrCreateIdent(SrcLocStr, SrcLocStrSize);
  Value *ThreadId = getOrCreateThreadID(Ident);
  // Unlike __kmpc_masked, the end callee takes no filter argument.
  Value *Args[] = {Ident, ThreadId, Filter};
  Value *ArgsEnd[] = {Ident, ThreadId};

  Function *EntryRTLFn = getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_masked);
  Instruction *EntryCall = Builder.CreateCall(EntryRTLFn, Args);

  Function *ExitRTLFn = getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_end_masked);
  Instruction *ExitCall = Builder.CreateCall(ExitRTLFn, ArgsEnd);

  return EmitOMPInlinedRegion(OMPD, EntryCall, ExitCall, BodyGenCB, FiniCB,
                              /*Conditional*/ true, /*hasFinalize*/ true);
}

CanonicalLoopInfo *OpenMPIRBuilder::createLoopSkeleton(
    DebugLoc DL, Value *TripCount, Function *F, BasicBlock *PreInsertBefore,
    BasicBlock *PostInsertBefore, const Twine &Name) {
  Module *M = F->getParent();
  LLVMContext &Ctx = M->getContext();
  // The induction variable gets the same integer type as the trip count.
  Type *IndVarTy = TripCount->getType();

  // Create the basic block structure.
  // Preheader/header/cond/body go before PreInsertBefore; inc/exit/after go
  // before PostInsertBefore, so a caller can sandwich the loop between two
  // existing blocks.
  BasicBlock *Preheader =
      BasicBlock::Create(Ctx, "omp_" + Name + ".preheader", F, PreInsertBefore);
  BasicBlock *Header =
      BasicBlock::Create(Ctx, "omp_" + Name + ".header", F, PreInsertBefore);
  BasicBlock *Cond =
      BasicBlock::Create(Ctx, "omp_" + Name + ".cond", F, PreInsertBefore);
  BasicBlock *Body =
      BasicBlock::Create(Ctx, "omp_" + Name + ".body", F, PreInsertBefore);
  BasicBlock *Latch =
      BasicBlock::Create(Ctx, "omp_" + Name + ".inc", F, PostInsertBefore);
  BasicBlock *Exit =
      BasicBlock::Create(Ctx, "omp_" + Name + ".exit", F, PostInsertBefore);
  BasicBlock *After =
      BasicBlock::Create(Ctx, "omp_" + Name + ".after", F, PostInsertBefore);

  // Use specified DebugLoc for new instructions.
  Builder.SetCurrentDebugLocation(DL);

  Builder.SetInsertPoint(Preheader);
  Builder.CreateBr(Header);

  // The canonical IV counts from 0 (entering from the preheader) and is
  // advanced by the latch.
  Builder.SetInsertPoint(Header);
  PHINode *IndVarPHI = Builder.CreatePHI(IndVarTy, 2, "omp_" + Name + ".iv");
  IndVarPHI->addIncoming(ConstantInt::get(IndVarTy, 0), Preheader);
  Builder.CreateBr(Cond);

  // Loop while IV u< TripCount; unsigned compare matches the canonical-loop
  // convention of always interpreting the IV as unsigned.
  Builder.SetInsertPoint(Cond);
  Value *Cmp =
      Builder.CreateICmpULT(IndVarPHI, TripCount, "omp_" + Name + ".cmp");
  Builder.CreateCondBr(Cmp, Body, Exit);

  Builder.SetInsertPoint(Body);
  Builder.CreateBr(Latch);

  // IV increment cannot wrap: the loop exits before IV reaches TripCount.
  Builder.SetInsertPoint(Latch);
  Value *Next = Builder.CreateAdd(IndVarPHI, ConstantInt::get(IndVarTy, 1),
                                  "omp_" + Name + ".next", /*HasNUW=*/true);
  Builder.CreateBr(Header);
  IndVarPHI->addIncoming(Next, Latch);

  Builder.SetInsertPoint(Exit);
  Builder.CreateBr(After);

  // Remember and return the canonical control flow. LoopInfos owns the
  // CanonicalLoopInfo objects; a std::forward_list keeps pointers stable.
  LoopInfos.emplace_front();
  CanonicalLoopInfo *CL = &LoopInfos.front();

  CL->Header = Header;
  CL->Cond = Cond;
  CL->Latch = Latch;
  CL->Exit = Exit;

#ifndef NDEBUG
  CL->assertOK();
#endif
  return CL;
}

CanonicalLoopInfo *
OpenMPIRBuilder::createCanonicalLoop(const LocationDescription &Loc,
                                     LoopBodyGenCallbackTy BodyGenCB,
                                     Value *TripCount, const Twine &Name) {
  BasicBlock *BB = Loc.IP.getBlock();
  BasicBlock *NextBB = BB->getNextNode();

  CanonicalLoopInfo *CL = createLoopSkeleton(Loc.DL, TripCount, BB->getParent(),
                                             NextBB, NextBB, Name);
  BasicBlock *After = CL->getAfter();

  // If location is not set, don't connect the loop.
  if (updateToLocation(Loc)) {
    // Split the loop at the insertion point: Branch to the preheader and move
    // every following instruction to after the loop (the After BB). Also, the
    // new successor is the loop's after block.
    spliceBB(Builder, After, /*CreateBranch=*/false);
    Builder.CreateBr(CL->getPreheader());
  }

  // Emit the body content. We do it after connecting the loop to the CFG to
  // avoid that the callback encounters degenerate BBs.
  BodyGenCB(CL->getBodyIP(), CL->getIndVar());

#ifndef NDEBUG
  CL->assertOK();
#endif
  return CL;
}

CanonicalLoopInfo *OpenMPIRBuilder::createCanonicalLoop(
    const LocationDescription &Loc, LoopBodyGenCallbackTy BodyGenCB,
    Value *Start, Value *Stop, Value *Step, bool IsSigned, bool InclusiveStop,
    InsertPointTy ComputeIP, const Twine &Name) {

  // Consider the following difficulties (assuming 8-bit signed integers):
  // * Adding \p Step to the loop counter which passes \p Stop may overflow:
  //     DO I = 1, 100, 50
  // * A \p Step of INT_MIN cannot be normalized to a positive direction:
  //     DO I = 100, 0, -128

  // Start, Stop and Step must be of the same integer type.
  auto *IndVarTy = cast<IntegerType>(Start->getType());
  assert(IndVarTy == Stop->getType() && "Stop type mismatch");
  assert(IndVarTy == Step->getType() && "Step type mismatch");

  // The trip-count computation may be placed at a dedicated point (ComputeIP),
  // e.g. to hoist it out of an enclosing region; otherwise it goes at Loc.
  LocationDescription ComputeLoc =
      ComputeIP.isSet() ? LocationDescription(ComputeIP, Loc.DL) : Loc;
  updateToLocation(ComputeLoc);

  ConstantInt *Zero = ConstantInt::get(IndVarTy, 0);
  ConstantInt *One = ConstantInt::get(IndVarTy, 1);

  // Like Step, but always positive.
  Value *Incr = Step;

  // Distance between Start and Stop; always positive.
  Value *Span;

  // Condition whether there are no iterations are executed at all, e.g. because
  // UB < LB.
  Value *ZeroCmp;

  if (IsSigned) {
    // Ensure that increment is positive. If not, negate and invert LB and UB.
    Value *IsNeg = Builder.CreateICmpSLT(Step, Zero);
    Incr = Builder.CreateSelect(IsNeg, Builder.CreateNeg(Step), Step);
    Value *LB = Builder.CreateSelect(IsNeg, Stop, Start);
    Value *UB = Builder.CreateSelect(IsNeg, Start, Stop);
    // UB - LB cannot wrap in the signed sense once LB <= UB (nsw).
    Span = Builder.CreateSub(UB, LB, "", false, true);
    ZeroCmp = Builder.CreateICmp(
        InclusiveStop ? CmpInst::ICMP_SLT : CmpInst::ICMP_SLE, UB, LB);
  } else {
    Span = Builder.CreateSub(Stop, Start, "", true);
    ZeroCmp = Builder.CreateICmp(
        InclusiveStop ? CmpInst::ICMP_ULT : CmpInst::ICMP_ULE, Stop, Start);
  }

  Value *CountIfLooping;
  if (InclusiveStop) {
    CountIfLooping = Builder.CreateAdd(Builder.CreateUDiv(Span, Incr), One);
  } else {
    // Avoid incrementing past stop since it could overflow.
    Value *CountIfTwo = Builder.CreateAdd(
        Builder.CreateUDiv(Builder.CreateSub(Span, One), Incr), One);
    Value *OneCmp = Builder.CreateICmp(
        InclusiveStop ? CmpInst::ICMP_ULT : CmpInst::ICMP_ULE, Span, Incr);
    CountIfLooping = Builder.CreateSelect(OneCmp, One, CountIfTwo);
  }
  Value *TripCount = Builder.CreateSelect(ZeroCmp, Zero, CountIfLooping,
                                          "omp_" + Name + ".tripcount");

  // Translate the canonical 0..TripCount IV back into the user's iteration
  // space: IndVar = Start + IV * Step.
  auto BodyGen = [=](InsertPointTy CodeGenIP, Value *IV) {
    Builder.restoreIP(CodeGenIP);
    Value *Span = Builder.CreateMul(IV, Step);
    Value *IndVar = Builder.CreateAdd(Span, Start);
    BodyGenCB(Builder.saveIP(), IndVar);
  };
  LocationDescription LoopLoc = ComputeIP.isSet() ? Loc.IP : Builder.saveIP();
  return createCanonicalLoop(LoopLoc, BodyGen, TripCount, Name);
}

// Returns an LLVM function to call for initializing loop bounds using OpenMP
// static scheduling depending on `type`. Only i32 and i64 are supported by the
// runtime. Always interpret integers as unsigned similarly to
// CanonicalLoopInfo.
static FunctionCallee getKmpcForStaticInitForType(Type *Ty, Module &M,
                                                  OpenMPIRBuilder &OMPBuilder) {
  unsigned Bitwidth = Ty->getIntegerBitWidth();
  if (Bitwidth == 32)
    return OMPBuilder.getOrCreateRuntimeFunction(
        M, omp::RuntimeFunction::OMPRTL___kmpc_for_static_init_4u);
  if (Bitwidth == 64)
    return OMPBuilder.getOrCreateRuntimeFunction(
        M, omp::RuntimeFunction::OMPRTL___kmpc_for_static_init_8u);
  llvm_unreachable("unknown OpenMP loop iterator bitwidth");
}

OpenMPIRBuilder::InsertPointTy
OpenMPIRBuilder::applyStaticWorkshareLoop(DebugLoc DL, CanonicalLoopInfo *CLI,
                                          InsertPointTy AllocaIP,
                                          bool NeedsBarrier) {
  assert(CLI->isValid() && "Requires a valid canonical loop");
  // Inserting allocas at the loop preheader would interleave with the runtime
  // setup emitted there; the caller must provide a distinct alloca point.
  assert(!isConflictIP(AllocaIP, CLI->getPreheaderIP()) &&
         "Require dedicated allocate IP");

  // Set up the source location value for OpenMP runtime.
  Builder.restoreIP(CLI->getPreheaderIP());
  Builder.SetCurrentDebugLocation(DL);

  uint32_t SrcLocStrSize;
  Constant *SrcLocStr = getOrCreateSrcLocStr(DL, SrcLocStrSize);
  Value *SrcLoc = getOrCreateIdent(SrcLocStr, SrcLocStrSize);

  // Declare useful OpenMP runtime functions.
  Value *IV = CLI->getIndVar();
  Type *IVTy = IV->getType();
  FunctionCallee StaticInit = getKmpcForStaticInitForType(IVTy, M, *this);
  FunctionCallee StaticFini =
      getOrCreateRuntimeFunction(M, omp::OMPRTL___kmpc_for_static_fini);

  // Allocate space for computed loop bounds as expected by the "init" function.
1632 Builder.restoreIP(AllocaIP); 1633 Type *I32Type = Type::getInt32Ty(M.getContext()); 1634 Value *PLastIter = Builder.CreateAlloca(I32Type, nullptr, "p.lastiter"); 1635 Value *PLowerBound = Builder.CreateAlloca(IVTy, nullptr, "p.lowerbound"); 1636 Value *PUpperBound = Builder.CreateAlloca(IVTy, nullptr, "p.upperbound"); 1637 Value *PStride = Builder.CreateAlloca(IVTy, nullptr, "p.stride"); 1638 1639 // At the end of the preheader, prepare for calling the "init" function by 1640 // storing the current loop bounds into the allocated space. A canonical loop 1641 // always iterates from 0 to trip-count with step 1. Note that "init" expects 1642 // and produces an inclusive upper bound. 1643 Builder.SetInsertPoint(CLI->getPreheader()->getTerminator()); 1644 Constant *Zero = ConstantInt::get(IVTy, 0); 1645 Constant *One = ConstantInt::get(IVTy, 1); 1646 Builder.CreateStore(Zero, PLowerBound); 1647 Value *UpperBound = Builder.CreateSub(CLI->getTripCount(), One); 1648 Builder.CreateStore(UpperBound, PUpperBound); 1649 Builder.CreateStore(One, PStride); 1650 1651 Value *ThreadNum = getOrCreateThreadID(SrcLoc); 1652 1653 Constant *SchedulingType = 1654 ConstantInt::get(I32Type, static_cast<int>(OMPScheduleType::Static)); 1655 1656 // Call the "init" function and update the trip count of the loop with the 1657 // value it produced. 
1658 Builder.CreateCall(StaticInit, 1659 {SrcLoc, ThreadNum, SchedulingType, PLastIter, PLowerBound, 1660 PUpperBound, PStride, One, Zero}); 1661 Value *LowerBound = Builder.CreateLoad(IVTy, PLowerBound); 1662 Value *InclusiveUpperBound = Builder.CreateLoad(IVTy, PUpperBound); 1663 Value *TripCountMinusOne = Builder.CreateSub(InclusiveUpperBound, LowerBound); 1664 Value *TripCount = Builder.CreateAdd(TripCountMinusOne, One); 1665 CLI->setTripCount(TripCount); 1666 1667 // Update all uses of the induction variable except the one in the condition 1668 // block that compares it with the actual upper bound, and the increment in 1669 // the latch block. 1670 1671 CLI->mapIndVar([&](Instruction *OldIV) -> Value * { 1672 Builder.SetInsertPoint(CLI->getBody(), 1673 CLI->getBody()->getFirstInsertionPt()); 1674 Builder.SetCurrentDebugLocation(DL); 1675 return Builder.CreateAdd(OldIV, LowerBound); 1676 }); 1677 1678 // In the "exit" block, call the "fini" function. 1679 Builder.SetInsertPoint(CLI->getExit(), 1680 CLI->getExit()->getTerminator()->getIterator()); 1681 Builder.CreateCall(StaticFini, {SrcLoc, ThreadNum}); 1682 1683 // Add the barrier if requested. 
1684 if (NeedsBarrier) 1685 createBarrier(LocationDescription(Builder.saveIP(), DL), 1686 omp::Directive::OMPD_for, /* ForceSimpleCall */ false, 1687 /* CheckCancelFlag */ false); 1688 1689 InsertPointTy AfterIP = CLI->getAfterIP(); 1690 CLI->invalidate(); 1691 1692 return AfterIP; 1693 } 1694 1695 OpenMPIRBuilder::InsertPointTy OpenMPIRBuilder::applyStaticChunkedWorkshareLoop( 1696 DebugLoc DL, CanonicalLoopInfo *CLI, InsertPointTy AllocaIP, 1697 bool NeedsBarrier, Value *ChunkSize) { 1698 assert(CLI->isValid() && "Requires a valid canonical loop"); 1699 assert(ChunkSize && "Chunk size is required"); 1700 1701 LLVMContext &Ctx = CLI->getFunction()->getContext(); 1702 Value *IV = CLI->getIndVar(); 1703 Value *OrigTripCount = CLI->getTripCount(); 1704 Type *IVTy = IV->getType(); 1705 assert(IVTy->getIntegerBitWidth() <= 64 && 1706 "Max supported tripcount bitwidth is 64 bits"); 1707 Type *InternalIVTy = IVTy->getIntegerBitWidth() <= 32 ? Type::getInt32Ty(Ctx) 1708 : Type::getInt64Ty(Ctx); 1709 Type *I32Type = Type::getInt32Ty(M.getContext()); 1710 Constant *Zero = ConstantInt::get(InternalIVTy, 0); 1711 Constant *One = ConstantInt::get(InternalIVTy, 1); 1712 1713 // Declare useful OpenMP runtime functions. 1714 FunctionCallee StaticInit = 1715 getKmpcForStaticInitForType(InternalIVTy, M, *this); 1716 FunctionCallee StaticFini = 1717 getOrCreateRuntimeFunction(M, omp::OMPRTL___kmpc_for_static_fini); 1718 1719 // Allocate space for computed loop bounds as expected by the "init" function. 1720 Builder.restoreIP(AllocaIP); 1721 Builder.SetCurrentDebugLocation(DL); 1722 Value *PLastIter = Builder.CreateAlloca(I32Type, nullptr, "p.lastiter"); 1723 Value *PLowerBound = 1724 Builder.CreateAlloca(InternalIVTy, nullptr, "p.lowerbound"); 1725 Value *PUpperBound = 1726 Builder.CreateAlloca(InternalIVTy, nullptr, "p.upperbound"); 1727 Value *PStride = Builder.CreateAlloca(InternalIVTy, nullptr, "p.stride"); 1728 1729 // Set up the source location value for the OpenMP runtime. 
1730 Builder.restoreIP(CLI->getPreheaderIP()); 1731 Builder.SetCurrentDebugLocation(DL); 1732 1733 // TODO: Detect overflow in ubsan or max-out with current tripcount. 1734 Value *CastedChunkSize = 1735 Builder.CreateZExtOrTrunc(ChunkSize, InternalIVTy, "chunksize"); 1736 Value *CastedTripCount = 1737 Builder.CreateZExt(OrigTripCount, InternalIVTy, "tripcount"); 1738 1739 Constant *SchedulingType = ConstantInt::get( 1740 I32Type, static_cast<int>(OMPScheduleType::StaticChunked)); 1741 Builder.CreateStore(Zero, PLowerBound); 1742 Value *OrigUpperBound = Builder.CreateSub(CastedTripCount, One); 1743 Builder.CreateStore(OrigUpperBound, PUpperBound); 1744 Builder.CreateStore(One, PStride); 1745 1746 // Call the "init" function and update the trip count of the loop with the 1747 // value it produced. 1748 uint32_t SrcLocStrSize; 1749 Constant *SrcLocStr = getOrCreateSrcLocStr(DL, SrcLocStrSize); 1750 Value *SrcLoc = getOrCreateIdent(SrcLocStr, SrcLocStrSize); 1751 Value *ThreadNum = getOrCreateThreadID(SrcLoc); 1752 Builder.CreateCall(StaticInit, 1753 {/*loc=*/SrcLoc, /*global_tid=*/ThreadNum, 1754 /*schedtype=*/SchedulingType, /*plastiter=*/PLastIter, 1755 /*plower=*/PLowerBound, /*pupper=*/PUpperBound, 1756 /*pstride=*/PStride, /*incr=*/One, 1757 /*chunk=*/CastedChunkSize}); 1758 1759 // Load values written by the "init" function. 1760 Value *FirstChunkStart = 1761 Builder.CreateLoad(InternalIVTy, PLowerBound, "omp_firstchunk.lb"); 1762 Value *FirstChunkStop = 1763 Builder.CreateLoad(InternalIVTy, PUpperBound, "omp_firstchunk.ub"); 1764 Value *FirstChunkEnd = Builder.CreateAdd(FirstChunkStop, One); 1765 Value *ChunkRange = 1766 Builder.CreateSub(FirstChunkEnd, FirstChunkStart, "omp_chunk.range"); 1767 Value *NextChunkStride = 1768 Builder.CreateLoad(InternalIVTy, PStride, "omp_dispatch.stride"); 1769 1770 // Create outer "dispatch" loop for enumerating the chunks. 
1771 BasicBlock *DispatchEnter = splitBB(Builder, true); 1772 Value *DispatchCounter; 1773 CanonicalLoopInfo *DispatchCLI = createCanonicalLoop( 1774 {Builder.saveIP(), DL}, 1775 [&](InsertPointTy BodyIP, Value *Counter) { DispatchCounter = Counter; }, 1776 FirstChunkStart, CastedTripCount, NextChunkStride, 1777 /*IsSigned=*/false, /*InclusiveStop=*/false, /*ComputeIP=*/{}, 1778 "dispatch"); 1779 1780 // Remember the BasicBlocks of the dispatch loop we need, then invalidate to 1781 // not have to preserve the canonical invariant. 1782 BasicBlock *DispatchBody = DispatchCLI->getBody(); 1783 BasicBlock *DispatchLatch = DispatchCLI->getLatch(); 1784 BasicBlock *DispatchExit = DispatchCLI->getExit(); 1785 BasicBlock *DispatchAfter = DispatchCLI->getAfter(); 1786 DispatchCLI->invalidate(); 1787 1788 // Rewire the original loop to become the chunk loop inside the dispatch loop. 1789 redirectTo(DispatchAfter, CLI->getAfter(), DL); 1790 redirectTo(CLI->getExit(), DispatchLatch, DL); 1791 redirectTo(DispatchBody, DispatchEnter, DL); 1792 1793 // Prepare the prolog of the chunk loop. 1794 Builder.restoreIP(CLI->getPreheaderIP()); 1795 Builder.SetCurrentDebugLocation(DL); 1796 1797 // Compute the number of iterations of the chunk loop. 
1798 Builder.SetInsertPoint(CLI->getPreheader()->getTerminator()); 1799 Value *ChunkEnd = Builder.CreateAdd(DispatchCounter, ChunkRange); 1800 Value *IsLastChunk = 1801 Builder.CreateICmpUGE(ChunkEnd, CastedTripCount, "omp_chunk.is_last"); 1802 Value *CountUntilOrigTripCount = 1803 Builder.CreateSub(CastedTripCount, DispatchCounter); 1804 Value *ChunkTripCount = Builder.CreateSelect( 1805 IsLastChunk, CountUntilOrigTripCount, ChunkRange, "omp_chunk.tripcount"); 1806 Value *BackcastedChunkTC = 1807 Builder.CreateTrunc(ChunkTripCount, IVTy, "omp_chunk.tripcount.trunc"); 1808 CLI->setTripCount(BackcastedChunkTC); 1809 1810 // Update all uses of the induction variable except the one in the condition 1811 // block that compares it with the actual upper bound, and the increment in 1812 // the latch block. 1813 Value *BackcastedDispatchCounter = 1814 Builder.CreateTrunc(DispatchCounter, IVTy, "omp_dispatch.iv.trunc"); 1815 CLI->mapIndVar([&](Instruction *) -> Value * { 1816 Builder.restoreIP(CLI->getBodyIP()); 1817 return Builder.CreateAdd(IV, BackcastedDispatchCounter); 1818 }); 1819 1820 // In the "exit" block, call the "fini" function. 1821 Builder.SetInsertPoint(DispatchExit, DispatchExit->getFirstInsertionPt()); 1822 Builder.CreateCall(StaticFini, {SrcLoc, ThreadNum}); 1823 1824 // Add the barrier if requested. 1825 if (NeedsBarrier) 1826 createBarrier(LocationDescription(Builder.saveIP(), DL), OMPD_for, 1827 /*ForceSimpleCall=*/false, /*CheckCancelFlag=*/false); 1828 1829 #ifndef NDEBUG 1830 // Even though we currently do not support applying additional methods to it, 1831 // the chunk loop should remain a canonical loop. 
1832 CLI->assertOK(); 1833 #endif 1834 1835 return {DispatchAfter, DispatchAfter->getFirstInsertionPt()}; 1836 } 1837 1838 OpenMPIRBuilder::InsertPointTy 1839 OpenMPIRBuilder::applyWorkshareLoop(DebugLoc DL, CanonicalLoopInfo *CLI, 1840 InsertPointTy AllocaIP, bool NeedsBarrier, 1841 llvm::omp::ScheduleKind SchedKind, 1842 llvm::Value *ChunkSize) { 1843 switch (SchedKind) { 1844 case llvm::omp::ScheduleKind::OMP_SCHEDULE_Default: 1845 assert(!ChunkSize && "No chunk size with default schedule (which for clang " 1846 "is static non-chunked)"); 1847 LLVM_FALLTHROUGH; 1848 case llvm::omp::ScheduleKind::OMP_SCHEDULE_Static: 1849 if (ChunkSize) 1850 return applyStaticChunkedWorkshareLoop(DL, CLI, AllocaIP, NeedsBarrier, 1851 ChunkSize); 1852 return applyStaticWorkshareLoop(DL, CLI, AllocaIP, NeedsBarrier); 1853 case llvm::omp::ScheduleKind::OMP_SCHEDULE_Auto: 1854 assert(!ChunkSize && "Chunk size with auto scheduling not user-defined"); 1855 return applyDynamicWorkshareLoop(DL, CLI, AllocaIP, OMPScheduleType::Auto, 1856 NeedsBarrier, nullptr); 1857 case llvm::omp::ScheduleKind::OMP_SCHEDULE_Dynamic: 1858 return applyDynamicWorkshareLoop(DL, CLI, AllocaIP, 1859 OMPScheduleType::DynamicChunked, 1860 NeedsBarrier, ChunkSize); 1861 case llvm::omp::ScheduleKind::OMP_SCHEDULE_Guided: 1862 return applyDynamicWorkshareLoop(DL, CLI, AllocaIP, 1863 OMPScheduleType::GuidedChunked, 1864 NeedsBarrier, ChunkSize); 1865 case llvm::omp::ScheduleKind::OMP_SCHEDULE_Runtime: 1866 assert(!ChunkSize && 1867 "Chunk size with runtime scheduling implied to be one"); 1868 return applyDynamicWorkshareLoop( 1869 DL, CLI, AllocaIP, OMPScheduleType::Runtime, NeedsBarrier, nullptr); 1870 } 1871 1872 llvm_unreachable("Unknown/unimplemented schedule kind"); 1873 } 1874 1875 /// Returns an LLVM function to call for initializing loop bounds using OpenMP 1876 /// dynamic scheduling depending on `type`. Only i32 and i64 are supported by 1877 /// the runtime. 
/// Always interpret integers as unsigned similarly to
/// CanonicalLoopInfo.
static FunctionCallee
getKmpcForDynamicInitForType(Type *Ty, Module &M, OpenMPIRBuilder &OMPBuilder) {
  unsigned Bitwidth = Ty->getIntegerBitWidth();
  if (Bitwidth == 32)
    return OMPBuilder.getOrCreateRuntimeFunction(
        M, omp::RuntimeFunction::OMPRTL___kmpc_dispatch_init_4u);
  if (Bitwidth == 64)
    return OMPBuilder.getOrCreateRuntimeFunction(
        M, omp::RuntimeFunction::OMPRTL___kmpc_dispatch_init_8u);
  llvm_unreachable("unknown OpenMP loop iterator bitwidth");
}

/// Returns an LLVM function to call for updating the next loop using OpenMP
/// dynamic scheduling depending on `type`. Only i32 and i64 are supported by
/// the runtime. Always interpret integers as unsigned similarly to
/// CanonicalLoopInfo.
static FunctionCallee
getKmpcForDynamicNextForType(Type *Ty, Module &M, OpenMPIRBuilder &OMPBuilder) {
  unsigned Bitwidth = Ty->getIntegerBitWidth();
  if (Bitwidth == 32)
    return OMPBuilder.getOrCreateRuntimeFunction(
        M, omp::RuntimeFunction::OMPRTL___kmpc_dispatch_next_4u);
  if (Bitwidth == 64)
    return OMPBuilder.getOrCreateRuntimeFunction(
        M, omp::RuntimeFunction::OMPRTL___kmpc_dispatch_next_8u);
  llvm_unreachable("unknown OpenMP loop iterator bitwidth");
}

OpenMPIRBuilder::InsertPointTy OpenMPIRBuilder::applyDynamicWorkshareLoop(
    DebugLoc DL, CanonicalLoopInfo *CLI, InsertPointTy AllocaIP,
    OMPScheduleType SchedType, bool NeedsBarrier, Value *Chunk) {
  assert(CLI->isValid() && "Requires a valid canonical loop");
  assert(!isConflictIP(AllocaIP, CLI->getPreheaderIP()) &&
         "Require dedicated allocate IP");

  // Set up the source location value for OpenMP runtime.
  Builder.SetCurrentDebugLocation(DL);

  uint32_t SrcLocStrSize;
  Constant *SrcLocStr = getOrCreateSrcLocStr(DL, SrcLocStrSize);
  Value *SrcLoc = getOrCreateIdent(SrcLocStr, SrcLocStrSize);

  // Declare useful OpenMP runtime functions.
  Value *IV = CLI->getIndVar();
  Type *IVTy = IV->getType();
  FunctionCallee DynamicInit = getKmpcForDynamicInitForType(IVTy, M, *this);
  FunctionCallee DynamicNext = getKmpcForDynamicNextForType(IVTy, M, *this);

  // Allocate space for computed loop bounds as expected by the "init" function.
  Builder.restoreIP(AllocaIP);
  Type *I32Type = Type::getInt32Ty(M.getContext());
  Value *PLastIter = Builder.CreateAlloca(I32Type, nullptr, "p.lastiter");
  Value *PLowerBound = Builder.CreateAlloca(IVTy, nullptr, "p.lowerbound");
  Value *PUpperBound = Builder.CreateAlloca(IVTy, nullptr, "p.upperbound");
  Value *PStride = Builder.CreateAlloca(IVTy, nullptr, "p.stride");

  // At the end of the preheader, prepare for calling the "init" function by
  // storing the current loop bounds into the allocated space. A canonical loop
  // always iterates from 0 to trip-count with step 1. Note that "init" expects
  // and produces an inclusive upper bound.
  // NOTE(review): the bounds stored here are 1-based (lower bound 1, upper
  // bound == trip count); the IV is rebased to 0 below via the "lb" subtract.
  BasicBlock *PreHeader = CLI->getPreheader();
  Builder.SetInsertPoint(PreHeader->getTerminator());
  Constant *One = ConstantInt::get(IVTy, 1);
  Builder.CreateStore(One, PLowerBound);
  Value *UpperBound = CLI->getTripCount();
  Builder.CreateStore(UpperBound, PUpperBound);
  Builder.CreateStore(One, PStride);

  // Capture the blocks (and the continuation point) we still need before the
  // surgery below destroys the canonical-loop structure.
  BasicBlock *Header = CLI->getHeader();
  BasicBlock *Exit = CLI->getExit();
  BasicBlock *Cond = CLI->getCond();
  InsertPointTy AfterIP = CLI->getAfterIP();

  // The CLI will be "broken" in the code below, as the loop is no longer
  // a valid canonical loop.

  if (!Chunk)
    Chunk = One;

  Value *ThreadNum = getOrCreateThreadID(SrcLoc);

  Constant *SchedulingType =
      ConstantInt::get(I32Type, static_cast<int>(SchedType));

  // Call the "init" function.
  Builder.CreateCall(DynamicInit,
                     {SrcLoc, ThreadNum, SchedulingType, /* LowerBound */ One,
                      UpperBound, /* step */ One, Chunk});

  // An outer loop around the existing one. Each iteration asks the runtime
  // ("next") for another chunk of iterations until no work is left.
  BasicBlock *OuterCond = BasicBlock::Create(
      PreHeader->getContext(), Twine(PreHeader->getName()) + ".outer.cond",
      PreHeader->getParent());
  // This needs to be 32-bit always, so can't use the IVTy Zero above.
  Builder.SetInsertPoint(OuterCond, OuterCond->getFirstInsertionPt());
  Value *Res =
      Builder.CreateCall(DynamicNext, {SrcLoc, ThreadNum, PLastIter,
                                       PLowerBound, PUpperBound, PStride});
  Constant *Zero32 = ConstantInt::get(I32Type, 0);
  // "next" returns non-zero (i32) while there is still a chunk to execute.
  Value *MoreWork = Builder.CreateCmp(CmpInst::ICMP_NE, Res, Zero32);
  Value *LowerBound =
      Builder.CreateSub(Builder.CreateLoad(IVTy, PLowerBound), One, "lb");
  Builder.CreateCondBr(MoreWork, Header, Exit);

  // Change PHI-node in loop header to use outer cond rather than preheader,
  // and set IV to the LowerBound.
  Instruction *Phi = &Header->front();
  auto *PI = cast<PHINode>(Phi);
  PI->setIncomingBlock(0, OuterCond);
  PI->setIncomingValue(0, LowerBound);

  // Then set the pre-header to jump to the OuterCond
  Instruction *Term = PreHeader->getTerminator();
  auto *Br = cast<BranchInst>(Term);
  Br->setSuccessor(0, OuterCond);

  // Modify the inner condition:
  // * Use the UpperBound returned from the DynamicNext call.
  // * jump to the loop outer loop when done with one of the inner loops.
  Builder.SetInsertPoint(Cond, Cond->getFirstInsertionPt());
  UpperBound = Builder.CreateLoad(IVTy, PUpperBound, "ub");
  Instruction *Comp = &*Builder.GetInsertPoint();
  auto *CI = cast<CmpInst>(Comp);
  CI->setOperand(1, UpperBound);
  // Redirect the inner exit to branch to outer condition.
  Instruction *Branch = &Cond->back();
  auto *BI = cast<BranchInst>(Branch);
  assert(BI->getSuccessor(1) == Exit);
  BI->setSuccessor(1, OuterCond);

  // Add the barrier if requested.
  if (NeedsBarrier) {
    Builder.SetInsertPoint(&Exit->back());
    createBarrier(LocationDescription(Builder.saveIP(), DL),
                  omp::Directive::OMPD_for, /* ForceSimpleCall */ false,
                  /* CheckCancelFlag */ false);
  }

  CLI->invalidate();
  return AfterIP;
}

/// Redirect all edges that branch to \p OldTarget to \p NewTarget. That is,
/// after this \p OldTarget will be orphaned.
static void redirectAllPredecessorsTo(BasicBlock *OldTarget,
                                      BasicBlock *NewTarget, DebugLoc DL) {
  for (BasicBlock *Pred : make_early_inc_range(predecessors(OldTarget)))
    redirectTo(Pred, NewTarget, DL);
}

/// Determine which blocks in \p BBs are reachable from outside and remove the
/// ones that are not reachable from the function.
2031 static void removeUnusedBlocksFromParent(ArrayRef<BasicBlock *> BBs) { 2032 SmallPtrSet<BasicBlock *, 6> BBsToErase{BBs.begin(), BBs.end()}; 2033 auto HasRemainingUses = [&BBsToErase](BasicBlock *BB) { 2034 for (Use &U : BB->uses()) { 2035 auto *UseInst = dyn_cast<Instruction>(U.getUser()); 2036 if (!UseInst) 2037 continue; 2038 if (BBsToErase.count(UseInst->getParent())) 2039 continue; 2040 return true; 2041 } 2042 return false; 2043 }; 2044 2045 while (true) { 2046 bool Changed = false; 2047 for (BasicBlock *BB : make_early_inc_range(BBsToErase)) { 2048 if (HasRemainingUses(BB)) { 2049 BBsToErase.erase(BB); 2050 Changed = true; 2051 } 2052 } 2053 if (!Changed) 2054 break; 2055 } 2056 2057 SmallVector<BasicBlock *, 7> BBVec(BBsToErase.begin(), BBsToErase.end()); 2058 DeleteDeadBlocks(BBVec); 2059 } 2060 2061 CanonicalLoopInfo * 2062 OpenMPIRBuilder::collapseLoops(DebugLoc DL, ArrayRef<CanonicalLoopInfo *> Loops, 2063 InsertPointTy ComputeIP) { 2064 assert(Loops.size() >= 1 && "At least one loop required"); 2065 size_t NumLoops = Loops.size(); 2066 2067 // Nothing to do if there is already just one loop. 2068 if (NumLoops == 1) 2069 return Loops.front(); 2070 2071 CanonicalLoopInfo *Outermost = Loops.front(); 2072 CanonicalLoopInfo *Innermost = Loops.back(); 2073 BasicBlock *OrigPreheader = Outermost->getPreheader(); 2074 BasicBlock *OrigAfter = Outermost->getAfter(); 2075 Function *F = OrigPreheader->getParent(); 2076 2077 // Loop control blocks that may become orphaned later. 2078 SmallVector<BasicBlock *, 12> OldControlBBs; 2079 OldControlBBs.reserve(6 * Loops.size()); 2080 for (CanonicalLoopInfo *Loop : Loops) 2081 Loop->collectControlBlocks(OldControlBBs); 2082 2083 // Setup the IRBuilder for inserting the trip count computation. 2084 Builder.SetCurrentDebugLocation(DL); 2085 if (ComputeIP.isSet()) 2086 Builder.restoreIP(ComputeIP); 2087 else 2088 Builder.restoreIP(Outermost->getPreheaderIP()); 2089 2090 // Derive the collapsed' loop trip count. 
2091 // TODO: Find common/largest indvar type. 2092 Value *CollapsedTripCount = nullptr; 2093 for (CanonicalLoopInfo *L : Loops) { 2094 assert(L->isValid() && 2095 "All loops to collapse must be valid canonical loops"); 2096 Value *OrigTripCount = L->getTripCount(); 2097 if (!CollapsedTripCount) { 2098 CollapsedTripCount = OrigTripCount; 2099 continue; 2100 } 2101 2102 // TODO: Enable UndefinedSanitizer to diagnose an overflow here. 2103 CollapsedTripCount = Builder.CreateMul(CollapsedTripCount, OrigTripCount, 2104 {}, /*HasNUW=*/true); 2105 } 2106 2107 // Create the collapsed loop control flow. 2108 CanonicalLoopInfo *Result = 2109 createLoopSkeleton(DL, CollapsedTripCount, F, 2110 OrigPreheader->getNextNode(), OrigAfter, "collapsed"); 2111 2112 // Build the collapsed loop body code. 2113 // Start with deriving the input loop induction variables from the collapsed 2114 // one, using a divmod scheme. To preserve the original loops' order, the 2115 // innermost loop use the least significant bits. 2116 Builder.restoreIP(Result->getBodyIP()); 2117 2118 Value *Leftover = Result->getIndVar(); 2119 SmallVector<Value *> NewIndVars; 2120 NewIndVars.resize(NumLoops); 2121 for (int i = NumLoops - 1; i >= 1; --i) { 2122 Value *OrigTripCount = Loops[i]->getTripCount(); 2123 2124 Value *NewIndVar = Builder.CreateURem(Leftover, OrigTripCount); 2125 NewIndVars[i] = NewIndVar; 2126 2127 Leftover = Builder.CreateUDiv(Leftover, OrigTripCount); 2128 } 2129 // Outermost loop gets all the remaining bits. 2130 NewIndVars[0] = Leftover; 2131 2132 // Construct the loop body control flow. 2133 // We progressively construct the branch structure following in direction of 2134 // the control flow, from the leading in-between code, the loop nest body, the 2135 // trailing in-between code, and rejoining the collapsed loop's latch. 2136 // ContinueBlock and ContinuePred keep track of the source(s) of next edge. If 2137 // the ContinueBlock is set, continue with that block. 
If ContinuePred, use 2138 // its predecessors as sources. 2139 BasicBlock *ContinueBlock = Result->getBody(); 2140 BasicBlock *ContinuePred = nullptr; 2141 auto ContinueWith = [&ContinueBlock, &ContinuePred, DL](BasicBlock *Dest, 2142 BasicBlock *NextSrc) { 2143 if (ContinueBlock) 2144 redirectTo(ContinueBlock, Dest, DL); 2145 else 2146 redirectAllPredecessorsTo(ContinuePred, Dest, DL); 2147 2148 ContinueBlock = nullptr; 2149 ContinuePred = NextSrc; 2150 }; 2151 2152 // The code before the nested loop of each level. 2153 // Because we are sinking it into the nest, it will be executed more often 2154 // that the original loop. More sophisticated schemes could keep track of what 2155 // the in-between code is and instantiate it only once per thread. 2156 for (size_t i = 0; i < NumLoops - 1; ++i) 2157 ContinueWith(Loops[i]->getBody(), Loops[i + 1]->getHeader()); 2158 2159 // Connect the loop nest body. 2160 ContinueWith(Innermost->getBody(), Innermost->getLatch()); 2161 2162 // The code after the nested loop at each level. 2163 for (size_t i = NumLoops - 1; i > 0; --i) 2164 ContinueWith(Loops[i]->getAfter(), Loops[i - 1]->getLatch()); 2165 2166 // Connect the finished loop to the collapsed loop latch. 2167 ContinueWith(Result->getLatch(), nullptr); 2168 2169 // Replace the input loops with the new collapsed loop. 2170 redirectTo(Outermost->getPreheader(), Result->getPreheader(), DL); 2171 redirectTo(Result->getAfter(), Outermost->getAfter(), DL); 2172 2173 // Replace the input loop indvars with the derived ones. 2174 for (size_t i = 0; i < NumLoops; ++i) 2175 Loops[i]->getIndVar()->replaceAllUsesWith(NewIndVars[i]); 2176 2177 // Remove unused parts of the input loops. 
2178 removeUnusedBlocksFromParent(OldControlBBs); 2179 2180 for (CanonicalLoopInfo *L : Loops) 2181 L->invalidate(); 2182 2183 #ifndef NDEBUG 2184 Result->assertOK(); 2185 #endif 2186 return Result; 2187 } 2188 2189 std::vector<CanonicalLoopInfo *> 2190 OpenMPIRBuilder::tileLoops(DebugLoc DL, ArrayRef<CanonicalLoopInfo *> Loops, 2191 ArrayRef<Value *> TileSizes) { 2192 assert(TileSizes.size() == Loops.size() && 2193 "Must pass as many tile sizes as there are loops"); 2194 int NumLoops = Loops.size(); 2195 assert(NumLoops >= 1 && "At least one loop to tile required"); 2196 2197 CanonicalLoopInfo *OutermostLoop = Loops.front(); 2198 CanonicalLoopInfo *InnermostLoop = Loops.back(); 2199 Function *F = OutermostLoop->getBody()->getParent(); 2200 BasicBlock *InnerEnter = InnermostLoop->getBody(); 2201 BasicBlock *InnerLatch = InnermostLoop->getLatch(); 2202 2203 // Loop control blocks that may become orphaned later. 2204 SmallVector<BasicBlock *, 12> OldControlBBs; 2205 OldControlBBs.reserve(6 * Loops.size()); 2206 for (CanonicalLoopInfo *Loop : Loops) 2207 Loop->collectControlBlocks(OldControlBBs); 2208 2209 // Collect original trip counts and induction variable to be accessible by 2210 // index. Also, the structure of the original loops is not preserved during 2211 // the construction of the tiled loops, so do it before we scavenge the BBs of 2212 // any original CanonicalLoopInfo. 2213 SmallVector<Value *, 4> OrigTripCounts, OrigIndVars; 2214 for (CanonicalLoopInfo *L : Loops) { 2215 assert(L->isValid() && "All input loops must be valid canonical loops"); 2216 OrigTripCounts.push_back(L->getTripCount()); 2217 OrigIndVars.push_back(L->getIndVar()); 2218 } 2219 2220 // Collect the code between loop headers. These may contain SSA definitions 2221 // that are used in the loop nest body. To be usable with in the innermost 2222 // body, these BasicBlocks will be sunk into the loop nest body. 
That is, 2223 // these instructions may be executed more often than before the tiling. 2224 // TODO: It would be sufficient to only sink them into body of the 2225 // corresponding tile loop. 2226 SmallVector<std::pair<BasicBlock *, BasicBlock *>, 4> InbetweenCode; 2227 for (int i = 0; i < NumLoops - 1; ++i) { 2228 CanonicalLoopInfo *Surrounding = Loops[i]; 2229 CanonicalLoopInfo *Nested = Loops[i + 1]; 2230 2231 BasicBlock *EnterBB = Surrounding->getBody(); 2232 BasicBlock *ExitBB = Nested->getHeader(); 2233 InbetweenCode.emplace_back(EnterBB, ExitBB); 2234 } 2235 2236 // Compute the trip counts of the floor loops. 2237 Builder.SetCurrentDebugLocation(DL); 2238 Builder.restoreIP(OutermostLoop->getPreheaderIP()); 2239 SmallVector<Value *, 4> FloorCount, FloorRems; 2240 for (int i = 0; i < NumLoops; ++i) { 2241 Value *TileSize = TileSizes[i]; 2242 Value *OrigTripCount = OrigTripCounts[i]; 2243 Type *IVType = OrigTripCount->getType(); 2244 2245 Value *FloorTripCount = Builder.CreateUDiv(OrigTripCount, TileSize); 2246 Value *FloorTripRem = Builder.CreateURem(OrigTripCount, TileSize); 2247 2248 // 0 if tripcount divides the tilesize, 1 otherwise. 2249 // 1 means we need an additional iteration for a partial tile. 2250 // 2251 // Unfortunately we cannot just use the roundup-formula 2252 // (tripcount + tilesize - 1)/tilesize 2253 // because the summation might overflow. We do not want introduce undefined 2254 // behavior when the untiled loop nest did not. 2255 Value *FloorTripOverflow = 2256 Builder.CreateICmpNE(FloorTripRem, ConstantInt::get(IVType, 0)); 2257 2258 FloorTripOverflow = Builder.CreateZExt(FloorTripOverflow, IVType); 2259 FloorTripCount = 2260 Builder.CreateAdd(FloorTripCount, FloorTripOverflow, 2261 "omp_floor" + Twine(i) + ".tripcount", true); 2262 2263 // Remember some values for later use. 
2264 FloorCount.push_back(FloorTripCount); 2265 FloorRems.push_back(FloorTripRem); 2266 } 2267 2268 // Generate the new loop nest, from the outermost to the innermost. 2269 std::vector<CanonicalLoopInfo *> Result; 2270 Result.reserve(NumLoops * 2); 2271 2272 // The basic block of the surrounding loop that enters the nest generated 2273 // loop. 2274 BasicBlock *Enter = OutermostLoop->getPreheader(); 2275 2276 // The basic block of the surrounding loop where the inner code should 2277 // continue. 2278 BasicBlock *Continue = OutermostLoop->getAfter(); 2279 2280 // Where the next loop basic block should be inserted. 2281 BasicBlock *OutroInsertBefore = InnermostLoop->getExit(); 2282 2283 auto EmbeddNewLoop = 2284 [this, DL, F, InnerEnter, &Enter, &Continue, &OutroInsertBefore]( 2285 Value *TripCount, const Twine &Name) -> CanonicalLoopInfo * { 2286 CanonicalLoopInfo *EmbeddedLoop = createLoopSkeleton( 2287 DL, TripCount, F, InnerEnter, OutroInsertBefore, Name); 2288 redirectTo(Enter, EmbeddedLoop->getPreheader(), DL); 2289 redirectTo(EmbeddedLoop->getAfter(), Continue, DL); 2290 2291 // Setup the position where the next embedded loop connects to this loop. 2292 Enter = EmbeddedLoop->getBody(); 2293 Continue = EmbeddedLoop->getLatch(); 2294 OutroInsertBefore = EmbeddedLoop->getLatch(); 2295 return EmbeddedLoop; 2296 }; 2297 2298 auto EmbeddNewLoops = [&Result, &EmbeddNewLoop](ArrayRef<Value *> TripCounts, 2299 const Twine &NameBase) { 2300 for (auto P : enumerate(TripCounts)) { 2301 CanonicalLoopInfo *EmbeddedLoop = 2302 EmbeddNewLoop(P.value(), NameBase + Twine(P.index())); 2303 Result.push_back(EmbeddedLoop); 2304 } 2305 }; 2306 2307 EmbeddNewLoops(FloorCount, "floor"); 2308 2309 // Within the innermost floor loop, emit the code that computes the tile 2310 // sizes. 
2311 Builder.SetInsertPoint(Enter->getTerminator()); 2312 SmallVector<Value *, 4> TileCounts; 2313 for (int i = 0; i < NumLoops; ++i) { 2314 CanonicalLoopInfo *FloorLoop = Result[i]; 2315 Value *TileSize = TileSizes[i]; 2316 2317 Value *FloorIsEpilogue = 2318 Builder.CreateICmpEQ(FloorLoop->getIndVar(), FloorCount[i]); 2319 Value *TileTripCount = 2320 Builder.CreateSelect(FloorIsEpilogue, FloorRems[i], TileSize); 2321 2322 TileCounts.push_back(TileTripCount); 2323 } 2324 2325 // Create the tile loops. 2326 EmbeddNewLoops(TileCounts, "tile"); 2327 2328 // Insert the inbetween code into the body. 2329 BasicBlock *BodyEnter = Enter; 2330 BasicBlock *BodyEntered = nullptr; 2331 for (std::pair<BasicBlock *, BasicBlock *> P : InbetweenCode) { 2332 BasicBlock *EnterBB = P.first; 2333 BasicBlock *ExitBB = P.second; 2334 2335 if (BodyEnter) 2336 redirectTo(BodyEnter, EnterBB, DL); 2337 else 2338 redirectAllPredecessorsTo(BodyEntered, EnterBB, DL); 2339 2340 BodyEnter = nullptr; 2341 BodyEntered = ExitBB; 2342 } 2343 2344 // Append the original loop nest body into the generated loop nest body. 2345 if (BodyEnter) 2346 redirectTo(BodyEnter, InnerEnter, DL); 2347 else 2348 redirectAllPredecessorsTo(BodyEntered, InnerEnter, DL); 2349 redirectAllPredecessorsTo(InnerLatch, Continue, DL); 2350 2351 // Replace the original induction variable with an induction variable computed 2352 // from the tile and floor induction variables. 
2353 Builder.restoreIP(Result.back()->getBodyIP()); 2354 for (int i = 0; i < NumLoops; ++i) { 2355 CanonicalLoopInfo *FloorLoop = Result[i]; 2356 CanonicalLoopInfo *TileLoop = Result[NumLoops + i]; 2357 Value *OrigIndVar = OrigIndVars[i]; 2358 Value *Size = TileSizes[i]; 2359 2360 Value *Scale = 2361 Builder.CreateMul(Size, FloorLoop->getIndVar(), {}, /*HasNUW=*/true); 2362 Value *Shift = 2363 Builder.CreateAdd(Scale, TileLoop->getIndVar(), {}, /*HasNUW=*/true); 2364 OrigIndVar->replaceAllUsesWith(Shift); 2365 } 2366 2367 // Remove unused parts of the original loops. 2368 removeUnusedBlocksFromParent(OldControlBBs); 2369 2370 for (CanonicalLoopInfo *L : Loops) 2371 L->invalidate(); 2372 2373 #ifndef NDEBUG 2374 for (CanonicalLoopInfo *GenL : Result) 2375 GenL->assertOK(); 2376 #endif 2377 return Result; 2378 } 2379 2380 /// Attach loop metadata \p Properties to the loop described by \p Loop. If the 2381 /// loop already has metadata, the loop properties are appended. 2382 static void addLoopMetadata(CanonicalLoopInfo *Loop, 2383 ArrayRef<Metadata *> Properties) { 2384 assert(Loop->isValid() && "Expecting a valid CanonicalLoopInfo"); 2385 2386 // Nothing to do if no property to attach. 2387 if (Properties.empty()) 2388 return; 2389 2390 LLVMContext &Ctx = Loop->getFunction()->getContext(); 2391 SmallVector<Metadata *> NewLoopProperties; 2392 NewLoopProperties.push_back(nullptr); 2393 2394 // If the loop already has metadata, prepend it to the new metadata. 
  BasicBlock *Latch = Loop->getLatch();
  assert(Latch && "A valid CanonicalLoopInfo must have a unique latch");
  MDNode *Existing = Latch->getTerminator()->getMetadata(LLVMContext::MD_loop);
  if (Existing)
    // Skip the existing node's operand 0, which is its self-reference.
    append_range(NewLoopProperties, drop_begin(Existing->operands(), 1));

  append_range(NewLoopProperties, Properties);
  MDNode *LoopID = MDNode::getDistinct(Ctx, NewLoopProperties);
  // A loop-ID node must reference itself as its first operand.
  LoopID->replaceOperandWith(0, LoopID);

  Latch->getTerminator()->setMetadata(LLVMContext::MD_loop, LoopID);
}

/// Attach llvm.access.group metadata to the memref instructions of \p Block.
/// \p LI is currently unused by this helper.
static void addSimdMetadata(BasicBlock *Block, MDNode *AccessGroup,
                            LoopInfo &LI) {
  for (Instruction &I : *Block) {
    if (I.mayReadOrWriteMemory()) {
      // TODO: This instruction may already have access group from
      // other pragmas e.g. #pragma clang loop vectorize. Append
      // so that the existing metadata is not overwritten.
      I.setMetadata(LLVMContext::MD_access_group, AccessGroup);
    }
  }
}

// Request complete unrolling of \p Loop by attaching the corresponding
// llvm.loop metadata; the actual transformation is done by the LoopUnrollPass.
void OpenMPIRBuilder::unrollLoopFull(DebugLoc, CanonicalLoopInfo *Loop) {
  LLVMContext &Ctx = Builder.getContext();
  addLoopMetadata(
      Loop, {MDNode::get(Ctx, MDString::get(Ctx, "llvm.loop.unroll.enable")),
             MDNode::get(Ctx, MDString::get(Ctx, "llvm.loop.unroll.full"))});
}

// Enable unrolling of \p Loop without fixing a factor; the LoopUnrollPass
// chooses the unroll count heuristically.
void OpenMPIRBuilder::unrollLoopHeuristic(DebugLoc, CanonicalLoopInfo *Loop) {
  LLVMContext &Ctx = Builder.getContext();
  addLoopMetadata(
      Loop, {
                MDNode::get(Ctx, MDString::get(Ctx, "llvm.loop.unroll.enable")),
            });
}

// Mark \p CanonicalLoop for vectorization: tag its memory accesses with a
// fresh access group and attach llvm.loop.parallel_accesses plus
// llvm.loop.vectorize.enable metadata.
void OpenMPIRBuilder::applySimd(DebugLoc, CanonicalLoopInfo *CanonicalLoop) {
  LLVMContext &Ctx = Builder.getContext();

  Function *F = CanonicalLoop->getFunction();

  // Run LoopAnalysis ad hoc; no pass-manager pipeline is available here.
  FunctionAnalysisManager FAM;
  FAM.registerPass([]() { return DominatorTreeAnalysis(); });
  FAM.registerPass([]() { return LoopAnalysis(); });
  FAM.registerPass([]() { return PassInstrumentationAnalysis(); });

  LoopAnalysis LIA;
  LoopInfo &&LI = LIA.run(*F, FAM);

  Loop *L = LI.getLoopFor(CanonicalLoop->getHeader());

  SmallSet<BasicBlock *, 8> Reachable;

  // Get the basic blocks from the loop in which memref instructions
  // can be found.
  // TODO: Generalize getting all blocks inside a CanonicalizeLoopInfo,
  // preferably without running any passes.
  for (BasicBlock *Block : L->getBlocks()) {
    // Skip the loop-control blocks; only body blocks carry user memory
    // accesses that should join the access group.
    if (Block == CanonicalLoop->getCond() ||
        Block == CanonicalLoop->getHeader())
      continue;
    Reachable.insert(Block);
  }

  // Add access group metadata to memory-access instructions.
  MDNode *AccessGroup = MDNode::getDistinct(Ctx, {});
  for (BasicBlock *BB : Reachable)
    addSimdMetadata(BB, AccessGroup, LI);

  // Use the above access group metadata to create loop level
  // metadata, which should be distinct for each loop.
  ConstantAsMetadata *BoolConst =
      ConstantAsMetadata::get(ConstantInt::getTrue(Type::getInt1Ty(Ctx)));
  // TODO: If the loop has existing parallel access metadata, have
  // to combine two lists.
  addLoopMetadata(
      CanonicalLoop,
      {MDNode::get(Ctx, {MDString::get(Ctx, "llvm.loop.parallel_accesses"),
                         AccessGroup}),
       MDNode::get(Ctx, {MDString::get(Ctx, "llvm.loop.vectorize.enable"),
                         BoolConst})});
}

/// Create the TargetMachine object to query the backend for optimization
/// preferences.
///
/// Ideally, this would be passed from the front-end to the OpenMPBuilder, but
/// e.g. Clang does not pass it to its CodeGen layer and creates it only when
/// needed for the LLVM pass pipline. We use some default options to avoid
/// having to pass too many settings from the frontend that probably do not
/// matter.
///
/// Currently, TargetMachine is only used sometimes by the unrollLoopPartial
/// method. If we are going to use TargetMachine for more purposes, especially
/// those that are sensitive to TargetOptions, RelocModel and CodeModel, it
/// might become be worth requiring front-ends to pass on their TargetMachine,
/// or at least cache it between methods. Note that while fontends such as Clang
/// have just a single main TargetMachine per translation unit, "target-cpu" and
/// "target-features" that determine the TargetMachine are per-function and can
/// be overrided using __attribute__((target("OPTIONS"))).
static std::unique_ptr<TargetMachine>
createTargetMachine(Function *F, CodeGenOpt::Level OptLevel) {
  Module *M = F->getParent();

  // CPU/feature strings are per-function attributes (see comment above).
  StringRef CPU = F->getFnAttribute("target-cpu").getValueAsString();
  StringRef Features = F->getFnAttribute("target-features").getValueAsString();
  const std::string &Triple = M->getTargetTriple();

  std::string Error;
  const llvm::Target *TheTarget = TargetRegistry::lookupTarget(Triple, Error);
  // Unknown/unregistered triple: signal "no TargetMachine" with a null ptr.
  if (!TheTarget)
    return {};

  llvm::TargetOptions Options;
  return std::unique_ptr<TargetMachine>(TheTarget->createTargetMachine(
      Triple, CPU, Features, Options, /*RelocModel=*/None, /*CodeModel=*/None,
      OptLevel));
}

/// Heuristically determine the best-performant unroll factor for \p CLI. This
/// depends on the target processor. We are re-using the same heuristics as the
/// LoopUnrollPass.
static int32_t computeHeuristicUnrollFactor(CanonicalLoopInfo *CLI) {
  Function *F = CLI->getFunction();

  // Assume the user requests the most aggressive unrolling, even if the rest of
  // the code is optimized using a lower setting.
  CodeGenOpt::Level OptLevel = CodeGenOpt::Aggressive;
  std::unique_ptr<TargetMachine> TM = createTargetMachine(F, OptLevel);

  // Set up the analyses needed by the unroll heuristics, without a full pass
  // pipeline.
  FunctionAnalysisManager FAM;
  FAM.registerPass([]() { return TargetLibraryAnalysis(); });
  FAM.registerPass([]() { return AssumptionAnalysis(); });
  FAM.registerPass([]() { return DominatorTreeAnalysis(); });
  FAM.registerPass([]() { return LoopAnalysis(); });
  FAM.registerPass([]() { return ScalarEvolutionAnalysis(); });
  FAM.registerPass([]() { return PassInstrumentationAnalysis(); });
  TargetIRAnalysis TIRA;
  if (TM)
    // Use target-specific cost information when a TargetMachine is available;
    // otherwise TIRA stays default-constructed (target-independent defaults).
    TIRA = TargetIRAnalysis(
        [&](const Function &F) { return TM->getTargetTransformInfo(F); });
  FAM.registerPass([&]() { return TIRA; });

  TargetIRAnalysis::Result &&TTI = TIRA.run(*F, FAM);
  ScalarEvolutionAnalysis SEA;
  ScalarEvolution &&SE = SEA.run(*F, FAM);
  DominatorTreeAnalysis DTA;
  DominatorTree &&DT = DTA.run(*F, FAM);
  LoopAnalysis LIA;
  LoopInfo &&LI = LIA.run(*F, FAM);
  AssumptionAnalysis ACT;
  AssumptionCache &&AC = ACT.run(*F, FAM);
  OptimizationRemarkEmitter ORE{F};

  Loop *L = LI.getLoopFor(CLI->getHeader());
  assert(L && "Expecting CanonicalLoopInfo to be recognized as a loop");

  TargetTransformInfo::UnrollingPreferences UP =
      gatherUnrollingPreferences(L, SE, TTI,
                                 /*BlockFrequencyInfo=*/nullptr,
                                 /*ProfileSummaryInfo=*/nullptr, ORE, OptLevel,
                                 /*UserThreshold=*/None,
                                 /*UserCount=*/None,
                                 /*UserAllowPartial=*/true,
                                 /*UserAllowRuntime=*/true,
                                 /*UserUpperBound=*/None,
                                 /*UserFullUnrollMaxCount=*/None);

  UP.Force = true;

  // Account for additional optimizations taking place before the LoopUnrollPass
  // would unroll the loop.
  UP.Threshold *= UnrollThresholdFactor;
  UP.PartialThreshold *= UnrollThresholdFactor;

  // Use normal unroll factors even if the rest of the code is optimized for
  // size.
  UP.OptSizeThreshold = UP.Threshold;
  UP.PartialOptSizeThreshold = UP.PartialThreshold;

  LLVM_DEBUG(dbgs() << "Unroll heuristic thresholds:\n"
                    << "  Threshold=" << UP.Threshold << "\n"
                    << "  PartialThreshold=" << UP.PartialThreshold << "\n"
                    << "  OptSizeThreshold=" << UP.OptSizeThreshold << "\n"
                    << "  PartialOptSizeThreshold="
                    << UP.PartialOptSizeThreshold << "\n");

  // Disable peeling.
  TargetTransformInfo::PeelingPreferences PP =
      gatherPeelingPreferences(L, SE, TTI,
                               /*UserAllowPeeling=*/false,
                               /*UserAllowProfileBasedPeeling=*/false,
                               /*UnrollingSpecficValues=*/false);

  SmallPtrSet<const Value *, 32> EphValues;
  CodeMetrics::collectEphemeralValues(L, &AC, EphValues);

  // Assume that reads and writes to stack variables can be eliminated by
  // Mem2Reg, SROA or LICM. That is, don't count them towards the loop body's
  // size.
  for (BasicBlock *BB : L->blocks()) {
    for (Instruction &I : *BB) {
      Value *Ptr;
      if (auto *Load = dyn_cast<LoadInst>(&I)) {
        Ptr = Load->getPointerOperand();
      } else if (auto *Store = dyn_cast<StoreInst>(&I)) {
        Ptr = Store->getPointerOperand();
      } else
        continue;

      Ptr = Ptr->stripPointerCasts();

      // Only entry-block allocas are treated as eliminable stack slots.
      if (auto *Alloca = dyn_cast<AllocaInst>(Ptr)) {
        if (Alloca->getParent() == &F->getEntryBlock())
          EphValues.insert(&I);
      }
    }
  }

  unsigned NumInlineCandidates;
  bool NotDuplicatable;
  bool Convergent;
  unsigned LoopSize =
      ApproximateLoopSize(L, NumInlineCandidates, NotDuplicatable, Convergent,
                          TTI, EphValues, UP.BEInsns);
  LLVM_DEBUG(dbgs() << "Estimated loop size is " << LoopSize << "\n");

  // Loop is not unrollable if the loop contains certain instructions.
  if (NotDuplicatable || Convergent) {
    LLVM_DEBUG(dbgs() << "Loop not considered unrollable\n");
    // Factor 1 means "do not unroll" to our callers.
    return 1;
  }

  // TODO: Determine trip count of \p CLI if constant, computeUnrollCount might
  // be able to use it.
  int TripCount = 0;
  int MaxTripCount = 0;
  bool MaxOrZero = false;
  unsigned TripMultiple = 0;

  bool UseUpperBound = false;
  computeUnrollCount(L, TTI, DT, &LI, SE, EphValues, &ORE, TripCount,
                     MaxTripCount, MaxOrZero, TripMultiple, LoopSize, UP, PP,
                     UseUpperBound);
  unsigned Factor = UP.Count;
  LLVM_DEBUG(dbgs() << "Suggesting unroll factor of " << Factor << "\n");

  // This function returns 1 to signal to not unroll a loop.
  if (Factor == 0)
    return 1;
  return Factor;
}

// Partially unroll \p Loop by \p Factor. Factor == 0 requests a heuristic
// factor; Factor == 1 is a no-op. If \p UnrolledCLI is non-null, the unrolled
// loop must remain usable by later loop-associated directives, so the
// unrolling is expressed as tiling plus full unroll of the inner tile loop.
void OpenMPIRBuilder::unrollLoopPartial(DebugLoc DL, CanonicalLoopInfo *Loop,
                                        int32_t Factor,
                                        CanonicalLoopInfo **UnrolledCLI) {
  assert(Factor >= 0 && "Unroll factor must not be negative");

  Function *F = Loop->getFunction();
  LLVMContext &Ctx = F->getContext();

  // If the unrolled loop is not used for another loop-associated directive, it
  // is sufficient to add metadata for the LoopUnrollPass.
  if (!UnrolledCLI) {
    SmallVector<Metadata *, 2> LoopMetadata;
    LoopMetadata.push_back(
        MDNode::get(Ctx, MDString::get(Ctx, "llvm.loop.unroll.enable")));

    // With Factor == 0 no count is emitted; the LoopUnrollPass picks one.
    if (Factor >= 1) {
      ConstantAsMetadata *FactorConst = ConstantAsMetadata::get(
          ConstantInt::get(Type::getInt32Ty(Ctx), APInt(32, Factor)));
      LoopMetadata.push_back(MDNode::get(
          Ctx, {MDString::get(Ctx, "llvm.loop.unroll.count"), FactorConst}));
    }

    addLoopMetadata(Loop, LoopMetadata);
    return;
  }

  // Heuristically determine the unroll factor.
  if (Factor == 0)
    Factor = computeHeuristicUnrollFactor(Loop);

  // No change required with unroll factor 1.
  if (Factor == 1) {
    *UnrolledCLI = Loop;
    return;
  }

  assert(Factor >= 2 &&
         "unrolling only makes sense with a factor of 2 or larger");

  Type *IndVarTy = Loop->getIndVarType();

  // Apply partial unrolling by tiling the loop by the unroll-factor, then fully
  // unroll the inner loop.
  Value *FactorVal =
      ConstantInt::get(IndVarTy, APInt(IndVarTy->getIntegerBitWidth(), Factor,
                                       /*isSigned=*/false));
  std::vector<CanonicalLoopInfo *> LoopNest =
      tileLoops(DL, {Loop}, {FactorVal});
  assert(LoopNest.size() == 2 && "Expect 2 loops after tiling");
  // The outer (floor) loop is what later directives associate with.
  *UnrolledCLI = LoopNest[0];
  CanonicalLoopInfo *InnerLoop = LoopNest[1];

  // LoopUnrollPass can only fully unroll loops with constant trip count.
  // Unroll by the unroll factor with a fallback epilog for the remainder
  // iterations if necessary.
  ConstantAsMetadata *FactorConst = ConstantAsMetadata::get(
      ConstantInt::get(Type::getInt32Ty(Ctx), APInt(32, Factor)));
  addLoopMetadata(
      InnerLoop,
      {MDNode::get(Ctx, MDString::get(Ctx, "llvm.loop.unroll.enable")),
       MDNode::get(
           Ctx, {MDString::get(Ctx, "llvm.loop.unroll.count"), FactorConst})});

#ifndef NDEBUG
  (*UnrolledCLI)->assertOK();
#endif
}

// Emit a call to __kmpc_copyprivate(ident, tid, BufSize, CpyBuf, CpyFn, DidIt)
// where DidIt is loaded from the given i32 slot.
OpenMPIRBuilder::InsertPointTy
OpenMPIRBuilder::createCopyPrivate(const LocationDescription &Loc,
                                   llvm::Value *BufSize, llvm::Value *CpyBuf,
                                   llvm::Value *CpyFn, llvm::Value *DidIt) {
  if (!updateToLocation(Loc))
    return Loc.IP;

  uint32_t SrcLocStrSize;
  Constant *SrcLocStr = getOrCreateSrcLocStr(Loc, SrcLocStrSize);
  Value *Ident = getOrCreateIdent(SrcLocStr, SrcLocStrSize);
  Value *ThreadId = getOrCreateThreadID(Ident);

  llvm::Value *DidItLD = Builder.CreateLoad(Builder.getInt32Ty(), DidIt);

  Value *Args[] = {Ident, ThreadId, BufSize, CpyBuf, CpyFn, DidItLD};

  Function *Fn =
      getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_copyprivate);
  Builder.CreateCall(Fn, Args);

  return Builder.saveIP();
}

// Emit a 'single' region: only the thread for which __kmpc_single returns
// non-zero executes the body; it then calls __kmpc_end_single. Optionally
// records in *DidIt whether this thread executed the region.
OpenMPIRBuilder::InsertPointTy
OpenMPIRBuilder::createSingle(const LocationDescription &Loc,
                              BodyGenCallbackTy BodyGenCB,
                              FinalizeCallbackTy FiniCB, llvm::Value *DidIt) {

  if (!updateToLocation(Loc))
    return Loc.IP;

  // If needed (i.e. not null), initialize `DidIt` with 0
  if (DidIt) {
    Builder.CreateStore(Builder.getInt32(0), DidIt);
  }

  Directive OMPD = Directive::OMPD_single;
  uint32_t SrcLocStrSize;
  Constant *SrcLocStr = getOrCreateSrcLocStr(Loc, SrcLocStrSize);
  Value *Ident = getOrCreateIdent(SrcLocStr, SrcLocStrSize);
  Value *ThreadId = getOrCreateThreadID(Ident);
  Value *Args[] = {Ident, ThreadId};

  Function *EntryRTLFn = getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_single);
  Instruction *EntryCall = Builder.CreateCall(EntryRTLFn, Args);

  // The exit call is created here but moved into its final position by
  // EmitOMPInlinedRegion/emitCommonDirectiveExit.
  Function *ExitRTLFn = getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_end_single);
  Instruction *ExitCall = Builder.CreateCall(ExitRTLFn, Args);

  // generates the following:
  // if (__kmpc_single()) {
  //   .... single region ...
  //   __kmpc_end_single
  // }

  return EmitOMPInlinedRegion(OMPD, EntryCall, ExitCall, BodyGenCB, FiniCB,
                              /*Conditional*/ true, /*hasFinalize*/ true);
}

// Emit a 'critical' region guarded by the named lock; an optional HintInst
// selects __kmpc_critical_with_hint instead of __kmpc_critical.
OpenMPIRBuilder::InsertPointTy OpenMPIRBuilder::createCritical(
    const LocationDescription &Loc, BodyGenCallbackTy BodyGenCB,
    FinalizeCallbackTy FiniCB, StringRef CriticalName, Value *HintInst) {

  if (!updateToLocation(Loc))
    return Loc.IP;

  Directive OMPD = Directive::OMPD_critical;
  uint32_t SrcLocStrSize;
  Constant *SrcLocStr = getOrCreateSrcLocStr(Loc, SrcLocStrSize);
  Value *Ident = getOrCreateIdent(SrcLocStr, SrcLocStrSize);
  Value *ThreadId = getOrCreateThreadID(Ident);
  Value *LockVar = getOMPCriticalRegionLock(CriticalName);
  Value *Args[] = {Ident, ThreadId, LockVar};

  SmallVector<llvm::Value *, 4> EnterArgs(std::begin(Args), std::end(Args));
  Function *RTFn = nullptr;
  if (HintInst) {
    // Add Hint to entry Args and create call
    EnterArgs.push_back(HintInst);
    RTFn = getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_critical_with_hint);
  } else {
    RTFn = getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_critical);
  }
  Instruction *EntryCall = Builder.CreateCall(RTFn, EnterArgs);

  // End-call takes the plain argument list (no hint).
  Function *ExitRTLFn =
      getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_end_critical);
  Instruction *ExitCall = Builder.CreateCall(ExitRTLFn, Args);

  return EmitOMPInlinedRegion(OMPD, EntryCall, ExitCall, BodyGenCB, FiniCB,
                              /*Conditional*/ false, /*hasFinalize*/ true);
}

// Emit a doacross post/wait for 'ordered depend(source|sink)': stores the
// iteration vector into a stack-allocated i64 array and passes its base
// address to __kmpc_doacross_post or __kmpc_doacross_wait.
OpenMPIRBuilder::InsertPointTy
OpenMPIRBuilder::createOrderedDepend(const LocationDescription &Loc,
                                     InsertPointTy AllocaIP, unsigned NumLoops,
                                     ArrayRef<llvm::Value *> StoreValues,
                                     const Twine &Name, bool IsDependSource) {
  for (size_t I = 0; I < StoreValues.size(); I++)
    assert(StoreValues[I]->getType()->isIntegerTy(64) &&
           "OpenMP runtime requires depend vec with i64 type");

  if (!updateToLocation(Loc))
    return Loc.IP;

  // Allocate space for vector and generate alloc instruction.
  auto *ArrI64Ty = ArrayType::get(Int64, NumLoops);
  Builder.restoreIP(AllocaIP);
  AllocaInst *ArgsBase = Builder.CreateAlloca(ArrI64Ty, nullptr, Name);
  // 8-byte alignment matching the i64 element type.
  ArgsBase->setAlignment(Align(8));
  Builder.restoreIP(Loc.IP);

  // Store the index value with offset in depend vector.
  for (unsigned I = 0; I < NumLoops; ++I) {
    Value *DependAddrGEPIter = Builder.CreateInBoundsGEP(
        ArrI64Ty, ArgsBase, {Builder.getInt64(0), Builder.getInt64(I)});
    StoreInst *STInst = Builder.CreateStore(StoreValues[I], DependAddrGEPIter);
    STInst->setAlignment(Align(8));
  }

  // Address of the first element, passed to the runtime call.
  Value *DependBaseAddrGEP = Builder.CreateInBoundsGEP(
      ArrI64Ty, ArgsBase, {Builder.getInt64(0), Builder.getInt64(0)});

  uint32_t SrcLocStrSize;
  Constant *SrcLocStr = getOrCreateSrcLocStr(Loc, SrcLocStrSize);
  Value *Ident = getOrCreateIdent(SrcLocStr, SrcLocStrSize);
  Value *ThreadId = getOrCreateThreadID(Ident);
  Value *Args[] = {Ident, ThreadId, DependBaseAddrGEP};

  Function *RTLFn = nullptr;
  if (IsDependSource)
    RTLFn = getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_doacross_post);
  else
    RTLFn = getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_doacross_wait);
  Builder.CreateCall(RTLFn, Args);

  return Builder.saveIP();
}

// Emit an 'ordered' region. With IsThreads the body is bracketed by
// __kmpc_ordered/__kmpc_end_ordered; otherwise (simd) no runtime calls are
// emitted and the region is inlined without entry/exit calls.
OpenMPIRBuilder::InsertPointTy OpenMPIRBuilder::createOrderedThreadsSimd(
    const LocationDescription &Loc, BodyGenCallbackTy BodyGenCB,
    FinalizeCallbackTy FiniCB, bool IsThreads) {
  if (!updateToLocation(Loc))
    return Loc.IP;

  Directive OMPD = Directive::OMPD_ordered;
  Instruction *EntryCall = nullptr;
  Instruction *ExitCall = nullptr;

  if (IsThreads) {
    uint32_t SrcLocStrSize;
    Constant *SrcLocStr = getOrCreateSrcLocStr(Loc, SrcLocStrSize);
    Value *Ident = getOrCreateIdent(SrcLocStr, SrcLocStrSize);
    Value *ThreadId = getOrCreateThreadID(Ident);
    Value *Args[] = {Ident, ThreadId};

    Function *EntryRTLFn = getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_ordered);
    EntryCall = Builder.CreateCall(EntryRTLFn, Args);

    Function *ExitRTLFn =
        getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_end_ordered);
    ExitCall = Builder.CreateCall(ExitRTLFn, Args);
  }

  return EmitOMPInlinedRegion(OMPD, EntryCall, ExitCall, BodyGenCB, FiniCB,
                              /*Conditional*/ false, /*hasFinalize*/ true);
}

// Common helper: inline a directive region at the current insertion point.
// Splits the current block into entry / finalize / exit blocks, optionally
// guards the body behind the EntryCall's result (Conditional), lets BodyGenCB
// fill in the body, then runs finalization and places ExitCall.
OpenMPIRBuilder::InsertPointTy OpenMPIRBuilder::EmitOMPInlinedRegion(
    Directive OMPD, Instruction *EntryCall, Instruction *ExitCall,
    BodyGenCallbackTy BodyGenCB, FinalizeCallbackTy FiniCB, bool Conditional,
    bool HasFinalize, bool IsCancellable) {

  if (HasFinalize)
    FinalizationStack.push_back({FiniCB, OMPD, IsCancellable});

  // Create inlined region's entry and body blocks, in preparation
  // for conditional creation
  BasicBlock *EntryBB = Builder.GetInsertBlock();
  Instruction *SplitPos = EntryBB->getTerminator();
  // If the block has no branch terminator yet, insert a placeholder
  // unreachable so the block can be split; it is cleaned up at the end.
  if (!isa_and_nonnull<BranchInst>(SplitPos))
    SplitPos = new UnreachableInst(Builder.getContext(), EntryBB);
  BasicBlock *ExitBB = EntryBB->splitBasicBlock(SplitPos, "omp_region.end");
  BasicBlock *FiniBB =
      EntryBB->splitBasicBlock(EntryBB->getTerminator(), "omp_region.finalize");

  Builder.SetInsertPoint(EntryBB->getTerminator());
  emitCommonDirectiveEntry(OMPD, EntryCall, ExitBB, Conditional);

  // generate body
  BodyGenCB(/* AllocaIP */ InsertPointTy(),
            /* CodeGenIP */ Builder.saveIP(), *FiniBB);

  // If we didn't emit a branch to FiniBB during body generation, it means
  // FiniBB is unreachable (e.g. while(1);). stop generating all the
  // unreachable blocks, and remove anything we are not going to use.
  auto SkipEmittingRegion = FiniBB->hasNPredecessors(0);
  if (SkipEmittingRegion) {
    // FiniBB is dead: delete it together with the (never reached) exit call.
    FiniBB->eraseFromParent();
    ExitCall->eraseFromParent();
    // Discard finalization if we have it.
    if (HasFinalize) {
      assert(!FinalizationStack.empty() &&
             "Unexpected finalization stack state!");
      FinalizationStack.pop_back();
    }
  } else {
    // emit exit call and do any needed finalization.
    auto FinIP = InsertPointTy(FiniBB, FiniBB->getFirstInsertionPt());
    assert(FiniBB->getTerminator()->getNumSuccessors() == 1 &&
           FiniBB->getTerminator()->getSuccessor(0) == ExitBB &&
           "Unexpected control flow graph state!!");
    emitCommonDirectiveExit(OMPD, FinIP, ExitCall, HasFinalize);
    assert(FiniBB->getUniquePredecessor()->getUniqueSuccessor() == FiniBB &&
           "Unexpected Control Flow State!");
    // Fold the finalize block back into its single predecessor.
    MergeBlockIntoPredecessor(FiniBB);
  }

  // If we are skipping the region of a non conditional, remove the exit
  // block, and clear the builder's insertion point.
  assert(SplitPos->getParent() == ExitBB &&
         "Unexpected Insertion point location!");
  if (!Conditional && SkipEmittingRegion) {
    ExitBB->eraseFromParent();
    Builder.ClearInsertionPoint();
  } else {
    auto merged = MergeBlockIntoPredecessor(ExitBB);
    BasicBlock *ExitPredBB = SplitPos->getParent();
    auto InsertBB = merged ? ExitPredBB : ExitBB;
    // Drop the placeholder unreachable inserted above, if we created one.
    if (!isa_and_nonnull<BranchInst>(SplitPos))
      SplitPos->eraseFromParent();
    Builder.SetInsertPoint(InsertBB);
  }

  return Builder.saveIP();
}

// Emit the directive's entry: when Conditional, guard the region body with
// 'if (EntryCall != 0)', branching to ExitBB otherwise. Leaves the builder
// positioned inside the region body.
OpenMPIRBuilder::InsertPointTy OpenMPIRBuilder::emitCommonDirectiveEntry(
    Directive OMPD, Value *EntryCall, BasicBlock *ExitBB, bool Conditional) {
  // if nothing to do, Return current insertion point.
  if (!Conditional || !EntryCall)
    return Builder.saveIP();

  BasicBlock *EntryBB = Builder.GetInsertBlock();
  Value *CallBool = Builder.CreateIsNotNull(EntryCall);
  auto *ThenBB = BasicBlock::Create(M.getContext(), "omp_region.body");
  // Temporary terminator so ThenBB is well-formed while instructions are
  // shuffled around; erased below.
  auto *UI = new UnreachableInst(Builder.getContext(), ThenBB);

  // Emit thenBB and set the Builder's insertion point there for
  // body generation next. Place the block after the current block.
  Function *CurFn = EntryBB->getParent();
  CurFn->getBasicBlockList().insertAfter(EntryBB->getIterator(), ThenBB);

  // Move Entry branch to end of ThenBB, and replace with conditional
  // branch (If-stmt)
  Instruction *EntryBBTI = EntryBB->getTerminator();
  Builder.CreateCondBr(CallBool, ThenBB, ExitBB);
  EntryBBTI->removeFromParent();
  Builder.SetInsertPoint(UI);
  Builder.Insert(EntryBBTI);
  UI->eraseFromParent();
  Builder.SetInsertPoint(ThenBB->getTerminator());

  // return an insertion point to ExitBB.
  return IRBuilder<>::InsertPoint(ExitBB, ExitBB->getFirstInsertionPt());
}

// Emit the directive's exit: run the pending finalization callback (if any)
// at FinIP, then move ExitCall so it is the last instruction before the
// finalization block's terminator.
OpenMPIRBuilder::InsertPointTy OpenMPIRBuilder::emitCommonDirectiveExit(
    omp::Directive OMPD, InsertPointTy FinIP, Instruction *ExitCall,
    bool HasFinalize) {

  Builder.restoreIP(FinIP);

  // If there is finalization to do, emit it before the exit call
  if (HasFinalize) {
    assert(!FinalizationStack.empty() &&
           "Unexpected finalization stack state!");

    FinalizationInfo Fi = FinalizationStack.pop_back_val();
    assert(Fi.DK == OMPD && "Unexpected Directive for Finalization call!");

    Fi.FiniCB(FinIP);

    BasicBlock *FiniBB = FinIP.getBlock();
    Instruction *FiniBBTI = FiniBB->getTerminator();

    // set Builder IP for call creation
    Builder.SetInsertPoint(FiniBBTI);
  }

  if (!ExitCall)
    return Builder.saveIP();

  // place the Exitcall as last instruction before Finalization block terminator
  ExitCall->removeFromParent();
  Builder.Insert(ExitCall);

  return IRBuilder<>::InsertPoint(ExitCall->getParent(),
                                  ExitCall->getIterator());
}

// Emit the control flow for a 'copyin' clause: compare the master and private
// addresses and only execute the copy block when they differ.
OpenMPIRBuilder::InsertPointTy OpenMPIRBuilder::createCopyinClauseBlocks(
    InsertPointTy IP, Value *MasterAddr, Value *PrivateAddr,
    llvm::IntegerType *IntPtrTy, bool BranchtoEnd) {
  if (!IP.isSet())
    return IP;

  IRBuilder<>::InsertPointGuard IPG(Builder);

  // creates the following CFG structure
  //       OMP_Entry : (MasterAddr != PrivateAddr)?
  //       F     T
  //       |      \
  //       |     copin.not.master
  //       |      /
  //       v     /
  //   copyin.not.master.end
  //                     |
  //                     v
  //   OMP.Entry.Next

  BasicBlock *OMP_Entry = IP.getBlock();
  Function *CurFn = OMP_Entry->getParent();
  BasicBlock *CopyBegin =
      BasicBlock::Create(M.getContext(), "copyin.not.master", CurFn);
  BasicBlock *CopyEnd = nullptr;

  // If entry block is terminated, split to preserve the branch to following
  // basic block (i.e. OMP.Entry.Next), otherwise, leave everything as is.
  if (isa_and_nonnull<BranchInst>(OMP_Entry->getTerminator())) {
    CopyEnd = OMP_Entry->splitBasicBlock(OMP_Entry->getTerminator(),
                                         "copyin.not.master.end");
    OMP_Entry->getTerminator()->eraseFromParent();
  } else {
    CopyEnd =
        BasicBlock::Create(M.getContext(), "copyin.not.master.end", CurFn);
  }

  Builder.SetInsertPoint(OMP_Entry);
  // Compare the two addresses as integers.
  Value *MasterPtr = Builder.CreatePtrToInt(MasterAddr, IntPtrTy);
  Value *PrivatePtr = Builder.CreatePtrToInt(PrivateAddr, IntPtrTy);
  Value *cmp = Builder.CreateICmpNE(MasterPtr, PrivatePtr);
  Builder.CreateCondBr(cmp, CopyBegin, CopyEnd);

  Builder.SetInsertPoint(CopyBegin);
  if (BranchtoEnd)
    // Leave the insertion point just before the branch so the caller can emit
    // the copy code inside CopyBegin.
    Builder.SetInsertPoint(Builder.CreateBr(CopyEnd));

  return Builder.saveIP();
}

// Emit a call to __kmpc_alloc(tid, Size, Allocator).
CallInst *OpenMPIRBuilder::createOMPAlloc(const LocationDescription &Loc,
                                          Value *Size, Value *Allocator,
                                          std::string Name) {
  IRBuilder<>::InsertPointGuard IPG(Builder);
  Builder.restoreIP(Loc.IP);

  uint32_t SrcLocStrSize;
  Constant *SrcLocStr = getOrCreateSrcLocStr(Loc, SrcLocStrSize);
  Value *Ident = getOrCreateIdent(SrcLocStr, SrcLocStrSize);
  Value *ThreadId = getOrCreateThreadID(Ident);
  Value *Args[] = {ThreadId, Size, Allocator};

  Function *Fn = getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_alloc);

  return Builder.CreateCall(Fn, Args, Name);
}

CallInst
    // Emit a call to __kmpc_free(tid, Addr, Allocator).
    *OpenMPIRBuilder::createOMPFree(const LocationDescription &Loc, Value *Addr,
                                    Value *Allocator, std::string Name) {
  IRBuilder<>::InsertPointGuard IPG(Builder);
  Builder.restoreIP(Loc.IP);

  uint32_t SrcLocStrSize;
  Constant *SrcLocStr = getOrCreateSrcLocStr(Loc, SrcLocStrSize);
  Value *Ident = getOrCreateIdent(SrcLocStr, SrcLocStrSize);
  Value *ThreadId = getOrCreateThreadID(Ident);
  Value *Args[] = {ThreadId, Addr, Allocator};
  Function *Fn = getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_free);
  return Builder.CreateCall(Fn, Args, Name);
}

// Emit a call to __tgt_interop_init for 'interop init'. Omitted arguments are
// defaulted: Device becomes -1, and a null NumDependences yields an empty
// dependence list (count 0, null address).
CallInst *OpenMPIRBuilder::createOMPInteropInit(
    const LocationDescription &Loc, Value *InteropVar,
    omp::OMPInteropType InteropType, Value *Device, Value *NumDependences,
    Value *DependenceAddress, bool HaveNowaitClause) {
  IRBuilder<>::InsertPointGuard IPG(Builder);
  Builder.restoreIP(Loc.IP);

  uint32_t SrcLocStrSize;
  Constant *SrcLocStr = getOrCreateSrcLocStr(Loc, SrcLocStrSize);
  Value *Ident = getOrCreateIdent(SrcLocStr, SrcLocStrSize);
  Value *ThreadId = getOrCreateThreadID(Ident);
  if (Device == nullptr)
    Device = ConstantInt::get(Int32, -1);
  Constant *InteropTypeVal = ConstantInt::get(Int64, (int)InteropType);
  if (NumDependences == nullptr) {
    NumDependences = ConstantInt::get(Int32, 0);
    PointerType *PointerTypeVar = Type::getInt8PtrTy(M.getContext());
    DependenceAddress = ConstantPointerNull::get(PointerTypeVar);
  }
  Value *HaveNowaitClauseVal = ConstantInt::get(Int32, HaveNowaitClause);
  Value *Args[] = {
      Ident,  ThreadId,       InteropVar,        InteropTypeVal,
      Device, NumDependences, DependenceAddress, HaveNowaitClauseVal};

  Function *Fn = getOrCreateRuntimeFunctionPtr(OMPRTL___tgt_interop_init);

  return Builder.CreateCall(Fn, Args);
}

// Emit a call to __tgt_interop_destroy for 'interop destroy'. Defaults for
// omitted arguments mirror createOMPInteropInit.
CallInst *OpenMPIRBuilder::createOMPInteropDestroy(
    const LocationDescription &Loc, Value *InteropVar, Value *Device,
    Value *NumDependences, Value *DependenceAddress, bool HaveNowaitClause) {
  IRBuilder<>::InsertPointGuard IPG(Builder);
  Builder.restoreIP(Loc.IP);

  uint32_t SrcLocStrSize;
  Constant *SrcLocStr = getOrCreateSrcLocStr(Loc, SrcLocStrSize);
  Value *Ident = getOrCreateIdent(SrcLocStr, SrcLocStrSize);
  Value *ThreadId = getOrCreateThreadID(Ident);
  if (Device == nullptr)
    Device = ConstantInt::get(Int32, -1);
  if (NumDependences == nullptr) {
    NumDependences = ConstantInt::get(Int32, 0);
    PointerType *PointerTypeVar = Type::getInt8PtrTy(M.getContext());
    DependenceAddress = ConstantPointerNull::get(PointerTypeVar);
  }
  Value *HaveNowaitClauseVal = ConstantInt::get(Int32, HaveNowaitClause);
  Value *Args[] = {
      Ident,          ThreadId,          InteropVar, Device,
      NumDependences, DependenceAddress, HaveNowaitClauseVal};

  Function *Fn = getOrCreateRuntimeFunctionPtr(OMPRTL___tgt_interop_destroy);

  return Builder.CreateCall(Fn, Args);
}

// Emit a call to __tgt_interop_use for 'interop use'. Defaults for omitted
// arguments mirror createOMPInteropInit.
CallInst *OpenMPIRBuilder::createOMPInteropUse(const LocationDescription &Loc,
                                               Value *InteropVar, Value *Device,
                                               Value *NumDependences,
                                               Value *DependenceAddress,
                                               bool HaveNowaitClause) {
  IRBuilder<>::InsertPointGuard IPG(Builder);
  Builder.restoreIP(Loc.IP);
  uint32_t SrcLocStrSize;
  Constant *SrcLocStr = getOrCreateSrcLocStr(Loc, SrcLocStrSize);
  Value *Ident = getOrCreateIdent(SrcLocStr, SrcLocStrSize);
  Value *ThreadId = getOrCreateThreadID(Ident);
  if (Device == nullptr)
    Device = ConstantInt::get(Int32, -1);
  if (NumDependences == nullptr) {
    NumDependences = ConstantInt::get(Int32, 0);
    PointerType *PointerTypeVar = Type::getInt8PtrTy(M.getContext());
    DependenceAddress = ConstantPointerNull::get(PointerTypeVar);
  }
  Value *HaveNowaitClauseVal = ConstantInt::get(Int32, HaveNowaitClause);
  Value *Args[] = {
      Ident, ThreadId, InteropVar, Device,
      NumDependences, DependenceAddress, HaveNowaitClauseVal};

  Function *Fn = getOrCreateRuntimeFunctionPtr(OMPRTL___tgt_interop_use);

  return Builder.CreateCall(Fn, Args);
}

// Emit a call to __kmpc_threadprivate_cached, using a per-name internal
// global (the cache) so repeated lookups of the same threadprivate variable
// share one cache slot.
CallInst *OpenMPIRBuilder::createCachedThreadPrivate(
    const LocationDescription &Loc, llvm::Value *Pointer,
    llvm::ConstantInt *Size, const llvm::Twine &Name) {
  IRBuilder<>::InsertPointGuard IPG(Builder);
  Builder.restoreIP(Loc.IP);

  uint32_t SrcLocStrSize;
  Constant *SrcLocStr = getOrCreateSrcLocStr(Loc, SrcLocStrSize);
  Value *Ident = getOrCreateIdent(SrcLocStr, SrcLocStrSize);
  Value *ThreadId = getOrCreateThreadID(Ident);
  Constant *ThreadPrivateCache =
      getOrCreateOMPInternalVariable(Int8PtrPtr, Name);
  llvm::Value *Args[] = {Ident, ThreadId, Pointer, Size, ThreadPrivateCache};

  Function *Fn =
      getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_threadprivate_cached);

  return Builder.CreateCall(Fn, Args);
}

// Emit the device-side target region prologue: call __kmpc_target_init and
// branch to the user code only for the thread(s) for which it returns -1;
// all other threads return immediately (see diagram below).
OpenMPIRBuilder::InsertPointTy
OpenMPIRBuilder::createTargetInit(const LocationDescription &Loc, bool IsSPMD,
                                  bool RequiresFullRuntime) {
  if (!updateToLocation(Loc))
    return Loc.IP;

  uint32_t SrcLocStrSize;
  Constant *SrcLocStr = getOrCreateSrcLocStr(Loc, SrcLocStrSize);
  Constant *Ident = getOrCreateIdent(SrcLocStr, SrcLocStrSize);
  ConstantInt *IsSPMDVal = ConstantInt::getSigned(
      IntegerType::getInt8Ty(Int8->getContext()),
      IsSPMD ? OMP_TGT_EXEC_MODE_SPMD : OMP_TGT_EXEC_MODE_GENERIC);
  // The generic state machine is only needed in non-SPMD (generic) mode.
  ConstantInt *UseGenericStateMachine =
      ConstantInt::getBool(Int32->getContext(), !IsSPMD);
  ConstantInt *RequiresFullRuntimeVal =
      ConstantInt::getBool(Int32->getContext(), RequiresFullRuntime);

  Function *Fn = getOrCreateRuntimeFunctionPtr(
      omp::RuntimeFunction::OMPRTL___kmpc_target_init);

  CallInst *ThreadKind = Builder.CreateCall(
      Fn, {Ident, IsSPMDVal, UseGenericStateMachine, RequiresFullRuntimeVal});

  Value *ExecUserCode = Builder.CreateICmpEQ(
      ThreadKind, ConstantInt::get(ThreadKind->getType(), -1),
      "exec_user_code");

  // ThreadKind = __kmpc_target_init(...)
  // if (ThreadKind == -1)
  //   user_code
  // else
  //   return;

  // Temporary unreachable to split off the user-code block; replaced by the
  // conditional branch and erased below.
  auto *UI = Builder.CreateUnreachable();
  BasicBlock *CheckBB = UI->getParent();
  BasicBlock *UserCodeEntryBB = CheckBB->splitBasicBlock(UI, "user_code.entry");

  BasicBlock *WorkerExitBB = BasicBlock::Create(
      CheckBB->getContext(), "worker.exit", CheckBB->getParent());
  Builder.SetInsertPoint(WorkerExitBB);
  Builder.CreateRetVoid();

  auto *CheckBBTI = CheckBB->getTerminator();
  Builder.SetInsertPoint(CheckBBTI);
  Builder.CreateCondBr(ExecUserCode, UI->getParent(), WorkerExitBB);

  CheckBBTI->eraseFromParent();
  UI->eraseFromParent();

  // Continue in the "user_code" block, see diagram above and in
  // openmp/libomptarget/deviceRTLs/common/include/target.h .
  return InsertPointTy(UserCodeEntryBB, UserCodeEntryBB->getFirstInsertionPt());
}

// Emit the device-side target region epilogue: call __kmpc_target_deinit
// with the same mode/runtime flags as the matching createTargetInit.
void OpenMPIRBuilder::createTargetDeinit(const LocationDescription &Loc,
                                         bool IsSPMD,
                                         bool RequiresFullRuntime) {
  if (!updateToLocation(Loc))
    return;

  uint32_t SrcLocStrSize;
  Constant *SrcLocStr = getOrCreateSrcLocStr(Loc, SrcLocStrSize);
  Value *Ident = getOrCreateIdent(SrcLocStr, SrcLocStrSize);
  ConstantInt *IsSPMDVal = ConstantInt::getSigned(
      IntegerType::getInt8Ty(Int8->getContext()),
      IsSPMD ? OMP_TGT_EXEC_MODE_SPMD : OMP_TGT_EXEC_MODE_GENERIC);
  ConstantInt *RequiresFullRuntimeVal =
      ConstantInt::getBool(Int32->getContext(), RequiresFullRuntime);

  Function *Fn = getOrCreateRuntimeFunctionPtr(
      omp::RuntimeFunction::OMPRTL___kmpc_target_deinit);

  Builder.CreateCall(Fn, {Ident, IsSPMDVal, RequiresFullRuntimeVal});
}

// Join \p Parts with \p FirstSeparator before the first part and \p Separator
// between the remaining ones, e.g. ("a","b","c"; "$", ".") -> "$a.b.c".
std::string OpenMPIRBuilder::getNameWithSeparators(ArrayRef<StringRef> Parts,
                                                   StringRef FirstSeparator,
                                                   StringRef Separator) {
  SmallString<128> Buffer;
  llvm::raw_svector_ostream OS(Buffer);
  StringRef Sep = FirstSeparator;
  for (StringRef Part : Parts) {
    OS << Sep << Part;
    Sep = Separator;
  }
  return OS.str().str();
}

Constant *OpenMPIRBuilder::getOrCreateOMPInternalVariable(
    llvm::Type *Ty, const llvm::Twine &Name, unsigned AddressSpace) {
  // TODO: Replace the twine arg with stringref to get rid of the conversion
  // logic. However This is taken from current implementation in clang as is.
  // Since this method is used in many places exclusively for OMP internal use
  // we will keep it as is for temporarily until we move all users to the
  // builder and then, if possible, fix it everywhere in one go.
3302 SmallString<256> Buffer; 3303 llvm::raw_svector_ostream Out(Buffer); 3304 Out << Name; 3305 StringRef RuntimeName = Out.str(); 3306 auto &Elem = *InternalVars.try_emplace(RuntimeName, nullptr).first; 3307 if (Elem.second) { 3308 assert(cast<PointerType>(Elem.second->getType()) 3309 ->isOpaqueOrPointeeTypeMatches(Ty) && 3310 "OMP internal variable has different type than requested"); 3311 } else { 3312 // TODO: investigate the appropriate linkage type used for the global 3313 // variable for possibly changing that to internal or private, or maybe 3314 // create different versions of the function for different OMP internal 3315 // variables. 3316 Elem.second = new llvm::GlobalVariable( 3317 M, Ty, /*IsConstant*/ false, llvm::GlobalValue::CommonLinkage, 3318 llvm::Constant::getNullValue(Ty), Elem.first(), 3319 /*InsertBefore=*/nullptr, llvm::GlobalValue::NotThreadLocal, 3320 AddressSpace); 3321 } 3322 3323 return Elem.second; 3324 } 3325 3326 Value *OpenMPIRBuilder::getOMPCriticalRegionLock(StringRef CriticalName) { 3327 std::string Prefix = Twine("gomp_critical_user_", CriticalName).str(); 3328 std::string Name = getNameWithSeparators({Prefix, "var"}, ".", "."); 3329 return getOrCreateOMPInternalVariable(KmpCriticalNameTy, Name); 3330 } 3331 3332 GlobalVariable * 3333 OpenMPIRBuilder::createOffloadMaptypes(SmallVectorImpl<uint64_t> &Mappings, 3334 std::string VarName) { 3335 llvm::Constant *MaptypesArrayInit = 3336 llvm::ConstantDataArray::get(M.getContext(), Mappings); 3337 auto *MaptypesArrayGlobal = new llvm::GlobalVariable( 3338 M, MaptypesArrayInit->getType(), 3339 /*isConstant=*/true, llvm::GlobalValue::PrivateLinkage, MaptypesArrayInit, 3340 VarName); 3341 MaptypesArrayGlobal->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global); 3342 return MaptypesArrayGlobal; 3343 } 3344 3345 void OpenMPIRBuilder::createMapperAllocas(const LocationDescription &Loc, 3346 InsertPointTy AllocaIP, 3347 unsigned NumOperands, 3348 struct MapperAllocas &MapperAllocas) { 
3349 if (!updateToLocation(Loc)) 3350 return; 3351 3352 auto *ArrI8PtrTy = ArrayType::get(Int8Ptr, NumOperands); 3353 auto *ArrI64Ty = ArrayType::get(Int64, NumOperands); 3354 Builder.restoreIP(AllocaIP); 3355 AllocaInst *ArgsBase = Builder.CreateAlloca(ArrI8PtrTy); 3356 AllocaInst *Args = Builder.CreateAlloca(ArrI8PtrTy); 3357 AllocaInst *ArgSizes = Builder.CreateAlloca(ArrI64Ty); 3358 Builder.restoreIP(Loc.IP); 3359 MapperAllocas.ArgsBase = ArgsBase; 3360 MapperAllocas.Args = Args; 3361 MapperAllocas.ArgSizes = ArgSizes; 3362 } 3363 3364 void OpenMPIRBuilder::emitMapperCall(const LocationDescription &Loc, 3365 Function *MapperFunc, Value *SrcLocInfo, 3366 Value *MaptypesArg, Value *MapnamesArg, 3367 struct MapperAllocas &MapperAllocas, 3368 int64_t DeviceID, unsigned NumOperands) { 3369 if (!updateToLocation(Loc)) 3370 return; 3371 3372 auto *ArrI8PtrTy = ArrayType::get(Int8Ptr, NumOperands); 3373 auto *ArrI64Ty = ArrayType::get(Int64, NumOperands); 3374 Value *ArgsBaseGEP = 3375 Builder.CreateInBoundsGEP(ArrI8PtrTy, MapperAllocas.ArgsBase, 3376 {Builder.getInt32(0), Builder.getInt32(0)}); 3377 Value *ArgsGEP = 3378 Builder.CreateInBoundsGEP(ArrI8PtrTy, MapperAllocas.Args, 3379 {Builder.getInt32(0), Builder.getInt32(0)}); 3380 Value *ArgSizesGEP = 3381 Builder.CreateInBoundsGEP(ArrI64Ty, MapperAllocas.ArgSizes, 3382 {Builder.getInt32(0), Builder.getInt32(0)}); 3383 Value *NullPtr = Constant::getNullValue(Int8Ptr->getPointerTo()); 3384 Builder.CreateCall(MapperFunc, 3385 {SrcLocInfo, Builder.getInt64(DeviceID), 3386 Builder.getInt32(NumOperands), ArgsBaseGEP, ArgsGEP, 3387 ArgSizesGEP, MaptypesArg, MapnamesArg, NullPtr}); 3388 } 3389 3390 bool OpenMPIRBuilder::checkAndEmitFlushAfterAtomic( 3391 const LocationDescription &Loc, llvm::AtomicOrdering AO, AtomicKind AK) { 3392 assert(!(AO == AtomicOrdering::NotAtomic || 3393 AO == llvm::AtomicOrdering::Unordered) && 3394 "Unexpected Atomic Ordering."); 3395 3396 bool Flush = false; 3397 llvm::AtomicOrdering FlushAO = 
AtomicOrdering::Monotonic; 3398 3399 switch (AK) { 3400 case Read: 3401 if (AO == AtomicOrdering::Acquire || AO == AtomicOrdering::AcquireRelease || 3402 AO == AtomicOrdering::SequentiallyConsistent) { 3403 FlushAO = AtomicOrdering::Acquire; 3404 Flush = true; 3405 } 3406 break; 3407 case Write: 3408 case Compare: 3409 case Update: 3410 if (AO == AtomicOrdering::Release || AO == AtomicOrdering::AcquireRelease || 3411 AO == AtomicOrdering::SequentiallyConsistent) { 3412 FlushAO = AtomicOrdering::Release; 3413 Flush = true; 3414 } 3415 break; 3416 case Capture: 3417 switch (AO) { 3418 case AtomicOrdering::Acquire: 3419 FlushAO = AtomicOrdering::Acquire; 3420 Flush = true; 3421 break; 3422 case AtomicOrdering::Release: 3423 FlushAO = AtomicOrdering::Release; 3424 Flush = true; 3425 break; 3426 case AtomicOrdering::AcquireRelease: 3427 case AtomicOrdering::SequentiallyConsistent: 3428 FlushAO = AtomicOrdering::AcquireRelease; 3429 Flush = true; 3430 break; 3431 default: 3432 // do nothing - leave silently. 
3433 break; 3434 } 3435 } 3436 3437 if (Flush) { 3438 // Currently Flush RT call still doesn't take memory_ordering, so for when 3439 // that happens, this tries to do the resolution of which atomic ordering 3440 // to use with but issue the flush call 3441 // TODO: pass `FlushAO` after memory ordering support is added 3442 (void)FlushAO; 3443 emitFlush(Loc); 3444 } 3445 3446 // for AO == AtomicOrdering::Monotonic and all other case combinations 3447 // do nothing 3448 return Flush; 3449 } 3450 3451 OpenMPIRBuilder::InsertPointTy 3452 OpenMPIRBuilder::createAtomicRead(const LocationDescription &Loc, 3453 AtomicOpValue &X, AtomicOpValue &V, 3454 AtomicOrdering AO) { 3455 if (!updateToLocation(Loc)) 3456 return Loc.IP; 3457 3458 Type *XTy = X.Var->getType(); 3459 assert(XTy->isPointerTy() && "OMP Atomic expects a pointer to target memory"); 3460 Type *XElemTy = X.ElemTy; 3461 assert((XElemTy->isFloatingPointTy() || XElemTy->isIntegerTy() || 3462 XElemTy->isPointerTy()) && 3463 "OMP atomic read expected a scalar type"); 3464 3465 Value *XRead = nullptr; 3466 3467 if (XElemTy->isIntegerTy()) { 3468 LoadInst *XLD = 3469 Builder.CreateLoad(XElemTy, X.Var, X.IsVolatile, "omp.atomic.read"); 3470 XLD->setAtomic(AO); 3471 XRead = cast<Value>(XLD); 3472 } else { 3473 // We need to bitcast and perform atomic op as integer 3474 unsigned Addrspace = cast<PointerType>(XTy)->getAddressSpace(); 3475 IntegerType *IntCastTy = 3476 IntegerType::get(M.getContext(), XElemTy->getScalarSizeInBits()); 3477 Value *XBCast = Builder.CreateBitCast( 3478 X.Var, IntCastTy->getPointerTo(Addrspace), "atomic.src.int.cast"); 3479 LoadInst *XLoad = 3480 Builder.CreateLoad(IntCastTy, XBCast, X.IsVolatile, "omp.atomic.load"); 3481 XLoad->setAtomic(AO); 3482 if (XElemTy->isFloatingPointTy()) { 3483 XRead = Builder.CreateBitCast(XLoad, XElemTy, "atomic.flt.cast"); 3484 } else { 3485 XRead = Builder.CreateIntToPtr(XLoad, XElemTy, "atomic.ptr.cast"); 3486 } 3487 } 3488 checkAndEmitFlushAfterAtomic(Loc, AO, 
AtomicKind::Read); 3489 Builder.CreateStore(XRead, V.Var, V.IsVolatile); 3490 return Builder.saveIP(); 3491 } 3492 3493 OpenMPIRBuilder::InsertPointTy 3494 OpenMPIRBuilder::createAtomicWrite(const LocationDescription &Loc, 3495 AtomicOpValue &X, Value *Expr, 3496 AtomicOrdering AO) { 3497 if (!updateToLocation(Loc)) 3498 return Loc.IP; 3499 3500 Type *XTy = X.Var->getType(); 3501 assert(XTy->isPointerTy() && "OMP Atomic expects a pointer to target memory"); 3502 Type *XElemTy = X.ElemTy; 3503 assert((XElemTy->isFloatingPointTy() || XElemTy->isIntegerTy() || 3504 XElemTy->isPointerTy()) && 3505 "OMP atomic write expected a scalar type"); 3506 3507 if (XElemTy->isIntegerTy()) { 3508 StoreInst *XSt = Builder.CreateStore(Expr, X.Var, X.IsVolatile); 3509 XSt->setAtomic(AO); 3510 } else { 3511 // We need to bitcast and perform atomic op as integers 3512 unsigned Addrspace = cast<PointerType>(XTy)->getAddressSpace(); 3513 IntegerType *IntCastTy = 3514 IntegerType::get(M.getContext(), XElemTy->getScalarSizeInBits()); 3515 Value *XBCast = Builder.CreateBitCast( 3516 X.Var, IntCastTy->getPointerTo(Addrspace), "atomic.dst.int.cast"); 3517 Value *ExprCast = 3518 Builder.CreateBitCast(Expr, IntCastTy, "atomic.src.int.cast"); 3519 StoreInst *XSt = Builder.CreateStore(ExprCast, XBCast, X.IsVolatile); 3520 XSt->setAtomic(AO); 3521 } 3522 3523 checkAndEmitFlushAfterAtomic(Loc, AO, AtomicKind::Write); 3524 return Builder.saveIP(); 3525 } 3526 3527 OpenMPIRBuilder::InsertPointTy OpenMPIRBuilder::createAtomicUpdate( 3528 const LocationDescription &Loc, InsertPointTy AllocaIP, AtomicOpValue &X, 3529 Value *Expr, AtomicOrdering AO, AtomicRMWInst::BinOp RMWOp, 3530 AtomicUpdateCallbackTy &UpdateOp, bool IsXBinopExpr) { 3531 assert(!isConflictIP(Loc.IP, AllocaIP) && "IPs must not be ambiguous"); 3532 if (!updateToLocation(Loc)) 3533 return Loc.IP; 3534 3535 LLVM_DEBUG({ 3536 Type *XTy = X.Var->getType(); 3537 assert(XTy->isPointerTy() && 3538 "OMP Atomic expects a pointer to target 
memory"); 3539 Type *XElemTy = X.ElemTy; 3540 assert((XElemTy->isFloatingPointTy() || XElemTy->isIntegerTy() || 3541 XElemTy->isPointerTy()) && 3542 "OMP atomic update expected a scalar type"); 3543 assert((RMWOp != AtomicRMWInst::Max) && (RMWOp != AtomicRMWInst::Min) && 3544 (RMWOp != AtomicRMWInst::UMax) && (RMWOp != AtomicRMWInst::UMin) && 3545 "OpenMP atomic does not support LT or GT operations"); 3546 }); 3547 3548 emitAtomicUpdate(AllocaIP, X.Var, X.ElemTy, Expr, AO, RMWOp, UpdateOp, 3549 X.IsVolatile, IsXBinopExpr); 3550 checkAndEmitFlushAfterAtomic(Loc, AO, AtomicKind::Update); 3551 return Builder.saveIP(); 3552 } 3553 3554 Value *OpenMPIRBuilder::emitRMWOpAsInstruction(Value *Src1, Value *Src2, 3555 AtomicRMWInst::BinOp RMWOp) { 3556 switch (RMWOp) { 3557 case AtomicRMWInst::Add: 3558 return Builder.CreateAdd(Src1, Src2); 3559 case AtomicRMWInst::Sub: 3560 return Builder.CreateSub(Src1, Src2); 3561 case AtomicRMWInst::And: 3562 return Builder.CreateAnd(Src1, Src2); 3563 case AtomicRMWInst::Nand: 3564 return Builder.CreateNeg(Builder.CreateAnd(Src1, Src2)); 3565 case AtomicRMWInst::Or: 3566 return Builder.CreateOr(Src1, Src2); 3567 case AtomicRMWInst::Xor: 3568 return Builder.CreateXor(Src1, Src2); 3569 case AtomicRMWInst::Xchg: 3570 case AtomicRMWInst::FAdd: 3571 case AtomicRMWInst::FSub: 3572 case AtomicRMWInst::BAD_BINOP: 3573 case AtomicRMWInst::Max: 3574 case AtomicRMWInst::Min: 3575 case AtomicRMWInst::UMax: 3576 case AtomicRMWInst::UMin: 3577 llvm_unreachable("Unsupported atomic update operation"); 3578 } 3579 llvm_unreachable("Unsupported atomic update operation"); 3580 } 3581 3582 std::pair<Value *, Value *> OpenMPIRBuilder::emitAtomicUpdate( 3583 InsertPointTy AllocaIP, Value *X, Type *XElemTy, Value *Expr, 3584 AtomicOrdering AO, AtomicRMWInst::BinOp RMWOp, 3585 AtomicUpdateCallbackTy &UpdateOp, bool VolatileX, bool IsXBinopExpr) { 3586 bool DoCmpExch = (RMWOp == AtomicRMWInst::BAD_BINOP) || 3587 (RMWOp == AtomicRMWInst::FAdd) || 3588 (RMWOp == 
AtomicRMWInst::FSub) || 3589 (RMWOp == AtomicRMWInst::Sub && !IsXBinopExpr) || !XElemTy; 3590 3591 std::pair<Value *, Value *> Res; 3592 if (XElemTy->isIntegerTy() && !DoCmpExch) { 3593 Res.first = Builder.CreateAtomicRMW(RMWOp, X, Expr, llvm::MaybeAlign(), AO); 3594 // not needed except in case of postfix captures. Generate anyway for 3595 // consistency with the else part. Will be removed with any DCE pass. 3596 Res.second = emitRMWOpAsInstruction(Res.first, Expr, RMWOp); 3597 } else { 3598 unsigned Addrspace = cast<PointerType>(X->getType())->getAddressSpace(); 3599 IntegerType *IntCastTy = 3600 IntegerType::get(M.getContext(), XElemTy->getScalarSizeInBits()); 3601 Value *XBCast = 3602 Builder.CreateBitCast(X, IntCastTy->getPointerTo(Addrspace)); 3603 LoadInst *OldVal = 3604 Builder.CreateLoad(IntCastTy, XBCast, X->getName() + ".atomic.load"); 3605 OldVal->setAtomic(AO); 3606 // CurBB 3607 // | /---\ 3608 // ContBB | 3609 // | \---/ 3610 // ExitBB 3611 BasicBlock *CurBB = Builder.GetInsertBlock(); 3612 Instruction *CurBBTI = CurBB->getTerminator(); 3613 CurBBTI = CurBBTI ? CurBBTI : Builder.CreateUnreachable(); 3614 BasicBlock *ExitBB = 3615 CurBB->splitBasicBlock(CurBBTI, X->getName() + ".atomic.exit"); 3616 BasicBlock *ContBB = CurBB->splitBasicBlock(CurBB->getTerminator(), 3617 X->getName() + ".atomic.cont"); 3618 ContBB->getTerminator()->eraseFromParent(); 3619 Builder.restoreIP(AllocaIP); 3620 AllocaInst *NewAtomicAddr = Builder.CreateAlloca(XElemTy); 3621 NewAtomicAddr->setName(X->getName() + "x.new.val"); 3622 Builder.SetInsertPoint(ContBB); 3623 llvm::PHINode *PHI = Builder.CreatePHI(OldVal->getType(), 2); 3624 PHI->addIncoming(OldVal, CurBB); 3625 IntegerType *NewAtomicCastTy = 3626 IntegerType::get(M.getContext(), XElemTy->getScalarSizeInBits()); 3627 bool IsIntTy = XElemTy->isIntegerTy(); 3628 Value *NewAtomicIntAddr = 3629 (IsIntTy) 3630 ? 
NewAtomicAddr 3631 : Builder.CreateBitCast(NewAtomicAddr, 3632 NewAtomicCastTy->getPointerTo(Addrspace)); 3633 Value *OldExprVal = PHI; 3634 if (!IsIntTy) { 3635 if (XElemTy->isFloatingPointTy()) { 3636 OldExprVal = Builder.CreateBitCast(PHI, XElemTy, 3637 X->getName() + ".atomic.fltCast"); 3638 } else { 3639 OldExprVal = Builder.CreateIntToPtr(PHI, XElemTy, 3640 X->getName() + ".atomic.ptrCast"); 3641 } 3642 } 3643 3644 Value *Upd = UpdateOp(OldExprVal, Builder); 3645 Builder.CreateStore(Upd, NewAtomicAddr); 3646 LoadInst *DesiredVal = Builder.CreateLoad(IntCastTy, NewAtomicIntAddr); 3647 Value *XAddr = 3648 (IsIntTy) 3649 ? X 3650 : Builder.CreateBitCast(X, IntCastTy->getPointerTo(Addrspace)); 3651 AtomicOrdering Failure = 3652 llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO); 3653 AtomicCmpXchgInst *Result = Builder.CreateAtomicCmpXchg( 3654 XAddr, PHI, DesiredVal, llvm::MaybeAlign(), AO, Failure); 3655 Result->setVolatile(VolatileX); 3656 Value *PreviousVal = Builder.CreateExtractValue(Result, /*Idxs=*/0); 3657 Value *SuccessFailureVal = Builder.CreateExtractValue(Result, /*Idxs=*/1); 3658 PHI->addIncoming(PreviousVal, Builder.GetInsertBlock()); 3659 Builder.CreateCondBr(SuccessFailureVal, ExitBB, ContBB); 3660 3661 Res.first = OldExprVal; 3662 Res.second = Upd; 3663 3664 // set Insertion point in exit block 3665 if (UnreachableInst *ExitTI = 3666 dyn_cast<UnreachableInst>(ExitBB->getTerminator())) { 3667 CurBBTI->eraseFromParent(); 3668 Builder.SetInsertPoint(ExitBB); 3669 } else { 3670 Builder.SetInsertPoint(ExitTI); 3671 } 3672 } 3673 3674 return Res; 3675 } 3676 3677 OpenMPIRBuilder::InsertPointTy OpenMPIRBuilder::createAtomicCapture( 3678 const LocationDescription &Loc, InsertPointTy AllocaIP, AtomicOpValue &X, 3679 AtomicOpValue &V, Value *Expr, AtomicOrdering AO, 3680 AtomicRMWInst::BinOp RMWOp, AtomicUpdateCallbackTy &UpdateOp, 3681 bool UpdateExpr, bool IsPostfixUpdate, bool IsXBinopExpr) { 3682 if (!updateToLocation(Loc)) 3683 return Loc.IP; 
3684 3685 LLVM_DEBUG({ 3686 Type *XTy = X.Var->getType(); 3687 assert(XTy->isPointerTy() && 3688 "OMP Atomic expects a pointer to target memory"); 3689 Type *XElemTy = X.ElemTy; 3690 assert((XElemTy->isFloatingPointTy() || XElemTy->isIntegerTy() || 3691 XElemTy->isPointerTy()) && 3692 "OMP atomic capture expected a scalar type"); 3693 assert((RMWOp != AtomicRMWInst::Max) && (RMWOp != AtomicRMWInst::Min) && 3694 "OpenMP atomic does not support LT or GT operations"); 3695 }); 3696 3697 // If UpdateExpr is 'x' updated with some `expr` not based on 'x', 3698 // 'x' is simply atomically rewritten with 'expr'. 3699 AtomicRMWInst::BinOp AtomicOp = (UpdateExpr ? RMWOp : AtomicRMWInst::Xchg); 3700 std::pair<Value *, Value *> Result = 3701 emitAtomicUpdate(AllocaIP, X.Var, X.ElemTy, Expr, AO, AtomicOp, UpdateOp, 3702 X.IsVolatile, IsXBinopExpr); 3703 3704 Value *CapturedVal = (IsPostfixUpdate ? Result.first : Result.second); 3705 Builder.CreateStore(CapturedVal, V.Var, V.IsVolatile); 3706 3707 checkAndEmitFlushAfterAtomic(Loc, AO, AtomicKind::Capture); 3708 return Builder.saveIP(); 3709 } 3710 3711 OpenMPIRBuilder::InsertPointTy OpenMPIRBuilder::createAtomicCompare( 3712 const LocationDescription &Loc, AtomicOpValue &X, Value *E, Value *D, 3713 AtomicOrdering AO, OMPAtomicCompareOp Op, bool IsXBinopExpr) { 3714 if (!updateToLocation(Loc)) 3715 return Loc.IP; 3716 3717 assert(X.Var->getType()->isPointerTy() && 3718 "OMP atomic expects a pointer to target memory"); 3719 assert((X.ElemTy->isIntegerTy() || X.ElemTy->isPointerTy()) && 3720 "OMP atomic compare expected a integer scalar type"); 3721 3722 if (Op == OMPAtomicCompareOp::EQ) { 3723 AtomicOrdering Failure = AtomicCmpXchgInst::getStrongestFailureOrdering(AO); 3724 // We don't need the result for now. 
3725 (void)Builder.CreateAtomicCmpXchg(X.Var, E, D, MaybeAlign(), AO, Failure); 3726 } else { 3727 assert((Op == OMPAtomicCompareOp::MAX || Op == OMPAtomicCompareOp::MIN) && 3728 "Op should be either max or min at this point"); 3729 3730 // Reverse the ordop as the OpenMP forms are different from LLVM forms. 3731 // Let's take max as example. 3732 // OpenMP form: 3733 // x = x > expr ? expr : x; 3734 // LLVM form: 3735 // *ptr = *ptr > val ? *ptr : val; 3736 // We need to transform to LLVM form. 3737 // x = x <= expr ? x : expr; 3738 AtomicRMWInst::BinOp NewOp; 3739 if (IsXBinopExpr) { 3740 if (X.IsSigned) 3741 NewOp = Op == OMPAtomicCompareOp::MAX ? AtomicRMWInst::Min 3742 : AtomicRMWInst::Max; 3743 else 3744 NewOp = Op == OMPAtomicCompareOp::MAX ? AtomicRMWInst::UMin 3745 : AtomicRMWInst::UMax; 3746 } else { 3747 if (X.IsSigned) 3748 NewOp = Op == OMPAtomicCompareOp::MAX ? AtomicRMWInst::Max 3749 : AtomicRMWInst::Min; 3750 else 3751 NewOp = Op == OMPAtomicCompareOp::MAX ? AtomicRMWInst::UMax 3752 : AtomicRMWInst::UMin; 3753 } 3754 // We dont' need the result for now. 3755 (void)Builder.CreateAtomicRMW(NewOp, X.Var, E, MaybeAlign(), AO); 3756 } 3757 3758 checkAndEmitFlushAfterAtomic(Loc, AO, AtomicKind::Compare); 3759 3760 return Builder.saveIP(); 3761 } 3762 3763 GlobalVariable * 3764 OpenMPIRBuilder::createOffloadMapnames(SmallVectorImpl<llvm::Constant *> &Names, 3765 std::string VarName) { 3766 llvm::Constant *MapNamesArrayInit = llvm::ConstantArray::get( 3767 llvm::ArrayType::get( 3768 llvm::Type::getInt8Ty(M.getContext())->getPointerTo(), Names.size()), 3769 Names); 3770 auto *MapNamesArrayGlobal = new llvm::GlobalVariable( 3771 M, MapNamesArrayInit->getType(), 3772 /*isConstant=*/true, llvm::GlobalValue::PrivateLinkage, MapNamesArrayInit, 3773 VarName); 3774 return MapNamesArrayGlobal; 3775 } 3776 3777 // Create all simple and struct types exposed by the runtime and remember 3778 // the llvm::PointerTypes of them for easy access later. 
3779 void OpenMPIRBuilder::initializeTypes(Module &M) { 3780 LLVMContext &Ctx = M.getContext(); 3781 StructType *T; 3782 #define OMP_TYPE(VarName, InitValue) VarName = InitValue; 3783 #define OMP_ARRAY_TYPE(VarName, ElemTy, ArraySize) \ 3784 VarName##Ty = ArrayType::get(ElemTy, ArraySize); \ 3785 VarName##PtrTy = PointerType::getUnqual(VarName##Ty); 3786 #define OMP_FUNCTION_TYPE(VarName, IsVarArg, ReturnType, ...) \ 3787 VarName = FunctionType::get(ReturnType, {__VA_ARGS__}, IsVarArg); \ 3788 VarName##Ptr = PointerType::getUnqual(VarName); 3789 #define OMP_STRUCT_TYPE(VarName, StructName, ...) \ 3790 T = StructType::getTypeByName(Ctx, StructName); \ 3791 if (!T) \ 3792 T = StructType::create(Ctx, {__VA_ARGS__}, StructName); \ 3793 VarName = T; \ 3794 VarName##Ptr = PointerType::getUnqual(T); 3795 #include "llvm/Frontend/OpenMP/OMPKinds.def" 3796 } 3797 3798 void OpenMPIRBuilder::OutlineInfo::collectBlocks( 3799 SmallPtrSetImpl<BasicBlock *> &BlockSet, 3800 SmallVectorImpl<BasicBlock *> &BlockVector) { 3801 SmallVector<BasicBlock *, 32> Worklist; 3802 BlockSet.insert(EntryBB); 3803 BlockSet.insert(ExitBB); 3804 3805 Worklist.push_back(EntryBB); 3806 while (!Worklist.empty()) { 3807 BasicBlock *BB = Worklist.pop_back_val(); 3808 BlockVector.push_back(BB); 3809 for (BasicBlock *SuccBB : successors(BB)) 3810 if (BlockSet.insert(SuccBB).second) 3811 Worklist.push_back(SuccBB); 3812 } 3813 } 3814 3815 void CanonicalLoopInfo::collectControlBlocks( 3816 SmallVectorImpl<BasicBlock *> &BBs) { 3817 // We only count those BBs as control block for which we do not need to 3818 // reverse the CFG, i.e. not the loop body which can contain arbitrary control 3819 // flow. For consistency, this also means we do not add the Body block, which 3820 // is just the entry to the body code. 
3821 BBs.reserve(BBs.size() + 6); 3822 BBs.append({getPreheader(), Header, Cond, Latch, Exit, getAfter()}); 3823 } 3824 3825 BasicBlock *CanonicalLoopInfo::getPreheader() const { 3826 assert(isValid() && "Requires a valid canonical loop"); 3827 for (BasicBlock *Pred : predecessors(Header)) { 3828 if (Pred != Latch) 3829 return Pred; 3830 } 3831 llvm_unreachable("Missing preheader"); 3832 } 3833 3834 void CanonicalLoopInfo::setTripCount(Value *TripCount) { 3835 assert(isValid() && "Requires a valid canonical loop"); 3836 3837 Instruction *CmpI = &getCond()->front(); 3838 assert(isa<CmpInst>(CmpI) && "First inst must compare IV with TripCount"); 3839 CmpI->setOperand(1, TripCount); 3840 3841 #ifndef NDEBUG 3842 assertOK(); 3843 #endif 3844 } 3845 3846 void CanonicalLoopInfo::mapIndVar( 3847 llvm::function_ref<Value *(Instruction *)> Updater) { 3848 assert(isValid() && "Requires a valid canonical loop"); 3849 3850 Instruction *OldIV = getIndVar(); 3851 3852 // Record all uses excluding those introduced by the updater. Uses by the 3853 // CanonicalLoopInfo itself to keep track of the number of iterations are 3854 // excluded. 3855 SmallVector<Use *> ReplacableUses; 3856 for (Use &U : OldIV->uses()) { 3857 auto *User = dyn_cast<Instruction>(U.getUser()); 3858 if (!User) 3859 continue; 3860 if (User->getParent() == getCond()) 3861 continue; 3862 if (User->getParent() == getLatch()) 3863 continue; 3864 ReplacableUses.push_back(&U); 3865 } 3866 3867 // Run the updater that may introduce new uses 3868 Value *NewIV = Updater(OldIV); 3869 3870 // Replace the old uses with the value returned by the updater. 3871 for (Use *U : ReplacableUses) 3872 U->set(NewIV); 3873 3874 #ifndef NDEBUG 3875 assertOK(); 3876 #endif 3877 } 3878 3879 void CanonicalLoopInfo::assertOK() const { 3880 #ifndef NDEBUG 3881 // No constraints if this object currently does not describe a loop. 
3882 if (!isValid()) 3883 return; 3884 3885 BasicBlock *Preheader = getPreheader(); 3886 BasicBlock *Body = getBody(); 3887 BasicBlock *After = getAfter(); 3888 3889 // Verify standard control-flow we use for OpenMP loops. 3890 assert(Preheader); 3891 assert(isa<BranchInst>(Preheader->getTerminator()) && 3892 "Preheader must terminate with unconditional branch"); 3893 assert(Preheader->getSingleSuccessor() == Header && 3894 "Preheader must jump to header"); 3895 3896 assert(Header); 3897 assert(isa<BranchInst>(Header->getTerminator()) && 3898 "Header must terminate with unconditional branch"); 3899 assert(Header->getSingleSuccessor() == Cond && 3900 "Header must jump to exiting block"); 3901 3902 assert(Cond); 3903 assert(Cond->getSinglePredecessor() == Header && 3904 "Exiting block only reachable from header"); 3905 3906 assert(isa<BranchInst>(Cond->getTerminator()) && 3907 "Exiting block must terminate with conditional branch"); 3908 assert(size(successors(Cond)) == 2 && 3909 "Exiting block must have two successors"); 3910 assert(cast<BranchInst>(Cond->getTerminator())->getSuccessor(0) == Body && 3911 "Exiting block's first successor jump to the body"); 3912 assert(cast<BranchInst>(Cond->getTerminator())->getSuccessor(1) == Exit && 3913 "Exiting block's second successor must exit the loop"); 3914 3915 assert(Body); 3916 assert(Body->getSinglePredecessor() == Cond && 3917 "Body only reachable from exiting block"); 3918 assert(!isa<PHINode>(Body->front())); 3919 3920 assert(Latch); 3921 assert(isa<BranchInst>(Latch->getTerminator()) && 3922 "Latch must terminate with unconditional branch"); 3923 assert(Latch->getSingleSuccessor() == Header && "Latch must jump to header"); 3924 // TODO: To support simple redirecting of the end of the body code that has 3925 // multiple; introduce another auxiliary basic block like preheader and after. 
3926 assert(Latch->getSinglePredecessor() != nullptr); 3927 assert(!isa<PHINode>(Latch->front())); 3928 3929 assert(Exit); 3930 assert(isa<BranchInst>(Exit->getTerminator()) && 3931 "Exit block must terminate with unconditional branch"); 3932 assert(Exit->getSingleSuccessor() == After && 3933 "Exit block must jump to after block"); 3934 3935 assert(After); 3936 assert(After->getSinglePredecessor() == Exit && 3937 "After block only reachable from exit block"); 3938 assert(After->empty() || !isa<PHINode>(After->front())); 3939 3940 Instruction *IndVar = getIndVar(); 3941 assert(IndVar && "Canonical induction variable not found?"); 3942 assert(isa<IntegerType>(IndVar->getType()) && 3943 "Induction variable must be an integer"); 3944 assert(cast<PHINode>(IndVar)->getParent() == Header && 3945 "Induction variable must be a PHI in the loop header"); 3946 assert(cast<PHINode>(IndVar)->getIncomingBlock(0) == Preheader); 3947 assert( 3948 cast<ConstantInt>(cast<PHINode>(IndVar)->getIncomingValue(0))->isZero()); 3949 assert(cast<PHINode>(IndVar)->getIncomingBlock(1) == Latch); 3950 3951 auto *NextIndVar = cast<PHINode>(IndVar)->getIncomingValue(1); 3952 assert(cast<Instruction>(NextIndVar)->getParent() == Latch); 3953 assert(cast<BinaryOperator>(NextIndVar)->getOpcode() == BinaryOperator::Add); 3954 assert(cast<BinaryOperator>(NextIndVar)->getOperand(0) == IndVar); 3955 assert(cast<ConstantInt>(cast<BinaryOperator>(NextIndVar)->getOperand(1)) 3956 ->isOne()); 3957 3958 Value *TripCount = getTripCount(); 3959 assert(TripCount && "Loop trip count not found?"); 3960 assert(IndVar->getType() == TripCount->getType() && 3961 "Trip count and induction variable must have the same type"); 3962 3963 auto *CmpI = cast<CmpInst>(&Cond->front()); 3964 assert(CmpI->getPredicate() == CmpInst::ICMP_ULT && 3965 "Exit condition must be a signed less-than comparison"); 3966 assert(CmpI->getOperand(0) == IndVar && 3967 "Exit condition must compare the induction variable"); 3968 
assert(CmpI->getOperand(1) == TripCount && 3969 "Exit condition must compare with the trip count"); 3970 #endif 3971 } 3972 3973 void CanonicalLoopInfo::invalidate() { 3974 Header = nullptr; 3975 Cond = nullptr; 3976 Latch = nullptr; 3977 Exit = nullptr; 3978 } 3979