//===- OpenMPIRBuilder.cpp - Builder for LLVM-IR for OpenMP directives ----===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
///
/// This file implements the OpenMPIRBuilder class, which is used as a
/// convenient way to create LLVM instructions for OpenMP directives.
///
//===----------------------------------------------------------------------===//

#include "llvm/Frontend/OpenMP/OMPIRBuilder.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Triple.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/CodeMetrics.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/PassManager.h"
#include "llvm/IR/Value.h"
#include "llvm/MC/TargetRegistry.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Error.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/CodeExtractor.h"
#include "llvm/Transforms/Utils/LoopPeel.h"
#include "llvm/Transforms/Utils/ModuleUtils.h"
#include "llvm/Transforms/Utils/UnrollLoop.h"

#include <cstdint>
#include <sstream>

#define DEBUG_TYPE "openmp-ir-builder"

using namespace llvm;
using namespace omp;

static cl::opt<bool>
    OptimisticAttributes("openmp-ir-builder-optimistic-attributes", cl::Hidden,
                         cl::desc("Use optimistic attributes describing "
                                  "'as-if' properties of runtime calls."),
                         cl::init(false));

static cl::opt<double> UnrollThresholdFactor(
    "openmp-ir-builder-unroll-threshold-factor", cl::Hidden,
    cl::desc("Factor for the unroll threshold to account for code "
             "simplifications still taking place"),
    cl::init(1.5));

void OpenMPIRBuilder::addAttributes(omp::RuntimeFunction FnID, Function &Fn) {
  LLVMContext &Ctx = Fn.getContext();

  // Get the function's current attributes.
  auto Attrs = Fn.getAttributes();
  auto FnAttrs = Attrs.getFnAttrs();
  auto RetAttrs = Attrs.getRetAttrs();
  SmallVector<AttributeSet, 4> ArgAttrs;
  for (size_t ArgNo = 0; ArgNo < Fn.arg_size(); ++ArgNo)
    ArgAttrs.emplace_back(Attrs.getParamAttrs(ArgNo));

#define OMP_ATTRS_SET(VarName, AttrSet) AttributeSet VarName = AttrSet;
#include "llvm/Frontend/OpenMP/OMPKinds.def"

  // Add attributes to the function declaration.
  switch (FnID) {
#define OMP_RTL_ATTRS(Enum, FnAttrSet, RetAttrSet, ArgAttrSets)                \
  case Enum:                                                                   \
    FnAttrs = FnAttrs.addAttributes(Ctx, FnAttrSet);                           \
    RetAttrs = RetAttrs.addAttributes(Ctx, RetAttrSet);                        \
    for (size_t ArgNo = 0; ArgNo < ArgAttrSets.size(); ++ArgNo)                \
      ArgAttrs[ArgNo] =                                                        \
          ArgAttrs[ArgNo].addAttributes(Ctx, ArgAttrSets[ArgNo]);              \
    Fn.setAttributes(AttributeList::get(Ctx, FnAttrs, RetAttrs, ArgAttrs));    \
    break;
#include "llvm/Frontend/OpenMP/OMPKinds.def"
  default:
    // Attributes are optional.
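    // (Illustrative, not exhaustive: with the sets from OMPKinds.def applied,
    // a declaration such as __kmpc_barrier typically ends up with function
    // attributes like `nounwind`; the exact sets depend on the .def file and
    // on the -openmp-ir-builder-optimistic-attributes flag referenced there.)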
    break;
  }
}

FunctionCallee
OpenMPIRBuilder::getOrCreateRuntimeFunction(Module &M, RuntimeFunction FnID) {
  FunctionType *FnTy = nullptr;
  Function *Fn = nullptr;

  // Try to find the declaration in the module first.
  switch (FnID) {
#define OMP_RTL(Enum, Str, IsVarArg, ReturnType, ...)                          \
  case Enum:                                                                   \
    FnTy = FunctionType::get(ReturnType, ArrayRef<Type *>{__VA_ARGS__},        \
                             IsVarArg);                                        \
    Fn = M.getFunction(Str);                                                   \
    break;
#include "llvm/Frontend/OpenMP/OMPKinds.def"
  }

  if (!Fn) {
    // Create a new declaration if we need one.
    switch (FnID) {
#define OMP_RTL(Enum, Str, ...)                                                \
  case Enum:                                                                   \
    Fn = Function::Create(FnTy, GlobalValue::ExternalLinkage, Str, M);         \
    break;
#include "llvm/Frontend/OpenMP/OMPKinds.def"
    }

    // Add information if the runtime function takes a callback function.
    if (FnID == OMPRTL___kmpc_fork_call || FnID == OMPRTL___kmpc_fork_teams) {
      if (!Fn->hasMetadata(LLVMContext::MD_callback)) {
        LLVMContext &Ctx = Fn->getContext();
        MDBuilder MDB(Ctx);
        // Annotate the callback behavior of the runtime function:
        //  - The callback callee is argument number 2 (microtask).
        //  - The first two arguments of the callback callee are unknown (-1).
        //  - All variadic arguments to the runtime function are passed to the
        //    callback callee.
        Fn->addMetadata(
            LLVMContext::MD_callback,
            *MDNode::get(Ctx, {MDB.createCallbackEncoding(
                                  2, {-1, -1}, /* VarArgsArePassed */ true)}));
      }
    }

    LLVM_DEBUG(dbgs() << "Created OpenMP runtime function " << Fn->getName()
                      << " with type " << *Fn->getFunctionType() << "\n");
    addAttributes(FnID, *Fn);

  } else {
    LLVM_DEBUG(dbgs() << "Found OpenMP runtime function " << Fn->getName()
                      << " with type " << *Fn->getFunctionType() << "\n");
  }

  assert(Fn && "Failed to create OpenMP runtime function");

  // Cast the function to the expected type if necessary.
  Constant *C = ConstantExpr::getBitCast(Fn, FnTy->getPointerTo());
  return {FnTy, C};
}

Function *OpenMPIRBuilder::getOrCreateRuntimeFunctionPtr(RuntimeFunction FnID) {
  FunctionCallee RTLFn = getOrCreateRuntimeFunction(M, FnID);
  auto *Fn = dyn_cast<llvm::Function>(RTLFn.getCallee());
  assert(Fn && "Failed to create OpenMP runtime function pointer");
  return Fn;
}

void OpenMPIRBuilder::initialize() { initializeTypes(M); }

void OpenMPIRBuilder::finalize(Function *Fn, bool AllowExtractorSinking) {
  SmallPtrSet<BasicBlock *, 32> ParallelRegionBlockSet;
  SmallVector<BasicBlock *, 32> Blocks;
  SmallVector<OutlineInfo, 16> DeferredOutlines;
  for (OutlineInfo &OI : OutlineInfos) {
    // Skip functions that have not been finalized yet; this may happen with
    // nested function generation.
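    // (E.g., when finalize(F) is called while an enclosing function still has
    // open regions, that function's OutlineInfos are pushed to
    // DeferredOutlines below and handled by a later finalize call.)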
    if (Fn && OI.getFunction() != Fn) {
      DeferredOutlines.push_back(OI);
      continue;
    }

    ParallelRegionBlockSet.clear();
    Blocks.clear();
    OI.collectBlocks(ParallelRegionBlockSet, Blocks);

    Function *OuterFn = OI.getFunction();
    CodeExtractorAnalysisCache CEAC(*OuterFn);
    CodeExtractor Extractor(Blocks, /* DominatorTree */ nullptr,
                            /* AggregateArgs */ false,
                            /* BlockFrequencyInfo */ nullptr,
                            /* BranchProbabilityInfo */ nullptr,
                            /* AssumptionCache */ nullptr,
                            /* AllowVarArgs */ true,
                            /* AllowAlloca */ true,
                            /* Suffix */ ".omp_par");

    LLVM_DEBUG(dbgs() << "Before outlining: " << *OuterFn << "\n");
    LLVM_DEBUG(dbgs() << "Entry " << OI.EntryBB->getName()
                      << " Exit: " << OI.ExitBB->getName() << "\n");
    assert(Extractor.isEligible() &&
           "Expected OpenMP outlining to be possible!");

    Function *OutlinedFn = Extractor.extractCodeRegion(CEAC);

    LLVM_DEBUG(dbgs() << "After outlining: " << *OuterFn << "\n");
    LLVM_DEBUG(dbgs() << " Outlined function: " << *OutlinedFn << "\n");
    assert(OutlinedFn->getReturnType()->isVoidTy() &&
           "OpenMP outlined functions should not return a value!");

    // For compatibility with the clang CG we move the outlined function after
    // the one with the parallel region.
    OutlinedFn->removeFromParent();
    M.getFunctionList().insertAfter(OuterFn->getIterator(), OutlinedFn);

    // Remove the artificial entry introduced by the extractor right away; we
    // made our own entry block after all.
    {
      BasicBlock &ArtificialEntry = OutlinedFn->getEntryBlock();
      assert(ArtificialEntry.getUniqueSuccessor() == OI.EntryBB);
      assert(OI.EntryBB->getUniquePredecessor() == &ArtificialEntry);
      if (AllowExtractorSinking) {
        // Move instructions from the to-be-deleted ArtificialEntry to the
        // entry basic block of the parallel region. CodeExtractor may have
        // sunk allocas/bitcasts for values that are solely used in the
        // outlined region and do not escape.
        assert(!ArtificialEntry.empty() &&
               "Expected instructions to sink in the outlined region");
        for (BasicBlock::iterator It = ArtificialEntry.begin(),
                                  End = ArtificialEntry.end();
             It != End;) {
          Instruction &I = *It;
          It++;

          if (I.isTerminator())
            continue;

          I.moveBefore(*OI.EntryBB, OI.EntryBB->getFirstInsertionPt());
        }
      }
      OI.EntryBB->moveBefore(&ArtificialEntry);
      ArtificialEntry.eraseFromParent();
    }
    assert(&OutlinedFn->getEntryBlock() == OI.EntryBB);
    assert(OutlinedFn && OutlinedFn->getNumUses() == 1);

    // Run a user callback, e.g. to add attributes.
    if (OI.PostOutlineCB)
      OI.PostOutlineCB(*OutlinedFn);
  }

  // Remove work items that have been completed.
  OutlineInfos = std::move(DeferredOutlines);
}

OpenMPIRBuilder::~OpenMPIRBuilder() {
  assert(OutlineInfos.empty() && "There must be no outstanding outlinings");
}

GlobalValue *OpenMPIRBuilder::createGlobalFlag(unsigned Value, StringRef Name) {
  IntegerType *I32Ty = Type::getInt32Ty(M.getContext());
  auto *GV =
      new GlobalVariable(M, I32Ty,
                         /* isConstant = */ true, GlobalValue::WeakODRLinkage,
                         ConstantInt::get(I32Ty, Value), Name);
  GV->setVisibility(GlobalValue::HiddenVisibility);

  return GV;
}

Constant *OpenMPIRBuilder::getOrCreateIdent(Constant *SrcLocStr,
                                            uint32_t SrcLocStrSize,
                                            IdentFlag LocFlags,
                                            unsigned Reserve2Flags) {
  // Enable "C-mode".
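  // (For reference, the initializer built below has the shape
  //  { i32 0, i32 Flags, i32 Reserve2Flags, i32 SrcLocStrSize, i8* SrcLocStr },
  //  mirroring the runtime's ident_t struct.)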
  LocFlags |= OMP_IDENT_FLAG_KMPC;

  Constant *&Ident =
      IdentMap[{SrcLocStr, uint64_t(LocFlags) << 31 | Reserve2Flags}];
  if (!Ident) {
    Constant *I32Null = ConstantInt::getNullValue(Int32);
    Constant *IdentData[] = {I32Null,
                             ConstantInt::get(Int32, uint32_t(LocFlags)),
                             ConstantInt::get(Int32, Reserve2Flags),
                             ConstantInt::get(Int32, SrcLocStrSize), SrcLocStr};
    Constant *Initializer =
        ConstantStruct::get(OpenMPIRBuilder::Ident, IdentData);

    // Look for an existing encoding of the location + flags; not needed but
    // minimizes the difference to the existing solution while we transition.
    for (GlobalVariable &GV : M.getGlobalList())
      if (GV.getValueType() == OpenMPIRBuilder::Ident && GV.hasInitializer())
        if (GV.getInitializer() == Initializer)
          Ident = &GV;

    if (!Ident) {
      auto *GV = new GlobalVariable(
          M, OpenMPIRBuilder::Ident,
          /* isConstant = */ true, GlobalValue::PrivateLinkage, Initializer, "",
          nullptr, GlobalValue::NotThreadLocal,
          M.getDataLayout().getDefaultGlobalsAddressSpace());
      GV->setUnnamedAddr(GlobalValue::UnnamedAddr::Global);
      GV->setAlignment(Align(8));
      Ident = GV;
    }
  }

  return ConstantExpr::getPointerBitCastOrAddrSpaceCast(Ident, IdentPtr);
}

Constant *OpenMPIRBuilder::getOrCreateSrcLocStr(StringRef LocStr,
                                                uint32_t &SrcLocStrSize) {
  SrcLocStrSize = LocStr.size();
  Constant *&SrcLocStr = SrcLocStrMap[LocStr];
  if (!SrcLocStr) {
    Constant *Initializer =
        ConstantDataArray::getString(M.getContext(), LocStr);

    // Look for an existing encoding of the location; not needed but minimizes
    // the difference to the existing solution while we transition.
    for (GlobalVariable &GV : M.getGlobalList())
      if (GV.isConstant() && GV.hasInitializer() &&
          GV.getInitializer() == Initializer)
        return SrcLocStr = ConstantExpr::getPointerCast(&GV, Int8Ptr);

    SrcLocStr = Builder.CreateGlobalStringPtr(LocStr, /* Name */ "",
                                              /* AddressSpace */ 0, &M);
  }
  return SrcLocStr;
}

Constant *OpenMPIRBuilder::getOrCreateSrcLocStr(StringRef FunctionName,
                                                StringRef FileName,
                                                unsigned Line, unsigned Column,
                                                uint32_t &SrcLocStrSize) {
  SmallString<128> Buffer;
  Buffer.push_back(';');
  Buffer.append(FileName);
  Buffer.push_back(';');
  Buffer.append(FunctionName);
  Buffer.push_back(';');
  Buffer.append(std::to_string(Line));
  Buffer.push_back(';');
  Buffer.append(std::to_string(Column));
  Buffer.push_back(';');
  Buffer.push_back(';');
  return getOrCreateSrcLocStr(Buffer.str(), SrcLocStrSize);
}

Constant *
OpenMPIRBuilder::getOrCreateDefaultSrcLocStr(uint32_t &SrcLocStrSize) {
  StringRef UnknownLoc = ";unknown;unknown;0;0;;";
  return getOrCreateSrcLocStr(UnknownLoc, SrcLocStrSize);
}

Constant *OpenMPIRBuilder::getOrCreateSrcLocStr(DebugLoc DL,
                                                uint32_t &SrcLocStrSize,
                                                Function *F) {
  DILocation *DIL = DL.get();
  if (!DIL)
    return getOrCreateDefaultSrcLocStr(SrcLocStrSize);
  StringRef FileName = M.getName();
  if (DIFile *DIF = DIL->getFile())
    if (Optional<StringRef> Source = DIF->getSource())
      FileName = *Source;
  StringRef Function = DIL->getScope()->getSubprogram()->getName();
  if (Function.empty() && F)
    Function = F->getName();
  return getOrCreateSrcLocStr(Function, FileName, DIL->getLine(),
                              DIL->getColumn(), SrcLocStrSize);
}

Constant *OpenMPIRBuilder::getOrCreateSrcLocStr(
    const LocationDescription &Loc, uint32_t &SrcLocStrSize) {
  return getOrCreateSrcLocStr(Loc.DL, SrcLocStrSize,
                              Loc.IP.getBlock()->getParent());
}

Value *OpenMPIRBuilder::getOrCreateThreadID(Value *Ident) {
  return Builder.CreateCall(
      getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_global_thread_num), Ident,
      "omp_global_thread_num");
}

OpenMPIRBuilder::InsertPointTy
OpenMPIRBuilder::createBarrier(const LocationDescription &Loc, Directive DK,
                               bool ForceSimpleCall, bool CheckCancelFlag) {
  if (!updateToLocation(Loc))
    return Loc.IP;
  return emitBarrierImpl(Loc, DK, ForceSimpleCall, CheckCancelFlag);
}

OpenMPIRBuilder::InsertPointTy
OpenMPIRBuilder::emitBarrierImpl(const LocationDescription &Loc, Directive Kind,
                                 bool ForceSimpleCall, bool CheckCancelFlag) {
  // Build call __kmpc_cancel_barrier(loc, thread_id) or
  //            __kmpc_barrier(loc, thread_id);

  IdentFlag BarrierLocFlags;
  switch (Kind) {
  case OMPD_for:
    BarrierLocFlags = OMP_IDENT_FLAG_BARRIER_IMPL_FOR;
    break;
  case OMPD_sections:
    BarrierLocFlags = OMP_IDENT_FLAG_BARRIER_IMPL_SECTIONS;
    break;
  case OMPD_single:
    BarrierLocFlags = OMP_IDENT_FLAG_BARRIER_IMPL_SINGLE;
    break;
  case OMPD_barrier:
    BarrierLocFlags = OMP_IDENT_FLAG_BARRIER_EXPL;
    break;
  default:
    BarrierLocFlags = OMP_IDENT_FLAG_BARRIER_IMPL;
    break;
  }

  uint32_t SrcLocStrSize;
  Constant *SrcLocStr = getOrCreateSrcLocStr(Loc, SrcLocStrSize);
  Value *Args[] = {
      getOrCreateIdent(SrcLocStr, SrcLocStrSize, BarrierLocFlags),
      getOrCreateThreadID(getOrCreateIdent(SrcLocStr, SrcLocStrSize))};

  // If we are in a cancellable parallel region, barriers are cancellation
  // points.
  // TODO: Check why we would force simple calls or ignore the cancel flag.
  bool UseCancelBarrier =
      !ForceSimpleCall && isLastFinalizationInfoCancellable(OMPD_parallel);

  Value *Result =
      Builder.CreateCall(getOrCreateRuntimeFunctionPtr(
                             UseCancelBarrier ? OMPRTL___kmpc_cancel_barrier
                                              : OMPRTL___kmpc_barrier),
                         Args);

  if (UseCancelBarrier && CheckCancelFlag)
    emitCancelationCheckImpl(Result, OMPD_parallel);

  return Builder.saveIP();
}

OpenMPIRBuilder::InsertPointTy
OpenMPIRBuilder::createCancel(const LocationDescription &Loc,
                              Value *IfCondition,
                              omp::Directive CanceledDirective) {
  if (!updateToLocation(Loc))
    return Loc.IP;

  // LLVM utilities like blocks with terminators.
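  // (The unreachable created below is only a placeholder terminator so the
  // block splitting that follows stays well-formed; it is erased again before
  // this function returns.)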
  auto *UI = Builder.CreateUnreachable();

  Instruction *ThenTI = UI, *ElseTI = nullptr;
  if (IfCondition)
    SplitBlockAndInsertIfThenElse(IfCondition, UI, &ThenTI, &ElseTI);
  Builder.SetInsertPoint(ThenTI);

  Value *CancelKind = nullptr;
  switch (CanceledDirective) {
#define OMP_CANCEL_KIND(Enum, Str, DirectiveEnum, Value)                       \
  case DirectiveEnum:                                                          \
    CancelKind = Builder.getInt32(Value);                                      \
    break;
#include "llvm/Frontend/OpenMP/OMPKinds.def"
  default:
    llvm_unreachable("Unknown cancel kind!");
  }

  uint32_t SrcLocStrSize;
  Constant *SrcLocStr = getOrCreateSrcLocStr(Loc, SrcLocStrSize);
  Value *Ident = getOrCreateIdent(SrcLocStr, SrcLocStrSize);
  Value *Args[] = {Ident, getOrCreateThreadID(Ident), CancelKind};
  Value *Result = Builder.CreateCall(
      getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_cancel), Args);
  auto ExitCB = [this, CanceledDirective, Loc](InsertPointTy IP) {
    if (CanceledDirective == OMPD_parallel) {
      IRBuilder<>::InsertPointGuard IPG(Builder);
      Builder.restoreIP(IP);
      createBarrier(LocationDescription(Builder.saveIP(), Loc.DL),
                    omp::Directive::OMPD_unknown, /* ForceSimpleCall */ false,
                    /* CheckCancelFlag */ false);
    }
  };

  // The actual cancel logic is shared with others, e.g., cancel_barriers.
  emitCancelationCheckImpl(Result, CanceledDirective, ExitCB);

  // Update the insertion point and remove the terminator we introduced.
  Builder.SetInsertPoint(UI->getParent());
  UI->eraseFromParent();

  return Builder.saveIP();
}

void OpenMPIRBuilder::emitCancelationCheckImpl(Value *CancelFlag,
                                               omp::Directive CanceledDirective,
                                               FinalizeCallbackTy ExitCB) {
  assert(isLastFinalizationInfoCancellable(CanceledDirective) &&
         "Unexpected cancellation!");

  // For a cancel barrier we create two new blocks.
  BasicBlock *BB = Builder.GetInsertBlock();
  BasicBlock *NonCancellationBlock;
  if (Builder.GetInsertPoint() == BB->end()) {
    // TODO: This branch will not be needed once we move to the
    // OpenMPIRBuilder codegen completely.
    NonCancellationBlock = BasicBlock::Create(
        BB->getContext(), BB->getName() + ".cont", BB->getParent());
  } else {
    NonCancellationBlock = SplitBlock(BB, &*Builder.GetInsertPoint());
    BB->getTerminator()->eraseFromParent();
    Builder.SetInsertPoint(BB);
  }
  BasicBlock *CancellationBlock = BasicBlock::Create(
      BB->getContext(), BB->getName() + ".cncl", BB->getParent());

  // Jump to them based on the return value.
  Value *Cmp = Builder.CreateIsNull(CancelFlag);
  Builder.CreateCondBr(Cmp, NonCancellationBlock, CancellationBlock,
                       /* TODO weight */ nullptr, nullptr);

  // From the cancellation block we finalize all variables and go to the
  // post finalization block that is known to the FiniCB callback.
  Builder.SetInsertPoint(CancellationBlock);
  if (ExitCB)
    ExitCB(Builder.saveIP());
  auto &FI = FinalizationStack.back();
  FI.FiniCB(Builder.saveIP());

  // The continuation block is where code generation continues.
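  // (Resulting CFG, as a sketch:
  //    BB --[cancel flag == 0]--> BB.cont   ; normal path, codegen resumes
  //      \-[cancel flag != 0]--> BB.cncl    ; finalize, then branch to the
  //                                         ; finalization target)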
  Builder.SetInsertPoint(NonCancellationBlock, NonCancellationBlock->begin());
}

IRBuilder<>::InsertPoint OpenMPIRBuilder::createParallel(
    const LocationDescription &Loc, InsertPointTy OuterAllocaIP,
    BodyGenCallbackTy BodyGenCB, PrivatizeCallbackTy PrivCB,
    FinalizeCallbackTy FiniCB, Value *IfCondition, Value *NumThreads,
    omp::ProcBindKind ProcBind, bool IsCancellable) {
  if (!updateToLocation(Loc))
    return Loc.IP;

  uint32_t SrcLocStrSize;
  Constant *SrcLocStr = getOrCreateSrcLocStr(Loc, SrcLocStrSize);
  Value *Ident = getOrCreateIdent(SrcLocStr, SrcLocStrSize);
  Value *ThreadID = getOrCreateThreadID(Ident);

  if (NumThreads) {
    // Build call __kmpc_push_num_threads(&Ident, global_tid, num_threads)
    Value *Args[] = {
        Ident, ThreadID,
        Builder.CreateIntCast(NumThreads, Int32, /*isSigned*/ false)};
    Builder.CreateCall(
        getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_push_num_threads), Args);
  }

  if (ProcBind != OMP_PROC_BIND_default) {
    // Build call __kmpc_push_proc_bind(&Ident, global_tid, proc_bind)
    Value *Args[] = {
        Ident, ThreadID,
        ConstantInt::get(Int32, unsigned(ProcBind), /*isSigned=*/true)};
    Builder.CreateCall(
        getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_push_proc_bind), Args);
  }

  BasicBlock *InsertBB = Builder.GetInsertBlock();
  Function *OuterFn = InsertBB->getParent();

  // Save the outer alloca block because the insertion iterator may get
  // invalidated and we still need this later.
  BasicBlock *OuterAllocaBlock = OuterAllocaIP.getBlock();

  // Vector to remember instructions we used only during the modeling but which
  // we want to delete at the end.
  SmallVector<Instruction *, 4> ToBeDeleted;

  // Change the location to the outer alloca insertion point to create and
  // initialize the allocas we pass into the parallel region.
  Builder.restoreIP(OuterAllocaIP);
  AllocaInst *TIDAddr = Builder.CreateAlloca(Int32, nullptr, "tid.addr");
  AllocaInst *ZeroAddr = Builder.CreateAlloca(Int32, nullptr, "zero.addr");

  // If there is an if condition we actually use the TIDAddr and ZeroAddr in
  // the program, otherwise we only need them for modeling purposes to get the
  // associated arguments in the outlined function. In the former case,
  // initialize the allocas properly; in the latter case, delete them later.
  if (IfCondition) {
    Builder.CreateStore(Constant::getNullValue(Int32), TIDAddr);
    Builder.CreateStore(Constant::getNullValue(Int32), ZeroAddr);
  } else {
    ToBeDeleted.push_back(TIDAddr);
    ToBeDeleted.push_back(ZeroAddr);
  }

  // Create an artificial insertion point that will also ensure the blocks we
  // are about to split are not degenerated.
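  // (Like in createCancel, the unreachable below is a placeholder terminator;
  // it is erased once the region blocks exist and the caller-facing insertion
  // point has been computed.)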
  auto *UI = new UnreachableInst(Builder.getContext(), InsertBB);

  Instruction *ThenTI = UI, *ElseTI = nullptr;
  if (IfCondition)
    SplitBlockAndInsertIfThenElse(IfCondition, UI, &ThenTI, &ElseTI);

  BasicBlock *ThenBB = ThenTI->getParent();
  BasicBlock *PRegEntryBB = ThenBB->splitBasicBlock(ThenTI, "omp.par.entry");
  BasicBlock *PRegBodyBB =
      PRegEntryBB->splitBasicBlock(ThenTI, "omp.par.region");
  BasicBlock *PRegPreFiniBB =
      PRegBodyBB->splitBasicBlock(ThenTI, "omp.par.pre_finalize");
  BasicBlock *PRegExitBB =
      PRegPreFiniBB->splitBasicBlock(ThenTI, "omp.par.exit");

  auto FiniCBWrapper = [&](InsertPointTy IP) {
    // Hide "open-ended" blocks from the given FiniCB by setting the right jump
    // target to the region exit block.
    if (IP.getBlock()->end() == IP.getPoint()) {
      IRBuilder<>::InsertPointGuard IPG(Builder);
      Builder.restoreIP(IP);
      Instruction *I = Builder.CreateBr(PRegExitBB);
      IP = InsertPointTy(I->getParent(), I->getIterator());
    }
    assert(IP.getBlock()->getTerminator()->getNumSuccessors() == 1 &&
           IP.getBlock()->getTerminator()->getSuccessor(0) == PRegExitBB &&
           "Unexpected insertion point for finalization call!");
    return FiniCB(IP);
  };

  FinalizationStack.push_back({FiniCBWrapper, OMPD_parallel, IsCancellable});

  // Generate the privatization allocas in the block that will become the entry
  // of the outlined function.
  Builder.SetInsertPoint(PRegEntryBB->getTerminator());
  InsertPointTy InnerAllocaIP = Builder.saveIP();

  AllocaInst *PrivTIDAddr =
      Builder.CreateAlloca(Int32, nullptr, "tid.addr.local");
  Instruction *PrivTID = Builder.CreateLoad(Int32, PrivTIDAddr, "tid");

  // Add some fake uses for OpenMP provided arguments.
  ToBeDeleted.push_back(Builder.CreateLoad(Int32, TIDAddr, "tid.addr.use"));
  Instruction *ZeroAddrUse =
      Builder.CreateLoad(Int32, ZeroAddr, "zero.addr.use");
  ToBeDeleted.push_back(ZeroAddrUse);

  // ThenBB
  //   |
  //   V
  // PRegionEntryBB         <- Privatization allocas are placed here.
  //   |
  //   V
  // PRegionBodyBB          <- BodyGen is invoked here.
  //   |
  //   V
  // PRegPreFiniBB          <- The block we will start finalization from.
  //   |
  //   V
  // PRegionExitBB          <- A common exit to simplify block collection.
  //

  LLVM_DEBUG(dbgs() << "Before body codegen: " << *OuterFn << "\n");

  // Let the caller create the body.
  assert(BodyGenCB && "Expected body generation callback!");
  InsertPointTy CodeGenIP(PRegBodyBB, PRegBodyBB->begin());
  BodyGenCB(InnerAllocaIP, CodeGenIP, *PRegPreFiniBB);

  LLVM_DEBUG(dbgs() << "After body codegen: " << *OuterFn << "\n");

  FunctionCallee RTLFn = getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_fork_call);
  if (auto *F = dyn_cast<llvm::Function>(RTLFn.getCallee())) {
    if (!F->hasMetadata(llvm::LLVMContext::MD_callback)) {
      llvm::LLVMContext &Ctx = F->getContext();
      MDBuilder MDB(Ctx);
      // Annotate the callback behavior of the __kmpc_fork_call:
      //  - The callback callee is argument number 2 (microtask).
      //  - The first two arguments of the callback callee are unknown (-1).
      //  - All variadic arguments to the __kmpc_fork_call are passed to the
      //    callback callee.
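      // For illustration, the emitted annotation looks roughly like:
      //   declare !callback !0 void @__kmpc_fork_call(...)
      //   !0 = !{!1}
      //   !1 = !{i64 2, i64 -1, i64 -1, i1 true}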
      F->addMetadata(
          llvm::LLVMContext::MD_callback,
          *llvm::MDNode::get(
              Ctx, {MDB.createCallbackEncoding(2, {-1, -1},
                                               /* VarArgsArePassed */ true)}));
    }
  }

  OutlineInfo OI;
  OI.PostOutlineCB = [=](Function &OutlinedFn) {
    // Add some known attributes.
    OutlinedFn.addParamAttr(0, Attribute::NoAlias);
    OutlinedFn.addParamAttr(1, Attribute::NoAlias);
    OutlinedFn.addFnAttr(Attribute::NoUnwind);
    OutlinedFn.addFnAttr(Attribute::NoRecurse);

    assert(OutlinedFn.arg_size() >= 2 &&
           "Expected at least tid and bounded tid as arguments");
    unsigned NumCapturedVars =
        OutlinedFn.arg_size() - /* tid & bounded tid */ 2;

    CallInst *CI = cast<CallInst>(OutlinedFn.user_back());
    CI->getParent()->setName("omp_parallel");
    Builder.SetInsertPoint(CI);

    // Build call __kmpc_fork_call(Ident, n, microtask, var1, .., varn);
    Value *ForkCallArgs[] = {
        Ident, Builder.getInt32(NumCapturedVars),
        Builder.CreateBitCast(&OutlinedFn, ParallelTaskPtr)};

    SmallVector<Value *, 16> RealArgs;
    RealArgs.append(std::begin(ForkCallArgs), std::end(ForkCallArgs));
    RealArgs.append(CI->arg_begin() + /* tid & bound tid */ 2, CI->arg_end());

    Builder.CreateCall(RTLFn, RealArgs);

    LLVM_DEBUG(dbgs() << "With fork_call placed: "
                      << *Builder.GetInsertBlock()->getParent() << "\n");

    InsertPointTy ExitIP(PRegExitBB, PRegExitBB->end());

    // Initialize the local TID stack location with the argument value.
    Builder.SetInsertPoint(PrivTID);
    Function::arg_iterator OutlinedAI = OutlinedFn.arg_begin();
    Builder.CreateStore(Builder.CreateLoad(Int32, OutlinedAI), PrivTIDAddr);

    // If no "if" clause was present we do not need the call created during
    // outlining, otherwise we reuse it in the serialized parallel region.
    if (!ElseTI) {
      CI->eraseFromParent();
    } else {

      // If an "if" clause was present we are now generating the serialized
      // version into the "else" branch.
      Builder.SetInsertPoint(ElseTI);

      // Build calls __kmpc_serialized_parallel(&Ident, GTid);
      Value *SerializedParallelCallArgs[] = {Ident, ThreadID};
      Builder.CreateCall(
          getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_serialized_parallel),
          SerializedParallelCallArgs);

      // OutlinedFn(&gtid, &zero, CapturedStruct);
      CI->removeFromParent();
      Builder.Insert(CI);

      // __kmpc_end_serialized_parallel(&Ident, GTid);
      Value *EndArgs[] = {Ident, ThreadID};
      Builder.CreateCall(
          getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_end_serialized_parallel),
          EndArgs);

      LLVM_DEBUG(dbgs() << "With serialized parallel region: "
                        << *Builder.GetInsertBlock()->getParent() << "\n");
    }

    for (Instruction *I : ToBeDeleted)
      I->eraseFromParent();
  };

  // Adjust the finalization stack, verify the adjustment, and call the
  // finalize function a last time to finalize values between the pre-fini
  // block and the exit block if we left the parallel region "the normal way".
  auto FiniInfo = FinalizationStack.pop_back_val();
  (void)FiniInfo;
  assert(FiniInfo.DK == OMPD_parallel &&
         "Unexpected finalization stack state!");

  Instruction *PRegPreFiniTI = PRegPreFiniBB->getTerminator();

  InsertPointTy PreFiniIP(PRegPreFiniBB, PRegPreFiniTI->getIterator());
  FiniCB(PreFiniIP);

  OI.EntryBB = PRegEntryBB;
  OI.ExitBB = PRegExitBB;

  SmallPtrSet<BasicBlock *, 32> ParallelRegionBlockSet;
  SmallVector<BasicBlock *, 32> Blocks;
  OI.collectBlocks(ParallelRegionBlockSet, Blocks);

  // Ensure a single exit node for the outlined region by creating one.
  // We might have multiple incoming edges to the exit now due to finalizations,
  // e.g., cancel calls that cause the control flow to leave the region.
  BasicBlock *PRegOutlinedExitBB = PRegExitBB;
  PRegExitBB = SplitBlock(PRegExitBB, &*PRegExitBB->getFirstInsertionPt());
  PRegOutlinedExitBB->setName("omp.par.outlined.exit");
  Blocks.push_back(PRegOutlinedExitBB);

  CodeExtractorAnalysisCache CEAC(*OuterFn);
  CodeExtractor Extractor(Blocks, /* DominatorTree */ nullptr,
                          /* AggregateArgs */ false,
                          /* BlockFrequencyInfo */ nullptr,
                          /* BranchProbabilityInfo */ nullptr,
                          /* AssumptionCache */ nullptr,
                          /* AllowVarArgs */ true,
                          /* AllowAlloca */ true,
                          /* Suffix */ ".omp_par");

  // Find inputs to, outputs from the code region.
  BasicBlock *CommonExit = nullptr;
  SetVector<Value *> Inputs, Outputs, SinkingCands, HoistingCands;
  Extractor.findAllocas(CEAC, SinkingCands, HoistingCands, CommonExit);
  Extractor.findInputsOutputs(Inputs, Outputs, SinkingCands);

  LLVM_DEBUG(dbgs() << "Before privatization: " << *OuterFn << "\n");

  FunctionCallee TIDRTLFn =
      getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_global_thread_num);

  auto PrivHelper = [&](Value &V) {
    if (&V == TIDAddr || &V == ZeroAddr)
      return;

    SetVector<Use *> Uses;
    for (Use &U : V.uses())
      if (auto *UserI = dyn_cast<Instruction>(U.getUser()))
        if (ParallelRegionBlockSet.count(UserI->getParent()))
          Uses.insert(&U);

    // __kmpc_fork_call expects extra arguments as pointers. If the input
    // already has a pointer type, everything is fine. Otherwise, store the
    // value onto stack and load it back inside the to-be-outlined region. This
    // will ensure only the pointer will be passed to the function.
    // FIXME: if there are more than 15 trailing arguments, they must be
    //        additionally packed in a struct.
    Value *Inner = &V;
    if (!V.getType()->isPointerTy()) {
      IRBuilder<>::InsertPointGuard Guard(Builder);
      LLVM_DEBUG(llvm::dbgs() << "Forwarding input as pointer: " << V << "\n");

      Builder.restoreIP(OuterAllocaIP);
      Value *Ptr =
          Builder.CreateAlloca(V.getType(), nullptr, V.getName() + ".reloaded");

      // Store to stack at end of the block that currently branches to the
      // entry block of the to-be-outlined region.
      Builder.SetInsertPoint(InsertBB,
                             InsertBB->getTerminator()->getIterator());
      Builder.CreateStore(&V, Ptr);

      // Load back next to allocations in the to-be-outlined region.
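      // (Sketch of the resulting pattern for a non-pointer input %v:
      //    %v.reloaded = alloca <ty>             ; at the outer alloca point
      //    store <ty> %v, <ty>* %v.reloaded      ; before entering the region
      //    %inner = load <ty>, <ty>* %v.reloaded ; below, inside the region)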
      Builder.restoreIP(InnerAllocaIP);
      Inner = Builder.CreateLoad(V.getType(), Ptr);
    }

    Value *ReplacementValue = nullptr;
    CallInst *CI = dyn_cast<CallInst>(&V);
    if (CI && CI->getCalledFunction() == TIDRTLFn.getCallee()) {
      ReplacementValue = PrivTID;
    } else {
      Builder.restoreIP(
          PrivCB(InnerAllocaIP, Builder.saveIP(), V, *Inner, ReplacementValue));
      assert(ReplacementValue &&
             "Expected copy/create callback to set replacement value!");
      if (ReplacementValue == &V)
        return;
    }

    for (Use *UPtr : Uses)
      UPtr->set(ReplacementValue);
  };

  // Reset the inner alloca insertion as it will be used for loading the values
  // wrapped into pointers before passing them into the to-be-outlined region.
  // Configure it to insert immediately after the fake use of the zero address
  // so that they are available in the generated body and so that the
  // OpenMP-related values (thread ID and zero address pointers) remain leading
  // in the argument list.
  InnerAllocaIP = IRBuilder<>::InsertPoint(
      ZeroAddrUse->getParent(), ZeroAddrUse->getNextNode()->getIterator());

  // Reset the outer alloca insertion point to the entry of the relevant block
  // in case it was invalidated.
  OuterAllocaIP = IRBuilder<>::InsertPoint(
      OuterAllocaBlock, OuterAllocaBlock->getFirstInsertionPt());

  for (Value *Input : Inputs) {
    LLVM_DEBUG(dbgs() << "Captured input: " << *Input << "\n");
    PrivHelper(*Input);
  }
  LLVM_DEBUG({
    for (Value *Output : Outputs)
      dbgs() << "Captured output: " << *Output << "\n";
  });
  assert(Outputs.empty() &&
         "OpenMP outlining should not produce live-out values!");

  LLVM_DEBUG(dbgs() << "After privatization: " << *OuterFn << "\n");
  LLVM_DEBUG({
    for (auto *BB : Blocks)
      dbgs() << " PBR: " << BB->getName() << "\n";
  });

  // Register the outlined info.
  addOutlineInfo(std::move(OI));

  InsertPointTy AfterIP(UI->getParent(), UI->getParent()->end());
  UI->eraseFromParent();

  return AfterIP;
}

void OpenMPIRBuilder::emitFlush(const LocationDescription &Loc) {
  // Build call void __kmpc_flush(ident_t *loc)
  uint32_t SrcLocStrSize;
  Constant *SrcLocStr = getOrCreateSrcLocStr(Loc, SrcLocStrSize);
  Value *Args[] = {getOrCreateIdent(SrcLocStr, SrcLocStrSize)};

  Builder.CreateCall(getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_flush), Args);
}

void OpenMPIRBuilder::createFlush(const LocationDescription &Loc) {
  if (!updateToLocation(Loc))
    return;
  emitFlush(Loc);
}

void OpenMPIRBuilder::emitTaskwaitImpl(const LocationDescription &Loc) {
  // Build call kmp_int32 __kmpc_omp_taskwait(ident_t *loc, kmp_int32
  // global_tid);
  uint32_t SrcLocStrSize;
  Constant *SrcLocStr = getOrCreateSrcLocStr(Loc, SrcLocStrSize);
  Value *Ident = getOrCreateIdent(SrcLocStr, SrcLocStrSize);
  Value *Args[] = {Ident, getOrCreateThreadID(Ident)};

  // Ignore return result until untied tasks are supported.
  Builder.CreateCall(getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_omp_taskwait),
                     Args);
}

void OpenMPIRBuilder::createTaskwait(const LocationDescription &Loc) {
  if (!updateToLocation(Loc))
    return;
  emitTaskwaitImpl(Loc);
}

void OpenMPIRBuilder::emitTaskyieldImpl(const LocationDescription &Loc) {
  // Build call __kmpc_omp_taskyield(loc, thread_id, 0);
  uint32_t SrcLocStrSize;
  Constant *SrcLocStr = getOrCreateSrcLocStr(Loc, SrcLocStrSize);
  Value *Ident = getOrCreateIdent(SrcLocStr, SrcLocStrSize);
  Constant *I32Null = ConstantInt::getNullValue(Int32);
  Value *Args[] = {Ident, getOrCreateThreadID(Ident), I32Null};

  Builder.CreateCall(getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_omp_taskyield),
                     Args);
}

void OpenMPIRBuilder::createTaskyield(const LocationDescription &Loc) {
  if (!updateToLocation(Loc))
    return;
  emitTaskyieldImpl(Loc);
}

OpenMPIRBuilder::InsertPointTy OpenMPIRBuilder::createSections(
    const LocationDescription &Loc, InsertPointTy AllocaIP,
    ArrayRef<StorableBodyGenCallbackTy> SectionCBs, PrivatizeCallbackTy PrivCB,
    FinalizeCallbackTy FiniCB, bool IsCancellable, bool IsNowait) {
  if (!updateToLocation(Loc))
    return Loc.IP;

  auto FiniCBWrapper = [&](InsertPointTy IP) {
    if (IP.getBlock()->end() != IP.getPoint())
      return FiniCB(IP);
    // This must be done; otherwise any nested constructs using
    // FinalizeOMPRegion will fail because that function requires the
    // Finalization Basic Block to have a terminator, which is already removed
    // by EmitOMPRegionBody.
    // IP is currently at the cancellation block.
    // We need to backtrack to the condition block to fetch
    // the exit block and create a branch from the cancellation
    // block to the exit block.
    IRBuilder<>::InsertPointGuard IPG(Builder);
    Builder.restoreIP(IP);
    auto *CaseBB = IP.getBlock()->getSinglePredecessor();
    auto *CondBB = CaseBB->getSinglePredecessor()->getSinglePredecessor();
    auto *ExitBB = CondBB->getTerminator()->getSuccessor(1);
    Instruction *I = Builder.CreateBr(ExitBB);
    IP = InsertPointTy(I->getParent(), I->getIterator());
    return FiniCB(IP);
  };

  FinalizationStack.push_back({FiniCBWrapper, OMPD_sections, IsCancellable});

  // Each section is emitted as a switch case.
  // Each finalization callback is handled from clang.EmitOMPSectionDirective()
  // -> OMP.createSection(), which generates the IR for each section.
  // Iterate through all sections and emit a switch construct:
  // switch (IV) {
  // case 0:
  //   <SectionStmt[0]>;
  //   break;
  // ...
  // case <NumSection> - 1:
  //   <SectionStmt[<NumSection> - 1]>;
  //   break;
  // }
  // ...
  // section_loop.after:
  //   <FiniCB>;
  auto LoopBodyGenCB = [&](InsertPointTy CodeGenIP, Value *IndVar) {
    auto *CurFn = CodeGenIP.getBlock()->getParent();
    auto *ForIncBB = CodeGenIP.getBlock()->getSingleSuccessor();
    auto *ForExitBB = CodeGenIP.getBlock()
                          ->getSinglePredecessor()
                          ->getTerminator()
                          ->getSuccessor(1);
    SwitchInst *SwitchStmt = Builder.CreateSwitch(IndVar, ForIncBB);
    Builder.restoreIP(CodeGenIP);
    unsigned CaseNumber = 0;
    for (auto SectionCB : SectionCBs) {
      auto *CaseBB = BasicBlock::Create(M.getContext(),
                                        "omp_section_loop.body.case", CurFn);
      SwitchStmt->addCase(Builder.getInt32(CaseNumber), CaseBB);
      Builder.SetInsertPoint(CaseBB);
      SectionCB(InsertPointTy(), Builder.saveIP(), *ForExitBB);
      CaseNumber++;
    }
    // Remove the existing terminator from the body BB since there can be no
    // terminators after switch/case.
    CodeGenIP.getBlock()->getTerminator()->eraseFromParent();
  };
  // Loop body ends here.
  // LowerBound, UpperBound, and Stride for createCanonicalLoop.
  Type *I32Ty = Type::getInt32Ty(M.getContext());
  Value *LB = ConstantInt::get(I32Ty, 0);
  Value *UB = ConstantInt::get(I32Ty, SectionCBs.size());
  Value *ST = ConstantInt::get(I32Ty, 1);
  llvm::CanonicalLoopInfo *LoopInfo = createCanonicalLoop(
      Loc, LoopBodyGenCB, LB, UB, ST, true, false, AllocaIP, "section_loop");
  Builder.SetInsertPoint(AllocaIP.getBlock()->getTerminator());
  AllocaIP = Builder.saveIP();
  InsertPointTy AfterIP =
      applyStaticWorkshareLoop(Loc.DL, LoopInfo, AllocaIP, !IsNowait);
  BasicBlock *LoopAfterBB = AfterIP.getBlock();
  Instruction *SplitPos = LoopAfterBB->getTerminator();
  if (!isa_and_nonnull<BranchInst>(SplitPos))
    SplitPos = new UnreachableInst(Builder.getContext(), LoopAfterBB);
  // ExitBB after LoopAfterBB because LoopAfterBB is used for the
  // FinalizationCB, which requires a BB with a branch.
  BasicBlock *ExitBB =
      LoopAfterBB->splitBasicBlock(SplitPos, "omp_sections.end");
  SplitPos->eraseFromParent();

  // Apply the finalization callback in LoopAfterBB.
  auto FiniInfo = FinalizationStack.pop_back_val();
  assert(FiniInfo.DK == OMPD_sections &&
         "Unexpected finalization stack state!");
  Builder.SetInsertPoint(LoopAfterBB->getTerminator());
  FiniInfo.FiniCB(Builder.saveIP());
  Builder.SetInsertPoint(ExitBB);

  return Builder.saveIP();
}

OpenMPIRBuilder::InsertPointTy
OpenMPIRBuilder::createSection(const LocationDescription &Loc,
                               BodyGenCallbackTy BodyGenCB,
                               FinalizeCallbackTy FiniCB) {
  if (!updateToLocation(Loc))
    return Loc.IP;

  auto FiniCBWrapper = [&](InsertPointTy IP) {
    if (IP.getBlock()->end() != IP.getPoint())
      return FiniCB(IP);
    // This must be done; otherwise any nested constructs using
    // FinalizeOMPRegion will fail because that function requires the
    // Finalization Basic Block to have a terminator, which is already removed
    // by EmitOMPRegionBody.
    // IP is currently at the cancellation block.
    // We need to backtrack to the condition block to fetch
    // the exit block and create a branch from the cancellation
    // block to the exit block.
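    // (The predecessor chain walked below mirrors the loop structure emitted
    // by createSections: case block <- loop body <- loop condition, whose
    // second successor is the exit block.)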
    IRBuilder<>::InsertPointGuard IPG(Builder);
    Builder.restoreIP(IP);
    auto *CaseBB = Loc.IP.getBlock();
    auto *CondBB = CaseBB->getSinglePredecessor()->getSinglePredecessor();
    auto *ExitBB = CondBB->getTerminator()->getSuccessor(1);
    Instruction *I = Builder.CreateBr(ExitBB);
    IP = InsertPointTy(I->getParent(), I->getIterator());
    return FiniCB(IP);
  };

  Directive OMPD = Directive::OMPD_sections;
  // Since we are using the finalization callback here, HasFinalize and
  // IsCancellable have to be true.
  return EmitOMPInlinedRegion(OMPD, nullptr, nullptr, BodyGenCB, FiniCBWrapper,
                              /*Conditional*/ false, /*hasFinalize*/ true,
                              /*IsCancellable*/ true);
}

/// Create a function with a unique name and a "void (i8*, i8*)" signature in
/// the given module and return it.
Function *getFreshReductionFunc(Module &M) {
  Type *VoidTy = Type::getVoidTy(M.getContext());
  Type *Int8PtrTy = Type::getInt8PtrTy(M.getContext());
  auto *FuncTy =
      FunctionType::get(VoidTy, {Int8PtrTy, Int8PtrTy}, /* IsVarArg */ false);
  return Function::Create(FuncTy, GlobalVariable::InternalLinkage,
                          M.getDataLayout().getDefaultGlobalsAddressSpace(),
                          ".omp.reduction.func", &M);
}

OpenMPIRBuilder::InsertPointTy OpenMPIRBuilder::createReductions(
    const LocationDescription &Loc, InsertPointTy AllocaIP,
    ArrayRef<ReductionInfo> ReductionInfos, bool IsNoWait) {
  for (const ReductionInfo &RI : ReductionInfos) {
    (void)RI;
    assert(RI.Variable && "expected non-null variable");
    assert(RI.PrivateVariable && "expected non-null private variable");
    assert(RI.ReductionGen && "expected non-null reduction generator callback");
    assert(RI.Variable->getType() == RI.PrivateVariable->getType() &&
           "expected variables and their private equivalents to have the same "
           "type");
    assert(RI.Variable->getType()->isPointerTy() &&
           "expected variables to be pointers");
  }

  if (!updateToLocation(Loc))
    return InsertPointTy();

  BasicBlock *InsertBlock = Loc.IP.getBlock();
  BasicBlock *ContinuationBlock =
      InsertBlock->splitBasicBlock(Loc.IP.getPoint(), "reduce.finalize");
  InsertBlock->getTerminator()->eraseFromParent();

  // Create and populate an array of type-erased pointers to the private
  // reduction values.
  unsigned NumReductions = ReductionInfos.size();
  Type *RedArrayTy = ArrayType::get(Builder.getInt8PtrTy(), NumReductions);
  Builder.restoreIP(AllocaIP);
  Value *RedArray = Builder.CreateAlloca(RedArrayTy, nullptr, "red.array");

  Builder.SetInsertPoint(InsertBlock, InsertBlock->end());

  for (auto En : enumerate(ReductionInfos)) {
    unsigned Index = En.index();
    const ReductionInfo &RI = En.value();
    Value *RedArrayElemPtr = Builder.CreateConstInBoundsGEP2_64(
        RedArrayTy, RedArray, 0, Index, "red.array.elem." + Twine(Index));
    Value *Casted =
        Builder.CreateBitCast(RI.PrivateVariable, Builder.getInt8PtrTy(),
                              "private.red.var." + Twine(Index) + ".casted");
    Builder.CreateStore(Casted, RedArrayElemPtr);
  }

  // Emit a call to the runtime function that orchestrates the reduction.
  // Declare the reduction function in the process.
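  // (The call emitted below has, as a sketch, the shape:
  //    %r = call i32 @__kmpc_reduce[_nowait](loc, tid, <n>, <array size>,
  //                                          red.array, @.omp.reduction.func,
  //                                          lock)
  //  where %r selects the non-atomic (1) or atomic (2) path handled by the
  //  switch further down; any other value skips to the continuation block.)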
  Function *Func = Builder.GetInsertBlock()->getParent();
  Module *Module = Func->getParent();
  Value *RedArrayPtr =
      Builder.CreateBitCast(RedArray, Builder.getInt8PtrTy(), "red.array.ptr");
  uint32_t SrcLocStrSize;
  Constant *SrcLocStr = getOrCreateSrcLocStr(Loc, SrcLocStrSize);
  bool CanGenerateAtomic =
      llvm::all_of(ReductionInfos, [](const ReductionInfo &RI) {
        return RI.AtomicReductionGen;
      });
  Value *Ident = getOrCreateIdent(SrcLocStr, SrcLocStrSize,
                                  CanGenerateAtomic
                                      ? IdentFlag::OMP_IDENT_FLAG_ATOMIC_REDUCE
                                      : IdentFlag(0));
  Value *ThreadId = getOrCreateThreadID(Ident);
  Constant *NumVariables = Builder.getInt32(NumReductions);
  const DataLayout &DL = Module->getDataLayout();
  unsigned RedArrayByteSize = DL.getTypeStoreSize(RedArrayTy);
  Constant *RedArraySize = Builder.getInt64(RedArrayByteSize);
  Function *ReductionFunc = getFreshReductionFunc(*Module);
  Value *Lock = getOMPCriticalRegionLock(".reduction");
  Function *ReduceFunc = getOrCreateRuntimeFunctionPtr(
      IsNoWait ? RuntimeFunction::OMPRTL___kmpc_reduce_nowait
               : RuntimeFunction::OMPRTL___kmpc_reduce);
  CallInst *ReduceCall =
      Builder.CreateCall(ReduceFunc,
                         {Ident, ThreadId, NumVariables, RedArraySize,
                          RedArrayPtr, ReductionFunc, Lock},
                         "reduce");

  // Create final reduction entry blocks for the atomic and non-atomic case.
  // Emit IR that dispatches control flow to one of the blocks based on the
  // reduction supporting the atomic mode.
  BasicBlock *NonAtomicRedBlock =
      BasicBlock::Create(Module->getContext(), "reduce.switch.nonatomic", Func);
  BasicBlock *AtomicRedBlock =
      BasicBlock::Create(Module->getContext(), "reduce.switch.atomic", Func);
  SwitchInst *Switch =
      Builder.CreateSwitch(ReduceCall, ContinuationBlock, /* NumCases */ 2);
  Switch->addCase(Builder.getInt32(1), NonAtomicRedBlock);
  Switch->addCase(Builder.getInt32(2), AtomicRedBlock);

  // Populate the non-atomic reduction using the elementwise reduction function.
  // This loads the elements from the global and private variables and reduces
  // them before storing back the result to the global variable.
  Builder.SetInsertPoint(NonAtomicRedBlock);
  for (auto En : enumerate(ReductionInfos)) {
    const ReductionInfo &RI = En.value();
    Type *ValueType = RI.ElementType;
    Value *RedValue = Builder.CreateLoad(ValueType, RI.Variable,
                                         "red.value." + Twine(En.index()));
    Value *PrivateRedValue =
        Builder.CreateLoad(ValueType, RI.PrivateVariable,
                           "red.private.value." + Twine(En.index()));
    Value *Reduced;
    Builder.restoreIP(
        RI.ReductionGen(Builder.saveIP(), RedValue, PrivateRedValue, Reduced));
    if (!Builder.GetInsertBlock())
      return InsertPointTy();
    Builder.CreateStore(Reduced, RI.Variable);
  }
  Function *EndReduceFunc = getOrCreateRuntimeFunctionPtr(
      IsNoWait ? RuntimeFunction::OMPRTL___kmpc_end_reduce_nowait
               : RuntimeFunction::OMPRTL___kmpc_end_reduce);
  Builder.CreateCall(EndReduceFunc, {Ident, ThreadId, Lock});
  Builder.CreateBr(ContinuationBlock);

  // Populate the atomic reduction using the atomic elementwise reduction
  // function. There are no loads/stores here because they will be happening
  // inside the atomic elementwise reduction.
  Builder.SetInsertPoint(AtomicRedBlock);
  if (CanGenerateAtomic) {
    for (const ReductionInfo &RI : ReductionInfos) {
      Builder.restoreIP(RI.AtomicReductionGen(Builder.saveIP(), RI.ElementType,
                                              RI.Variable, RI.PrivateVariable));
      if (!Builder.GetInsertBlock())
        return InsertPointTy();
    }
    Builder.CreateBr(ContinuationBlock);
  } else {
    Builder.CreateUnreachable();
  }

  // Populate the outlined reduction function using the elementwise reduction
  // function. Partial values are extracted from the type-erased array of
  // pointers to private variables.
  BasicBlock *ReductionFuncBlock =
      BasicBlock::Create(Module->getContext(), "", ReductionFunc);
  Builder.SetInsertPoint(ReductionFuncBlock);
  Value *LHSArrayPtr = Builder.CreateBitCast(ReductionFunc->getArg(0),
                                             RedArrayTy->getPointerTo());
  Value *RHSArrayPtr = Builder.CreateBitCast(ReductionFunc->getArg(1),
                                             RedArrayTy->getPointerTo());
  for (auto En : enumerate(ReductionInfos)) {
    const ReductionInfo &RI = En.value();
    Value *LHSI8PtrPtr = Builder.CreateConstInBoundsGEP2_64(
        RedArrayTy, LHSArrayPtr, 0, En.index());
    Value *LHSI8Ptr = Builder.CreateLoad(Builder.getInt8PtrTy(), LHSI8PtrPtr);
    Value *LHSPtr = Builder.CreateBitCast(LHSI8Ptr, RI.Variable->getType());
    Value *LHS = Builder.CreateLoad(RI.ElementType, LHSPtr);
    Value *RHSI8PtrPtr = Builder.CreateConstInBoundsGEP2_64(
        RedArrayTy, RHSArrayPtr, 0, En.index());
    Value *RHSI8Ptr = Builder.CreateLoad(Builder.getInt8PtrTy(), RHSI8PtrPtr);
    Value *RHSPtr =
        Builder.CreateBitCast(RHSI8Ptr, RI.PrivateVariable->getType());
    Value *RHS = Builder.CreateLoad(RI.ElementType, RHSPtr);
    Value *Reduced;
    Builder.restoreIP(RI.ReductionGen(Builder.saveIP(), LHS, RHS, Reduced));
    if (!Builder.GetInsertBlock())
      return InsertPointTy();
    Builder.CreateStore(Reduced, LHSPtr);
  }
  Builder.CreateRetVoid();

  Builder.SetInsertPoint(ContinuationBlock);
  return Builder.saveIP();
}

OpenMPIRBuilder::InsertPointTy
OpenMPIRBuilder::createMaster(const LocationDescription &Loc,
                              BodyGenCallbackTy BodyGenCB,
                              FinalizeCallbackTy FiniCB) {

  if (!updateToLocation(Loc))
    return Loc.IP;

  Directive OMPD = Directive::OMPD_master;
  uint32_t SrcLocStrSize;
  Constant *SrcLocStr = getOrCreateSrcLocStr(Loc, SrcLocStrSize);
  Value *Ident = getOrCreateIdent(SrcLocStr, SrcLocStrSize);
  Value *ThreadId = getOrCreateThreadID(Ident);
  Value *Args[] = {Ident, ThreadId};

  Function *EntryRTLFn = getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_master);
  Instruction *EntryCall = Builder.CreateCall(EntryRTLFn, Args);

  Function *ExitRTLFn = getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_end_master);
  Instruction *ExitCall = Builder.CreateCall(ExitRTLFn, Args);

  return EmitOMPInlinedRegion(OMPD, EntryCall, ExitCall, BodyGenCB, FiniCB,
                              /*Conditional*/ true, /*hasFinalize*/ true);
}

OpenMPIRBuilder::InsertPointTy
OpenMPIRBuilder::createMasked(const LocationDescription &Loc,
                              BodyGenCallbackTy BodyGenCB,
                              FinalizeCallbackTy FiniCB, Value *Filter) {
  if (!updateToLocation(Loc))
    return Loc.IP;

  Directive OMPD = Directive::OMPD_masked;
  uint32_t SrcLocStrSize;
  Constant *SrcLocStr = getOrCreateSrcLocStr(Loc, SrcLocStrSize);
  Value *Ident = getOrCreateIdent(SrcLocStr, SrcLocStrSize);
  Value *ThreadId =
      getOrCreateThreadID(Ident);
  Value *Args[] = {Ident, ThreadId, Filter};
  Value *ArgsEnd[] = {Ident, ThreadId};

  Function *EntryRTLFn = getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_masked);
  Instruction *EntryCall = Builder.CreateCall(EntryRTLFn, Args);

  Function *ExitRTLFn = getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_end_masked);
  Instruction *ExitCall = Builder.CreateCall(ExitRTLFn, ArgsEnd);

  return EmitOMPInlinedRegion(OMPD, EntryCall, ExitCall, BodyGenCB, FiniCB,
                              /*Conditional*/ true, /*hasFinalize*/ true);
}

CanonicalLoopInfo *OpenMPIRBuilder::createLoopSkeleton(
    DebugLoc DL, Value *TripCount, Function *F, BasicBlock *PreInsertBefore,
    BasicBlock *PostInsertBefore, const Twine &Name) {
  Module *M = F->getParent();
  LLVMContext &Ctx = M->getContext();
  Type *IndVarTy = TripCount->getType();

  // Create the basic block structure.
  BasicBlock *Preheader =
      BasicBlock::Create(Ctx, "omp_" + Name + ".preheader", F, PreInsertBefore);
  BasicBlock *Header =
      BasicBlock::Create(Ctx, "omp_" + Name + ".header", F, PreInsertBefore);
  BasicBlock *Cond =
      BasicBlock::Create(Ctx, "omp_" + Name + ".cond", F, PreInsertBefore);
  BasicBlock *Body =
      BasicBlock::Create(Ctx, "omp_" + Name + ".body", F, PreInsertBefore);
  BasicBlock *Latch =
      BasicBlock::Create(Ctx, "omp_" + Name + ".inc", F, PostInsertBefore);
  BasicBlock *Exit =
      BasicBlock::Create(Ctx, "omp_" + Name + ".exit", F, PostInsertBefore);
  BasicBlock *After =
      BasicBlock::Create(Ctx, "omp_" + Name + ".after", F, PostInsertBefore);

  // Use the specified DebugLoc for new instructions.
  Builder.SetCurrentDebugLocation(DL);

  Builder.SetInsertPoint(Preheader);
  Builder.CreateBr(Header);

  Builder.SetInsertPoint(Header);
  PHINode *IndVarPHI = Builder.CreatePHI(IndVarTy, 2, "omp_" + Name + ".iv");
  IndVarPHI->addIncoming(ConstantInt::get(IndVarTy, 0), Preheader);
  Builder.CreateBr(Cond);

  Builder.SetInsertPoint(Cond);
  Value *Cmp =
      Builder.CreateICmpULT(IndVarPHI, TripCount, "omp_" + Name + ".cmp");
  Builder.CreateCondBr(Cmp, Body, Exit);

  Builder.SetInsertPoint(Body);
  Builder.CreateBr(Latch);

  Builder.SetInsertPoint(Latch);
  Value *Next = Builder.CreateAdd(IndVarPHI, ConstantInt::get(IndVarTy, 1),
                                  "omp_" + Name + ".next", /*HasNUW=*/true);
  Builder.CreateBr(Header);
  IndVarPHI->addIncoming(Next, Latch);

  Builder.SetInsertPoint(Exit);
  Builder.CreateBr(After);

  // Remember and return the canonical control flow.
  LoopInfos.emplace_front();
  CanonicalLoopInfo *CL = &LoopInfos.front();

  CL->Header = Header;
  CL->Cond = Cond;
  CL->Latch = Latch;
  CL->Exit = Exit;

#ifndef NDEBUG
  CL->assertOK();
#endif
  return CL;
}

CanonicalLoopInfo *
OpenMPIRBuilder::createCanonicalLoop(const LocationDescription &Loc,
                                     LoopBodyGenCallbackTy BodyGenCB,
                                     Value *TripCount, const Twine &Name) {
  BasicBlock *BB = Loc.IP.getBlock();
  BasicBlock *NextBB = BB->getNextNode();

  CanonicalLoopInfo *CL = createLoopSkeleton(Loc.DL, TripCount, BB->getParent(),
                                             NextBB, NextBB, Name);
  BasicBlock *After = CL->getAfter();

  // If the location is not set, don't connect the loop.
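  // (The skeleton built by createLoopSkeleton has, as a sketch, the shape
  //    preheader -> header -> cond --[iv < tripcount]--> body -> inc -> header
  //                                \--[otherwise]-------> exit -> after
  //  and is only wired into the surrounding CFG below when the location is
  //  set.)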
  if (updateToLocation(Loc)) {
    // Split the loop at the insertion point: Branch to the preheader and move
    // every following instruction to after the loop (the After BB). Also, the
    // new successor is the loop's after block.
    Builder.CreateBr(CL->getPreheader());
    After->getInstList().splice(After->begin(), BB->getInstList(),
                                Builder.GetInsertPoint(), BB->end());
    After->replaceSuccessorsPhiUsesWith(BB, After);
  }

  // Emit the body content. We do it after connecting the loop to the CFG to
  // avoid that the callback encounters degenerate BBs.
  BodyGenCB(CL->getBodyIP(), CL->getIndVar());

#ifndef NDEBUG
  CL->assertOK();
#endif
  return CL;
}

CanonicalLoopInfo *OpenMPIRBuilder::createCanonicalLoop(
    const LocationDescription &Loc, LoopBodyGenCallbackTy BodyGenCB,
    Value *Start, Value *Stop, Value *Step, bool IsSigned, bool InclusiveStop,
    InsertPointTy ComputeIP, const Twine &Name) {

  // Consider the following difficulties (assuming 8-bit signed integers):
  // * Adding \p Step to the loop counter which passes \p Stop may overflow:
  //     DO I = 1, 100, 50
  // * A \p Step of INT_MIN cannot be normalized to a positive direction:
  //     DO I = 100, 0, -128

  // Start, Stop and Step must be of the same integer type.
  auto *IndVarTy = cast<IntegerType>(Start->getType());
  assert(IndVarTy == Stop->getType() && "Stop type mismatch");
  assert(IndVarTy == Step->getType() && "Step type mismatch");

  LocationDescription ComputeLoc =
      ComputeIP.isSet() ? LocationDescription(ComputeIP, Loc.DL) : Loc;
  updateToLocation(ComputeLoc);

  ConstantInt *Zero = ConstantInt::get(IndVarTy, 0);
  ConstantInt *One = ConstantInt::get(IndVarTy, 1);

  // Like Step, but always positive.
  Value *Incr = Step;

  // Distance between Start and Stop; always positive.
  Value *Span;

  // Condition checking whether no iterations are executed at all, e.g. because
  // UB < LB.
  Value *ZeroCmp;

  if (IsSigned) {
    // Ensure that the increment is positive. If not, negate it and invert LB
    // and UB.
    Value *IsNeg = Builder.CreateICmpSLT(Step, Zero);
    Incr = Builder.CreateSelect(IsNeg, Builder.CreateNeg(Step), Step);
    Value *LB = Builder.CreateSelect(IsNeg, Stop, Start);
    Value *UB = Builder.CreateSelect(IsNeg, Start, Stop);
    Span = Builder.CreateSub(UB, LB, "", false, true);
    ZeroCmp = Builder.CreateICmp(
        InclusiveStop ? CmpInst::ICMP_SLT : CmpInst::ICMP_SLE, UB, LB);
  } else {
    Span = Builder.CreateSub(Stop, Start, "", true);
    ZeroCmp = Builder.CreateICmp(
        InclusiveStop ? CmpInst::ICMP_ULT : CmpInst::ICMP_ULE, Stop, Start);
  }

  Value *CountIfLooping;
  if (InclusiveStop) {
    CountIfLooping = Builder.CreateAdd(Builder.CreateUDiv(Span, Incr), One);
  } else {
    // Avoid incrementing past stop since it could overflow.
    Value *CountIfTwo = Builder.CreateAdd(
        Builder.CreateUDiv(Builder.CreateSub(Span, One), Incr), One);
    Value *OneCmp = Builder.CreateICmp(InclusiveStop ?
CmpInst::ICMP_ULT : CmpInst::ICMP_ULE, Span, Incr); 1456 CountIfLooping = Builder.CreateSelect(OneCmp, One, CountIfTwo); 1457 } 1458 Value *TripCount = Builder.CreateSelect(ZeroCmp, Zero, CountIfLooping, 1459 "omp_" + Name + ".tripcount"); 1460 1461 auto BodyGen = [=](InsertPointTy CodeGenIP, Value *IV) { 1462 Builder.restoreIP(CodeGenIP); 1463 Value *Span = Builder.CreateMul(IV, Step); 1464 Value *IndVar = Builder.CreateAdd(Span, Start); 1465 BodyGenCB(Builder.saveIP(), IndVar); 1466 }; 1467 LocationDescription LoopLoc = ComputeIP.isSet() ? Loc.IP : Builder.saveIP(); 1468 return createCanonicalLoop(LoopLoc, BodyGen, TripCount, Name); 1469 } 1470 1471 // Returns an LLVM function to call for initializing loop bounds using OpenMP 1472 // static scheduling depending on `type`. Only i32 and i64 are supported by the 1473 // runtime. Always interpret integers as unsigned similarly to 1474 // CanonicalLoopInfo. 1475 static FunctionCallee getKmpcForStaticInitForType(Type *Ty, Module &M, 1476 OpenMPIRBuilder &OMPBuilder) { 1477 unsigned Bitwidth = Ty->getIntegerBitWidth(); 1478 if (Bitwidth == 32) 1479 return OMPBuilder.getOrCreateRuntimeFunction( 1480 M, omp::RuntimeFunction::OMPRTL___kmpc_for_static_init_4u); 1481 if (Bitwidth == 64) 1482 return OMPBuilder.getOrCreateRuntimeFunction( 1483 M, omp::RuntimeFunction::OMPRTL___kmpc_for_static_init_8u); 1484 llvm_unreachable("unknown OpenMP loop iterator bitwidth"); 1485 } 1486 1487 // Sets the number of loop iterations to the given value. This value must be 1488 // valid in the condition block (i.e., defined in the preheader) and is 1489 // interpreted as an unsigned integer. 1490 void setCanonicalLoopTripCount(CanonicalLoopInfo *CLI, Value *TripCount) { 1491 Instruction *CmpI = &CLI->getCond()->front(); 1492 assert(isa<CmpInst>(CmpI) && "First inst must compare IV with TripCount"); 1493 CmpI->setOperand(1, TripCount); 1494 CLI->assertOK(); 1495 } 1496 1497 OpenMPIRBuilder::InsertPointTy 1498 OpenMPIRBuilder::applyStaticWorkshareLoop(DebugLoc DL, CanonicalLoopInfo *CLI, 1499 InsertPointTy AllocaIP, 1500 bool NeedsBarrier, Value *Chunk) { 1501 assert(CLI->isValid() && "Requires a valid canonical loop"); 1502 1503 // Set up the source location value for OpenMP runtime. 1504 Builder.restoreIP(CLI->getPreheaderIP()); 1505 Builder.SetCurrentDebugLocation(DL); 1506 1507 uint32_t SrcLocStrSize; 1508 Constant *SrcLocStr = getOrCreateSrcLocStr(DL, SrcLocStrSize); 1509 Value *SrcLoc = getOrCreateIdent(SrcLocStr, SrcLocStrSize); 1510 1511 // Declare useful OpenMP runtime functions. 1512 Value *IV = CLI->getIndVar(); 1513 Type *IVTy = IV->getType(); 1514 FunctionCallee StaticInit = getKmpcForStaticInitForType(IVTy, M, *this); 1515 FunctionCallee StaticFini = 1516 getOrCreateRuntimeFunction(M, omp::OMPRTL___kmpc_for_static_fini); 1517 1518 // Allocate space for computed loop bounds as expected by the "init" function. 1519 Builder.restoreIP(AllocaIP); 1520 Type *I32Type = Type::getInt32Ty(M.getContext()); 1521 Value *PLastIter = Builder.CreateAlloca(I32Type, nullptr, "p.lastiter"); 1522 Value *PLowerBound = Builder.CreateAlloca(IVTy, nullptr, "p.lowerbound"); 1523 Value *PUpperBound = Builder.CreateAlloca(IVTy, nullptr, "p.upperbound"); 1524 Value *PStride = Builder.CreateAlloca(IVTy, nullptr, "p.stride"); 1525 1526 // At the end of the preheader, prepare for calling the "init" function by 1527 // storing the current loop bounds into the allocated space. A canonical loop 1528 // always iterates from 0 to trip-count with step 1. 
Note that "init" expects 1529 // and produces an inclusive upper bound. 1530 Builder.SetInsertPoint(CLI->getPreheader()->getTerminator()); 1531 Constant *Zero = ConstantInt::get(IVTy, 0); 1532 Constant *One = ConstantInt::get(IVTy, 1); 1533 Builder.CreateStore(Zero, PLowerBound); 1534 Value *UpperBound = Builder.CreateSub(CLI->getTripCount(), One); 1535 Builder.CreateStore(UpperBound, PUpperBound); 1536 Builder.CreateStore(One, PStride); 1537 1538 // FIXME: schedule(static) is NOT the same as schedule(static,1) 1539 if (!Chunk) 1540 Chunk = One; 1541 1542 Value *ThreadNum = getOrCreateThreadID(SrcLoc); 1543 1544 Constant *SchedulingType = 1545 ConstantInt::get(I32Type, static_cast<int>(OMPScheduleType::Static)); 1546 1547 // Call the "init" function and update the trip count of the loop with the 1548 // value it produced. 1549 Builder.CreateCall(StaticInit, 1550 {SrcLoc, ThreadNum, SchedulingType, PLastIter, PLowerBound, 1551 PUpperBound, PStride, One, Chunk}); 1552 Value *LowerBound = Builder.CreateLoad(IVTy, PLowerBound); 1553 Value *InclusiveUpperBound = Builder.CreateLoad(IVTy, PUpperBound); 1554 Value *TripCountMinusOne = Builder.CreateSub(InclusiveUpperBound, LowerBound); 1555 Value *TripCount = Builder.CreateAdd(TripCountMinusOne, One); 1556 setCanonicalLoopTripCount(CLI, TripCount); 1557 1558 // Update all uses of the induction variable except the one in the condition 1559 // block that compares it with the actual upper bound, and the increment in 1560 // the latch block. 1561 // TODO: this can eventually move to CanonicalLoopInfo or to a new 1562 // CanonicalLoopInfoUpdater interface. 1563 Builder.SetInsertPoint(CLI->getBody(), CLI->getBody()->getFirstInsertionPt()); 1564 Value *UpdatedIV = Builder.CreateAdd(IV, LowerBound); 1565 IV->replaceUsesWithIf(UpdatedIV, [&](Use &U) { 1566 auto *Instr = dyn_cast<Instruction>(U.getUser()); 1567 return !Instr || 1568 (Instr->getParent() != CLI->getCond() && 1569 Instr->getParent() != CLI->getLatch() && Instr != UpdatedIV); 1570 }); 1571 1572 // In the "exit" block, call the "fini" function. 1573 Builder.SetInsertPoint(CLI->getExit(), 1574 CLI->getExit()->getTerminator()->getIterator()); 1575 Builder.CreateCall(StaticFini, {SrcLoc, ThreadNum}); 1576 1577 // Add the barrier if requested. 1578 if (NeedsBarrier) 1579 createBarrier(LocationDescription(Builder.saveIP(), DL), 1580 omp::Directive::OMPD_for, /* ForceSimpleCall */ false, 1581 /* CheckCancelFlag */ false); 1582 1583 InsertPointTy AfterIP = CLI->getAfterIP(); 1584 CLI->invalidate(); 1585 1586 return AfterIP; 1587 } 1588 1589 OpenMPIRBuilder::InsertPointTy 1590 OpenMPIRBuilder::applyWorkshareLoop(DebugLoc DL, CanonicalLoopInfo *CLI, 1591 InsertPointTy AllocaIP, bool NeedsBarrier) { 1592 // Currently only supports static schedules. 1593 return applyStaticWorkshareLoop(DL, CLI, AllocaIP, NeedsBarrier); 1594 } 1595 1596 /// Returns an LLVM function to call for initializing loop bounds using OpenMP 1597 /// dynamic scheduling depending on `type`. Only i32 and i64 are supported by 1598 /// the runtime. Always interpret integers as unsigned similarly to 1599 /// CanonicalLoopInfo. 
1600 static FunctionCallee 1601 getKmpcForDynamicInitForType(Type *Ty, Module &M, OpenMPIRBuilder &OMPBuilder) { 1602 unsigned Bitwidth = Ty->getIntegerBitWidth(); 1603 if (Bitwidth == 32) 1604 return OMPBuilder.getOrCreateRuntimeFunction( 1605 M, omp::RuntimeFunction::OMPRTL___kmpc_dispatch_init_4u); 1606 if (Bitwidth == 64) 1607 return OMPBuilder.getOrCreateRuntimeFunction( 1608 M, omp::RuntimeFunction::OMPRTL___kmpc_dispatch_init_8u); 1609 llvm_unreachable("unknown OpenMP loop iterator bitwidth"); 1610 } 1611 1612 /// Returns an LLVM function to call for updating the next loop using OpenMP 1613 /// dynamic scheduling depending on `type`. Only i32 and i64 are supported by 1614 /// the runtime. Always interpret integers as unsigned similarly to 1615 /// CanonicalLoopInfo. 1616 static FunctionCallee 1617 getKmpcForDynamicNextForType(Type *Ty, Module &M, OpenMPIRBuilder &OMPBuilder) { 1618 unsigned Bitwidth = Ty->getIntegerBitWidth(); 1619 if (Bitwidth == 32) 1620 return OMPBuilder.getOrCreateRuntimeFunction( 1621 M, omp::RuntimeFunction::OMPRTL___kmpc_dispatch_next_4u); 1622 if (Bitwidth == 64) 1623 return OMPBuilder.getOrCreateRuntimeFunction( 1624 M, omp::RuntimeFunction::OMPRTL___kmpc_dispatch_next_8u); 1625 llvm_unreachable("unknown OpenMP loop iterator bitwidth"); 1626 } 1627 1628 OpenMPIRBuilder::InsertPointTy OpenMPIRBuilder::applyDynamicWorkshareLoop( 1629 DebugLoc DL, CanonicalLoopInfo *CLI, InsertPointTy AllocaIP, 1630 OMPScheduleType SchedType, bool NeedsBarrier, Value *Chunk) { 1631 assert(CLI->isValid() && "Requires a valid canonical loop"); 1632 1633 // Set up the source location value for OpenMP runtime. 1634 Builder.SetCurrentDebugLocation(DL); 1635 1636 uint32_t SrcLocStrSize; 1637 Constant *SrcLocStr = getOrCreateSrcLocStr(DL, SrcLocStrSize); 1638 Value *SrcLoc = getOrCreateIdent(SrcLocStr, SrcLocStrSize); 1639 1640 // Declare useful OpenMP runtime functions. 1641 Value *IV = CLI->getIndVar(); 1642 Type *IVTy = IV->getType(); 1643 FunctionCallee DynamicInit = getKmpcForDynamicInitForType(IVTy, M, *this); 1644 FunctionCallee DynamicNext = getKmpcForDynamicNextForType(IVTy, M, *this); 1645 1646 // Allocate space for computed loop bounds as expected by the "init" function. 1647 Builder.restoreIP(AllocaIP); 1648 Type *I32Type = Type::getInt32Ty(M.getContext()); 1649 Value *PLastIter = Builder.CreateAlloca(I32Type, nullptr, "p.lastiter"); 1650 Value *PLowerBound = Builder.CreateAlloca(IVTy, nullptr, "p.lowerbound"); 1651 Value *PUpperBound = Builder.CreateAlloca(IVTy, nullptr, "p.upperbound"); 1652 Value *PStride = Builder.CreateAlloca(IVTy, nullptr, "p.stride"); 1653 1654 // At the end of the preheader, prepare for calling the "init" function by 1655 // storing the current loop bounds into the allocated space. A canonical loop 1656 // always iterates from 0 to trip-count with step 1. Note that "init" expects 1657 // and produces an inclusive upper bound. 1658 BasicBlock *PreHeader = CLI->getPreheader(); 1659 Builder.SetInsertPoint(PreHeader->getTerminator()); 1660 Constant *One = ConstantInt::get(IVTy, 1); 1661 Builder.CreateStore(One, PLowerBound); 1662 Value *UpperBound = CLI->getTripCount(); 1663 Builder.CreateStore(UpperBound, PUpperBound); 1664 Builder.CreateStore(One, PStride); 1665 1666 BasicBlock *Header = CLI->getHeader(); 1667 BasicBlock *Exit = CLI->getExit(); 1668 BasicBlock *Cond = CLI->getCond(); 1669 InsertPointTy AfterIP = CLI->getAfterIP(); 1670 1671 // The CLI will be "broken" in the code below, as the loop is no longer 1672 // a valid canonical loop. 
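  // Sketch of what is emitted below (illustrative pseudo-IR, not the exact
  // instructions; the dispatch callees are the _4u/_8u variants chosen above):
  //
  //   preheader:
  //     __kmpc_dispatch_init(loc, tid, sched, /*lb=*/1, tripcount, /*step=*/1,
  //                          chunk)
  //     br %outer.cond
  //   outer.cond:
  //     %more = __kmpc_dispatch_next(loc, tid, &last, &lb, &ub, &stride)
  //     br %more, %header, %exit     ; the header's IV restarts at lb - 1
  //
  // The inner condition is rewritten to compare against the ub produced by
  // dispatch_next, and the inner exit is redirected back to outer.cond.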
1673 1674 if (!Chunk) 1675 Chunk = One; 1676 1677 Value *ThreadNum = getOrCreateThreadID(SrcLoc); 1678 1679 Constant *SchedulingType = 1680 ConstantInt::get(I32Type, static_cast<int>(SchedType)); 1681 1682 // Call the "init" function. 1683 Builder.CreateCall(DynamicInit, 1684 {SrcLoc, ThreadNum, SchedulingType, /* LowerBound */ One, 1685 UpperBound, /* step */ One, Chunk}); 1686 1687 // An outer loop around the existing one. 1688 BasicBlock *OuterCond = BasicBlock::Create( 1689 PreHeader->getContext(), Twine(PreHeader->getName()) + ".outer.cond", 1690 PreHeader->getParent()); 1691 // This needs to be 32-bit always, so can't use the IVTy Zero above. 1692 Builder.SetInsertPoint(OuterCond, OuterCond->getFirstInsertionPt()); 1693 Value *Res = 1694 Builder.CreateCall(DynamicNext, {SrcLoc, ThreadNum, PLastIter, 1695 PLowerBound, PUpperBound, PStride}); 1696 Constant *Zero32 = ConstantInt::get(I32Type, 0); 1697 Value *MoreWork = Builder.CreateCmp(CmpInst::ICMP_NE, Res, Zero32); 1698 Value *LowerBound = 1699 Builder.CreateSub(Builder.CreateLoad(IVTy, PLowerBound), One, "lb"); 1700 Builder.CreateCondBr(MoreWork, Header, Exit); 1701 1702 // Change PHI-node in loop header to use outer cond rather than preheader, 1703 // and set IV to the LowerBound. 1704 Instruction *Phi = &Header->front(); 1705 auto *PI = cast<PHINode>(Phi); 1706 PI->setIncomingBlock(0, OuterCond); 1707 PI->setIncomingValue(0, LowerBound); 1708 1709 // Then set the pre-header to jump to the OuterCond 1710 Instruction *Term = PreHeader->getTerminator(); 1711 auto *Br = cast<BranchInst>(Term); 1712 Br->setSuccessor(0, OuterCond); 1713 1714 // Modify the inner condition: 1715 // * Use the UpperBound returned from the DynamicNext call. 1716 // * jump to the loop outer loop when done with one of the inner loops. 1717 Builder.SetInsertPoint(Cond, Cond->getFirstInsertionPt()); 1718 UpperBound = Builder.CreateLoad(IVTy, PUpperBound, "ub"); 1719 Instruction *Comp = &*Builder.GetInsertPoint(); 1720 auto *CI = cast<CmpInst>(Comp); 1721 CI->setOperand(1, UpperBound); 1722 // Redirect the inner exit to branch to outer condition. 1723 Instruction *Branch = &Cond->back(); 1724 auto *BI = cast<BranchInst>(Branch); 1725 assert(BI->getSuccessor(1) == Exit); 1726 BI->setSuccessor(1, OuterCond); 1727 1728 // Add the barrier if requested. 1729 if (NeedsBarrier) { 1730 Builder.SetInsertPoint(&Exit->back()); 1731 createBarrier(LocationDescription(Builder.saveIP(), DL), 1732 omp::Directive::OMPD_for, /* ForceSimpleCall */ false, 1733 /* CheckCancelFlag */ false); 1734 } 1735 1736 CLI->invalidate(); 1737 return AfterIP; 1738 } 1739 1740 /// Make \p Source branch to \p Target. 1741 /// 1742 /// Handles two situations: 1743 /// * \p Source already has an unconditional branch. 1744 /// * \p Source is a degenerate block (no terminator because the BB is 1745 /// the current head of the IR construction). 1746 static void redirectTo(BasicBlock *Source, BasicBlock *Target, DebugLoc DL) { 1747 if (Instruction *Term = Source->getTerminator()) { 1748 auto *Br = cast<BranchInst>(Term); 1749 assert(!Br->isConditional() && 1750 "BB's terminator must be an unconditional branch (or degenerate)"); 1751 BasicBlock *Succ = Br->getSuccessor(0); 1752 Succ->removePredecessor(Source, /*KeepOneInputPHIs=*/true); 1753 Br->setSuccessor(0, Target); 1754 return; 1755 } 1756 1757 auto *NewBr = BranchInst::Create(Target, Source); 1758 NewBr->setDebugLoc(DL); 1759 } 1760 1761 /// Redirect all edges that branch to \p OldTarget to \p NewTarget. 
/// That is, after this \p OldTarget will be orphaned.
static void redirectAllPredecessorsTo(BasicBlock *OldTarget,
                                      BasicBlock *NewTarget, DebugLoc DL) {
  for (BasicBlock *Pred : make_early_inc_range(predecessors(OldTarget)))
    redirectTo(Pred, NewTarget, DL);
}

/// Determine which blocks in \p BBs are reachable from outside and remove the
/// unreachable ones from their parent function.
static void removeUnusedBlocksFromParent(ArrayRef<BasicBlock *> BBs) {
  SmallPtrSet<BasicBlock *, 6> BBsToErase{BBs.begin(), BBs.end()};
  auto HasRemainingUses = [&BBsToErase](BasicBlock *BB) {
    for (Use &U : BB->uses()) {
      auto *UseInst = dyn_cast<Instruction>(U.getUser());
      if (!UseInst)
        continue;
      if (BBsToErase.count(UseInst->getParent()))
        continue;
      return true;
    }
    return false;
  };

  while (true) {
    bool Changed = false;
    for (BasicBlock *BB : make_early_inc_range(BBsToErase)) {
      if (HasRemainingUses(BB)) {
        BBsToErase.erase(BB);
        Changed = true;
      }
    }
    if (!Changed)
      break;
  }

  SmallVector<BasicBlock *, 7> BBVec(BBsToErase.begin(), BBsToErase.end());
  DeleteDeadBlocks(BBVec);
}

CanonicalLoopInfo *
OpenMPIRBuilder::collapseLoops(DebugLoc DL, ArrayRef<CanonicalLoopInfo *> Loops,
                               InsertPointTy ComputeIP) {
  assert(Loops.size() >= 1 && "At least one loop required");
  size_t NumLoops = Loops.size();

  // Nothing to do if there is already just one loop.
  if (NumLoops == 1)
    return Loops.front();

  CanonicalLoopInfo *Outermost = Loops.front();
  CanonicalLoopInfo *Innermost = Loops.back();
  BasicBlock *OrigPreheader = Outermost->getPreheader();
  BasicBlock *OrigAfter = Outermost->getAfter();
  Function *F = OrigPreheader->getParent();

  // Loop control blocks that may become orphaned later.
  SmallVector<BasicBlock *, 12> OldControlBBs;
  OldControlBBs.reserve(6 * Loops.size());
  for (CanonicalLoopInfo *Loop : Loops)
    Loop->collectControlBlocks(OldControlBBs);

  // Set up the IRBuilder for inserting the trip count computation.
  Builder.SetCurrentDebugLocation(DL);
  if (ComputeIP.isSet())
    Builder.restoreIP(ComputeIP);
  else
    Builder.restoreIP(Outermost->getPreheaderIP());

  // Derive the collapsed loop's trip count.
  // TODO: Find common/largest indvar type.
  Value *CollapsedTripCount = nullptr;
  for (CanonicalLoopInfo *L : Loops) {
    assert(L->isValid() &&
           "All loops to collapse must be valid canonical loops");
    Value *OrigTripCount = L->getTripCount();
    if (!CollapsedTripCount) {
      CollapsedTripCount = OrigTripCount;
      continue;
    }

    // TODO: Enable UndefinedBehaviorSanitizer to diagnose an overflow here.
    CollapsedTripCount = Builder.CreateMul(CollapsedTripCount, OrigTripCount,
                                           {}, /*HasNUW=*/true);
  }

  // Create the collapsed loop control flow.
  CanonicalLoopInfo *Result =
      createLoopSkeleton(DL, CollapsedTripCount, F,
                         OrigPreheader->getNextNode(), OrigAfter, "collapsed");

  // Build the collapsed loop body code.
  // Start with deriving the input loop induction variables from the collapsed
  // one, using a divmod scheme. To preserve the original loops' order, the
  // innermost loop uses the least significant bits.
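  // For example (illustrative), collapsing two loops with trip counts 3
  // (outer) and 4 (inner) gives a collapsed trip count of 12, and the
  // original induction variables are recovered from the collapsed one as
  //   inner.iv = collapsed.iv % 4   (least significant "digit")
  //   outer.iv = collapsed.iv / 4   (remaining bits)
  // The code below generalizes this to arbitrary nest depths.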
  Builder.restoreIP(Result->getBodyIP());

  Value *Leftover = Result->getIndVar();
  SmallVector<Value *> NewIndVars;
  NewIndVars.resize(NumLoops);
  for (int i = NumLoops - 1; i >= 1; --i) {
    Value *OrigTripCount = Loops[i]->getTripCount();

    Value *NewIndVar = Builder.CreateURem(Leftover, OrigTripCount);
    NewIndVars[i] = NewIndVar;

    Leftover = Builder.CreateUDiv(Leftover, OrigTripCount);
  }
  // Outermost loop gets all the remaining bits.
  NewIndVars[0] = Leftover;

  // Construct the loop body control flow.
  // We progressively construct the branch structure following the direction
  // of the control flow: the leading in-between code, the loop nest body, the
  // trailing in-between code, and finally rejoining the collapsed loop's
  // latch. ContinueBlock and ContinuePred keep track of the source(s) of the
  // next edge. If ContinueBlock is set, continue with that block. If
  // ContinuePred is set, use its predecessors as sources.
  BasicBlock *ContinueBlock = Result->getBody();
  BasicBlock *ContinuePred = nullptr;
  auto ContinueWith = [&ContinueBlock, &ContinuePred, DL](BasicBlock *Dest,
                                                          BasicBlock *NextSrc) {
    if (ContinueBlock)
      redirectTo(ContinueBlock, Dest, DL);
    else
      redirectAllPredecessorsTo(ContinuePred, Dest, DL);

    ContinueBlock = nullptr;
    ContinuePred = NextSrc;
  };

  // The code before the nested loop of each level.
  // Because we are sinking it into the nest, it will be executed more often
  // than the original loop. More sophisticated schemes could keep track of
  // what the in-between code is and instantiate it only once per thread.
  for (size_t i = 0; i < NumLoops - 1; ++i)
    ContinueWith(Loops[i]->getBody(), Loops[i + 1]->getHeader());

  // Connect the loop nest body.
  ContinueWith(Innermost->getBody(), Innermost->getLatch());

  // The code after the nested loop at each level.
  for (size_t i = NumLoops - 1; i > 0; --i)
    ContinueWith(Loops[i]->getAfter(), Loops[i - 1]->getLatch());

  // Connect the finished loop to the collapsed loop latch.
  ContinueWith(Result->getLatch(), nullptr);

  // Replace the input loops with the new collapsed loop.
  redirectTo(Outermost->getPreheader(), Result->getPreheader(), DL);
  redirectTo(Result->getAfter(), Outermost->getAfter(), DL);

  // Replace the input loop indvars with the derived ones.
  for (size_t i = 0; i < NumLoops; ++i)
    Loops[i]->getIndVar()->replaceAllUsesWith(NewIndVars[i]);

  // Remove unused parts of the input loops.
  removeUnusedBlocksFromParent(OldControlBBs);

  for (CanonicalLoopInfo *L : Loops)
    L->invalidate();

#ifndef NDEBUG
  Result->assertOK();
#endif
  return Result;
}

std::vector<CanonicalLoopInfo *>
OpenMPIRBuilder::tileLoops(DebugLoc DL, ArrayRef<CanonicalLoopInfo *> Loops,
                           ArrayRef<Value *> TileSizes) {
  assert(TileSizes.size() == Loops.size() &&
         "Must pass as many tile sizes as there are loops");
  int NumLoops = Loops.size();
  assert(NumLoops >= 1 && "At least one loop to tile required");

  CanonicalLoopInfo *OutermostLoop = Loops.front();
  CanonicalLoopInfo *InnermostLoop = Loops.back();
  Function *F = OutermostLoop->getBody()->getParent();
  BasicBlock *InnerEnter = InnermostLoop->getBody();
  BasicBlock *InnerLatch = InnermostLoop->getLatch();

  // Loop control blocks that may become orphaned later.
  SmallVector<BasicBlock *, 12> OldControlBBs;
  OldControlBBs.reserve(6 * Loops.size());
  for (CanonicalLoopInfo *Loop : Loops)
    Loop->collectControlBlocks(OldControlBBs);

  // Collect original trip counts and induction variables to be accessible by
  // index. Also, the structure of the original loops is not preserved during
  // the construction of the tiled loops, so do it before we scavenge the BBs
  // of any original CanonicalLoopInfo.
  SmallVector<Value *, 4> OrigTripCounts, OrigIndVars;
  for (CanonicalLoopInfo *L : Loops) {
    assert(L->isValid() && "All input loops must be valid canonical loops");
    OrigTripCounts.push_back(L->getTripCount());
    OrigIndVars.push_back(L->getIndVar());
  }

  // Collect the code between loop headers. These may contain SSA definitions
  // that are used in the loop nest body. To be usable within the innermost
  // body, these BasicBlocks will be sunk into the loop nest body. That is,
  // these instructions may be executed more often than before the tiling.
  // TODO: It would be sufficient to only sink them into the body of the
  // corresponding tile loop.
  SmallVector<std::pair<BasicBlock *, BasicBlock *>, 4> InbetweenCode;
  for (int i = 0; i < NumLoops - 1; ++i) {
    CanonicalLoopInfo *Surrounding = Loops[i];
    CanonicalLoopInfo *Nested = Loops[i + 1];

    BasicBlock *EnterBB = Surrounding->getBody();
    BasicBlock *ExitBB = Nested->getHeader();
    InbetweenCode.emplace_back(EnterBB, ExitBB);
  }

  // Compute the trip counts of the floor loops.
  Builder.SetCurrentDebugLocation(DL);
  Builder.restoreIP(OutermostLoop->getPreheaderIP());
  SmallVector<Value *, 4> FloorCount, FloorRems;
  for (int i = 0; i < NumLoops; ++i) {
    Value *TileSize = TileSizes[i];
    Value *OrigTripCount = OrigTripCounts[i];
    Type *IVType = OrigTripCount->getType();

    Value *FloorTripCount = Builder.CreateUDiv(OrigTripCount, TileSize);
    Value *FloorTripRem = Builder.CreateURem(OrigTripCount, TileSize);

    // 0 if the tile size divides the trip count, 1 otherwise.
    // 1 means we need an additional iteration for a partial tile.
    //
    // Unfortunately we cannot just use the roundup-formula
    //   (tripcount + tilesize - 1) / tilesize
    // because the summation might overflow. We do not want to introduce
    // undefined behavior when the untiled loop nest did not.
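    // Worked example (illustrative): with a trip count of 10 and a tile size
    // of 4, FloorTripCount = 10 / 4 = 2 and FloorTripRem = 10 % 4 = 2. The
    // non-zero remainder adds one extra floor iteration below for a partial
    // tile of 2, giving 3 floor iterations in total.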
1995 Value *FloorTripOverflow = 1996 Builder.CreateICmpNE(FloorTripRem, ConstantInt::get(IVType, 0)); 1997 1998 FloorTripOverflow = Builder.CreateZExt(FloorTripOverflow, IVType); 1999 FloorTripCount = 2000 Builder.CreateAdd(FloorTripCount, FloorTripOverflow, 2001 "omp_floor" + Twine(i) + ".tripcount", true); 2002 2003 // Remember some values for later use. 2004 FloorCount.push_back(FloorTripCount); 2005 FloorRems.push_back(FloorTripRem); 2006 } 2007 2008 // Generate the new loop nest, from the outermost to the innermost. 2009 std::vector<CanonicalLoopInfo *> Result; 2010 Result.reserve(NumLoops * 2); 2011 2012 // The basic block of the surrounding loop that enters the nest generated 2013 // loop. 2014 BasicBlock *Enter = OutermostLoop->getPreheader(); 2015 2016 // The basic block of the surrounding loop where the inner code should 2017 // continue. 2018 BasicBlock *Continue = OutermostLoop->getAfter(); 2019 2020 // Where the next loop basic block should be inserted. 2021 BasicBlock *OutroInsertBefore = InnermostLoop->getExit(); 2022 2023 auto EmbeddNewLoop = 2024 [this, DL, F, InnerEnter, &Enter, &Continue, &OutroInsertBefore]( 2025 Value *TripCount, const Twine &Name) -> CanonicalLoopInfo * { 2026 CanonicalLoopInfo *EmbeddedLoop = createLoopSkeleton( 2027 DL, TripCount, F, InnerEnter, OutroInsertBefore, Name); 2028 redirectTo(Enter, EmbeddedLoop->getPreheader(), DL); 2029 redirectTo(EmbeddedLoop->getAfter(), Continue, DL); 2030 2031 // Setup the position where the next embedded loop connects to this loop. 2032 Enter = EmbeddedLoop->getBody(); 2033 Continue = EmbeddedLoop->getLatch(); 2034 OutroInsertBefore = EmbeddedLoop->getLatch(); 2035 return EmbeddedLoop; 2036 }; 2037 2038 auto EmbeddNewLoops = [&Result, &EmbeddNewLoop](ArrayRef<Value *> TripCounts, 2039 const Twine &NameBase) { 2040 for (auto P : enumerate(TripCounts)) { 2041 CanonicalLoopInfo *EmbeddedLoop = 2042 EmbeddNewLoop(P.value(), NameBase + Twine(P.index())); 2043 Result.push_back(EmbeddedLoop); 2044 } 2045 }; 2046 2047 EmbeddNewLoops(FloorCount, "floor"); 2048 2049 // Within the innermost floor loop, emit the code that computes the tile 2050 // sizes. 2051 Builder.SetInsertPoint(Enter->getTerminator()); 2052 SmallVector<Value *, 4> TileCounts; 2053 for (int i = 0; i < NumLoops; ++i) { 2054 CanonicalLoopInfo *FloorLoop = Result[i]; 2055 Value *TileSize = TileSizes[i]; 2056 2057 Value *FloorIsEpilogue = 2058 Builder.CreateICmpEQ(FloorLoop->getIndVar(), FloorCount[i]); 2059 Value *TileTripCount = 2060 Builder.CreateSelect(FloorIsEpilogue, FloorRems[i], TileSize); 2061 2062 TileCounts.push_back(TileTripCount); 2063 } 2064 2065 // Create the tile loops. 2066 EmbeddNewLoops(TileCounts, "tile"); 2067 2068 // Insert the inbetween code into the body. 2069 BasicBlock *BodyEnter = Enter; 2070 BasicBlock *BodyEntered = nullptr; 2071 for (std::pair<BasicBlock *, BasicBlock *> P : InbetweenCode) { 2072 BasicBlock *EnterBB = P.first; 2073 BasicBlock *ExitBB = P.second; 2074 2075 if (BodyEnter) 2076 redirectTo(BodyEnter, EnterBB, DL); 2077 else 2078 redirectAllPredecessorsTo(BodyEntered, EnterBB, DL); 2079 2080 BodyEnter = nullptr; 2081 BodyEntered = ExitBB; 2082 } 2083 2084 // Append the original loop nest body into the generated loop nest body. 
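  // (For orientation: for a single input loop with tile size TS, the nest
  // generated so far is, schematically,
  //    for (floor = 0; floor < FloorTripCount; ++floor)
  //      for (tile = 0; tile < TileTripCount; ++tile)
  //        <body with orig_iv = floor * TS + tile>
  // The induction variable rewrite happens further below; sketch only.)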
2085 if (BodyEnter) 2086 redirectTo(BodyEnter, InnerEnter, DL); 2087 else 2088 redirectAllPredecessorsTo(BodyEntered, InnerEnter, DL); 2089 redirectAllPredecessorsTo(InnerLatch, Continue, DL); 2090 2091 // Replace the original induction variable with an induction variable computed 2092 // from the tile and floor induction variables. 2093 Builder.restoreIP(Result.back()->getBodyIP()); 2094 for (int i = 0; i < NumLoops; ++i) { 2095 CanonicalLoopInfo *FloorLoop = Result[i]; 2096 CanonicalLoopInfo *TileLoop = Result[NumLoops + i]; 2097 Value *OrigIndVar = OrigIndVars[i]; 2098 Value *Size = TileSizes[i]; 2099 2100 Value *Scale = 2101 Builder.CreateMul(Size, FloorLoop->getIndVar(), {}, /*HasNUW=*/true); 2102 Value *Shift = 2103 Builder.CreateAdd(Scale, TileLoop->getIndVar(), {}, /*HasNUW=*/true); 2104 OrigIndVar->replaceAllUsesWith(Shift); 2105 } 2106 2107 // Remove unused parts of the original loops. 2108 removeUnusedBlocksFromParent(OldControlBBs); 2109 2110 for (CanonicalLoopInfo *L : Loops) 2111 L->invalidate(); 2112 2113 #ifndef NDEBUG 2114 for (CanonicalLoopInfo *GenL : Result) 2115 GenL->assertOK(); 2116 #endif 2117 return Result; 2118 } 2119 2120 /// Attach loop metadata \p Properties to the loop described by \p Loop. If the 2121 /// loop already has metadata, the loop properties are appended. 2122 static void addLoopMetadata(CanonicalLoopInfo *Loop, 2123 ArrayRef<Metadata *> Properties) { 2124 assert(Loop->isValid() && "Expecting a valid CanonicalLoopInfo"); 2125 2126 // Nothing to do if no property to attach. 2127 if (Properties.empty()) 2128 return; 2129 2130 LLVMContext &Ctx = Loop->getFunction()->getContext(); 2131 SmallVector<Metadata *> NewLoopProperties; 2132 NewLoopProperties.push_back(nullptr); 2133 2134 // If the loop already has metadata, prepend it to the new metadata. 2135 BasicBlock *Latch = Loop->getLatch(); 2136 assert(Latch && "A valid CanonicalLoopInfo must have a unique latch"); 2137 MDNode *Existing = Latch->getTerminator()->getMetadata(LLVMContext::MD_loop); 2138 if (Existing) 2139 append_range(NewLoopProperties, drop_begin(Existing->operands(), 1)); 2140 2141 append_range(NewLoopProperties, Properties); 2142 MDNode *LoopID = MDNode::getDistinct(Ctx, NewLoopProperties); 2143 LoopID->replaceOperandWith(0, LoopID); 2144 2145 Latch->getTerminator()->setMetadata(LLVMContext::MD_loop, LoopID); 2146 } 2147 2148 void OpenMPIRBuilder::unrollLoopFull(DebugLoc, CanonicalLoopInfo *Loop) { 2149 LLVMContext &Ctx = Builder.getContext(); 2150 addLoopMetadata( 2151 Loop, {MDNode::get(Ctx, MDString::get(Ctx, "llvm.loop.unroll.enable")), 2152 MDNode::get(Ctx, MDString::get(Ctx, "llvm.loop.unroll.full"))}); 2153 } 2154 2155 void OpenMPIRBuilder::unrollLoopHeuristic(DebugLoc, CanonicalLoopInfo *Loop) { 2156 LLVMContext &Ctx = Builder.getContext(); 2157 addLoopMetadata( 2158 Loop, { 2159 MDNode::get(Ctx, MDString::get(Ctx, "llvm.loop.unroll.enable")), 2160 }); 2161 } 2162 2163 /// Create the TargetMachine object to query the backend for optimization 2164 /// preferences. 2165 /// 2166 /// Ideally, this would be passed from the front-end to the OpenMPBuilder, but 2167 /// e.g. Clang does not pass it to its CodeGen layer and creates it only when 2168 /// needed for the LLVM pass pipline. We use some default options to avoid 2169 /// having to pass too many settings from the frontend that probably do not 2170 /// matter. 2171 /// 2172 /// Currently, TargetMachine is only used sometimes by the unrollLoopPartial 2173 /// method. 
/// If we are going to use TargetMachine for more purposes, especially those
/// that are sensitive to TargetOptions, RelocModel and CodeModel, it might be
/// worth requiring front-ends to pass on their TargetMachine, or at least
/// cache it between methods. Note that while front-ends such as Clang have
/// just a single main TargetMachine per translation unit, "target-cpu" and
/// "target-features" that determine the TargetMachine are per-function and
/// can be overridden using __attribute__((target("OPTIONS"))).
static std::unique_ptr<TargetMachine>
createTargetMachine(Function *F, CodeGenOpt::Level OptLevel) {
  Module *M = F->getParent();

  StringRef CPU = F->getFnAttribute("target-cpu").getValueAsString();
  StringRef Features = F->getFnAttribute("target-features").getValueAsString();
  const std::string &Triple = M->getTargetTriple();

  std::string Error;
  const llvm::Target *TheTarget = TargetRegistry::lookupTarget(Triple, Error);
  if (!TheTarget)
    return {};

  llvm::TargetOptions Options;
  return std::unique_ptr<TargetMachine>(TheTarget->createTargetMachine(
      Triple, CPU, Features, Options, /*RelocModel=*/None, /*CodeModel=*/None,
      OptLevel));
}

/// Heuristically determine the best-performing unroll factor for \p CLI. This
/// depends on the target processor. We are re-using the same heuristics as the
/// LoopUnrollPass.
static int32_t computeHeuristicUnrollFactor(CanonicalLoopInfo *CLI) {
  Function *F = CLI->getFunction();

  // Assume the user requests the most aggressive unrolling, even if the rest
  // of the code is optimized using a lower setting.
  CodeGenOpt::Level OptLevel = CodeGenOpt::Aggressive;
  std::unique_ptr<TargetMachine> TM = createTargetMachine(F, OptLevel);

  FunctionAnalysisManager FAM;
  FAM.registerPass([]() { return TargetLibraryAnalysis(); });
  FAM.registerPass([]() { return AssumptionAnalysis(); });
  FAM.registerPass([]() { return DominatorTreeAnalysis(); });
  FAM.registerPass([]() { return LoopAnalysis(); });
  FAM.registerPass([]() { return ScalarEvolutionAnalysis(); });
  FAM.registerPass([]() { return PassInstrumentationAnalysis(); });
  TargetIRAnalysis TIRA;
  if (TM)
    TIRA = TargetIRAnalysis(
        [&](const Function &F) { return TM->getTargetTransformInfo(F); });
  FAM.registerPass([&]() { return TIRA; });

  TargetIRAnalysis::Result &&TTI = TIRA.run(*F, FAM);
  ScalarEvolutionAnalysis SEA;
  ScalarEvolution &&SE = SEA.run(*F, FAM);
  DominatorTreeAnalysis DTA;
  DominatorTree &&DT = DTA.run(*F, FAM);
  LoopAnalysis LIA;
  LoopInfo &&LI = LIA.run(*F, FAM);
  AssumptionAnalysis ACT;
  AssumptionCache &&AC = ACT.run(*F, FAM);
  OptimizationRemarkEmitter ORE{F};

  Loop *L = LI.getLoopFor(CLI->getHeader());
  assert(L && "Expecting CanonicalLoopInfo to be recognized as a loop");

  TargetTransformInfo::UnrollingPreferences UP =
      gatherUnrollingPreferences(L, SE, TTI,
                                 /*BlockFrequencyInfo=*/nullptr,
                                 /*ProfileSummaryInfo=*/nullptr, ORE, OptLevel,
                                 /*UserThreshold=*/None,
                                 /*UserCount=*/None,
                                 /*UserAllowPartial=*/true,
                                 /*UserAllowRuntime=*/true,
                                 /*UserUpperBound=*/None,
                                 /*UserFullUnrollMaxCount=*/None);

  UP.Force = true;

  // Account for additional optimizations taking place before the
  // LoopUnrollPass would unroll the loop.
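  // E.g., a factor of 1.5 turns a threshold of 300 into 450 (numbers purely
  // illustrative; the actual factor is the UnrollThresholdFactor option used
  // below).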
2252 UP.Threshold *= UnrollThresholdFactor; 2253 UP.PartialThreshold *= UnrollThresholdFactor; 2254 2255 // Use normal unroll factors even if the rest of the code is optimized for 2256 // size. 2257 UP.OptSizeThreshold = UP.Threshold; 2258 UP.PartialOptSizeThreshold = UP.PartialThreshold; 2259 2260 LLVM_DEBUG(dbgs() << "Unroll heuristic thresholds:\n" 2261 << " Threshold=" << UP.Threshold << "\n" 2262 << " PartialThreshold=" << UP.PartialThreshold << "\n" 2263 << " OptSizeThreshold=" << UP.OptSizeThreshold << "\n" 2264 << " PartialOptSizeThreshold=" 2265 << UP.PartialOptSizeThreshold << "\n"); 2266 2267 // Disable peeling. 2268 TargetTransformInfo::PeelingPreferences PP = 2269 gatherPeelingPreferences(L, SE, TTI, 2270 /*UserAllowPeeling=*/false, 2271 /*UserAllowProfileBasedPeeling=*/false, 2272 /*UnrollingSpecficValues=*/false); 2273 2274 SmallPtrSet<const Value *, 32> EphValues; 2275 CodeMetrics::collectEphemeralValues(L, &AC, EphValues); 2276 2277 // Assume that reads and writes to stack variables can be eliminated by 2278 // Mem2Reg, SROA or LICM. That is, don't count them towards the loop body's 2279 // size. 2280 for (BasicBlock *BB : L->blocks()) { 2281 for (Instruction &I : *BB) { 2282 Value *Ptr; 2283 if (auto *Load = dyn_cast<LoadInst>(&I)) { 2284 Ptr = Load->getPointerOperand(); 2285 } else if (auto *Store = dyn_cast<StoreInst>(&I)) { 2286 Ptr = Store->getPointerOperand(); 2287 } else 2288 continue; 2289 2290 Ptr = Ptr->stripPointerCasts(); 2291 2292 if (auto *Alloca = dyn_cast<AllocaInst>(Ptr)) { 2293 if (Alloca->getParent() == &F->getEntryBlock()) 2294 EphValues.insert(&I); 2295 } 2296 } 2297 } 2298 2299 unsigned NumInlineCandidates; 2300 bool NotDuplicatable; 2301 bool Convergent; 2302 unsigned LoopSize = 2303 ApproximateLoopSize(L, NumInlineCandidates, NotDuplicatable, Convergent, 2304 TTI, EphValues, UP.BEInsns); 2305 LLVM_DEBUG(dbgs() << "Estimated loop size is " << LoopSize << "\n"); 2306 2307 // Loop is not unrollable if the loop contains certain instructions. 2308 if (NotDuplicatable || Convergent) { 2309 LLVM_DEBUG(dbgs() << "Loop not considered unrollable\n"); 2310 return 1; 2311 } 2312 2313 // TODO: Determine trip count of \p CLI if constant, computeUnrollCount might 2314 // be able to use it. 2315 int TripCount = 0; 2316 int MaxTripCount = 0; 2317 bool MaxOrZero = false; 2318 unsigned TripMultiple = 0; 2319 2320 bool UseUpperBound = false; 2321 computeUnrollCount(L, TTI, DT, &LI, SE, EphValues, &ORE, TripCount, 2322 MaxTripCount, MaxOrZero, TripMultiple, LoopSize, UP, PP, 2323 UseUpperBound); 2324 unsigned Factor = UP.Count; 2325 LLVM_DEBUG(dbgs() << "Suggesting unroll factor of " << Factor << "\n"); 2326 2327 // This function returns 1 to signal to not unroll a loop. 2328 if (Factor == 0) 2329 return 1; 2330 return Factor; 2331 } 2332 2333 void OpenMPIRBuilder::unrollLoopPartial(DebugLoc DL, CanonicalLoopInfo *Loop, 2334 int32_t Factor, 2335 CanonicalLoopInfo **UnrolledCLI) { 2336 assert(Factor >= 0 && "Unroll factor must not be negative"); 2337 2338 Function *F = Loop->getFunction(); 2339 LLVMContext &Ctx = F->getContext(); 2340 2341 // If the unrolled loop is not used for another loop-associated directive, it 2342 // is sufficient to add metadata for the LoopUnrollPass. 
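  // For example, with Factor == 4 the latch terminator ends up with loop
  // metadata of roughly this shape (sketch only; addLoopMetadata also
  // preserves any pre-existing loop properties):
  //
  //   br ... !llvm.loop !0
  //   !0 = distinct !{!0, !1, !2}
  //   !1 = !{!"llvm.loop.unroll.enable"}
  //   !2 = !{!"llvm.loop.unroll.count", i32 4}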
2343 if (!UnrolledCLI) { 2344 SmallVector<Metadata *, 2> LoopMetadata; 2345 LoopMetadata.push_back( 2346 MDNode::get(Ctx, MDString::get(Ctx, "llvm.loop.unroll.enable"))); 2347 2348 if (Factor >= 1) { 2349 ConstantAsMetadata *FactorConst = ConstantAsMetadata::get( 2350 ConstantInt::get(Type::getInt32Ty(Ctx), APInt(32, Factor))); 2351 LoopMetadata.push_back(MDNode::get( 2352 Ctx, {MDString::get(Ctx, "llvm.loop.unroll.count"), FactorConst})); 2353 } 2354 2355 addLoopMetadata(Loop, LoopMetadata); 2356 return; 2357 } 2358 2359 // Heuristically determine the unroll factor. 2360 if (Factor == 0) 2361 Factor = computeHeuristicUnrollFactor(Loop); 2362 2363 // No change required with unroll factor 1. 2364 if (Factor == 1) { 2365 *UnrolledCLI = Loop; 2366 return; 2367 } 2368 2369 assert(Factor >= 2 && 2370 "unrolling only makes sense with a factor of 2 or larger"); 2371 2372 Type *IndVarTy = Loop->getIndVarType(); 2373 2374 // Apply partial unrolling by tiling the loop by the unroll-factor, then fully 2375 // unroll the inner loop. 2376 Value *FactorVal = 2377 ConstantInt::get(IndVarTy, APInt(IndVarTy->getIntegerBitWidth(), Factor, 2378 /*isSigned=*/false)); 2379 std::vector<CanonicalLoopInfo *> LoopNest = 2380 tileLoops(DL, {Loop}, {FactorVal}); 2381 assert(LoopNest.size() == 2 && "Expect 2 loops after tiling"); 2382 *UnrolledCLI = LoopNest[0]; 2383 CanonicalLoopInfo *InnerLoop = LoopNest[1]; 2384 2385 // LoopUnrollPass can only fully unroll loops with constant trip count. 2386 // Unroll by the unroll factor with a fallback epilog for the remainder 2387 // iterations if necessary. 2388 ConstantAsMetadata *FactorConst = ConstantAsMetadata::get( 2389 ConstantInt::get(Type::getInt32Ty(Ctx), APInt(32, Factor))); 2390 addLoopMetadata( 2391 InnerLoop, 2392 {MDNode::get(Ctx, MDString::get(Ctx, "llvm.loop.unroll.enable")), 2393 MDNode::get( 2394 Ctx, {MDString::get(Ctx, "llvm.loop.unroll.count"), FactorConst})}); 2395 2396 #ifndef NDEBUG 2397 (*UnrolledCLI)->assertOK(); 2398 #endif 2399 } 2400 2401 OpenMPIRBuilder::InsertPointTy 2402 OpenMPIRBuilder::createCopyPrivate(const LocationDescription &Loc, 2403 llvm::Value *BufSize, llvm::Value *CpyBuf, 2404 llvm::Value *CpyFn, llvm::Value *DidIt) { 2405 if (!updateToLocation(Loc)) 2406 return Loc.IP; 2407 2408 uint32_t SrcLocStrSize; 2409 Constant *SrcLocStr = getOrCreateSrcLocStr(Loc, SrcLocStrSize); 2410 Value *Ident = getOrCreateIdent(SrcLocStr, SrcLocStrSize); 2411 Value *ThreadId = getOrCreateThreadID(Ident); 2412 2413 llvm::Value *DidItLD = Builder.CreateLoad(Builder.getInt32Ty(), DidIt); 2414 2415 Value *Args[] = {Ident, ThreadId, BufSize, CpyBuf, CpyFn, DidItLD}; 2416 2417 Function *Fn = getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_copyprivate); 2418 Builder.CreateCall(Fn, Args); 2419 2420 return Builder.saveIP(); 2421 } 2422 2423 OpenMPIRBuilder::InsertPointTy 2424 OpenMPIRBuilder::createSingle(const LocationDescription &Loc, 2425 BodyGenCallbackTy BodyGenCB, 2426 FinalizeCallbackTy FiniCB, llvm::Value *DidIt) { 2427 2428 if (!updateToLocation(Loc)) 2429 return Loc.IP; 2430 2431 // If needed (i.e. 
not null), initialize `DidIt` with 0 2432 if (DidIt) { 2433 Builder.CreateStore(Builder.getInt32(0), DidIt); 2434 } 2435 2436 Directive OMPD = Directive::OMPD_single; 2437 uint32_t SrcLocStrSize; 2438 Constant *SrcLocStr = getOrCreateSrcLocStr(Loc, SrcLocStrSize); 2439 Value *Ident = getOrCreateIdent(SrcLocStr, SrcLocStrSize); 2440 Value *ThreadId = getOrCreateThreadID(Ident); 2441 Value *Args[] = {Ident, ThreadId}; 2442 2443 Function *EntryRTLFn = getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_single); 2444 Instruction *EntryCall = Builder.CreateCall(EntryRTLFn, Args); 2445 2446 Function *ExitRTLFn = getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_end_single); 2447 Instruction *ExitCall = Builder.CreateCall(ExitRTLFn, Args); 2448 2449 // generates the following: 2450 // if (__kmpc_single()) { 2451 // .... single region ... 2452 // __kmpc_end_single 2453 // } 2454 2455 return EmitOMPInlinedRegion(OMPD, EntryCall, ExitCall, BodyGenCB, FiniCB, 2456 /*Conditional*/ true, /*hasFinalize*/ true); 2457 } 2458 2459 OpenMPIRBuilder::InsertPointTy OpenMPIRBuilder::createCritical( 2460 const LocationDescription &Loc, BodyGenCallbackTy BodyGenCB, 2461 FinalizeCallbackTy FiniCB, StringRef CriticalName, Value *HintInst) { 2462 2463 if (!updateToLocation(Loc)) 2464 return Loc.IP; 2465 2466 Directive OMPD = Directive::OMPD_critical; 2467 uint32_t SrcLocStrSize; 2468 Constant *SrcLocStr = getOrCreateSrcLocStr(Loc, SrcLocStrSize); 2469 Value *Ident = getOrCreateIdent(SrcLocStr, SrcLocStrSize); 2470 Value *ThreadId = getOrCreateThreadID(Ident); 2471 Value *LockVar = getOMPCriticalRegionLock(CriticalName); 2472 Value *Args[] = {Ident, ThreadId, LockVar}; 2473 2474 SmallVector<llvm::Value *, 4> EnterArgs(std::begin(Args), std::end(Args)); 2475 Function *RTFn = nullptr; 2476 if (HintInst) { 2477 // Add Hint to entry Args and create call 2478 EnterArgs.push_back(HintInst); 2479 RTFn = getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_critical_with_hint); 2480 } else { 2481 RTFn = getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_critical); 2482 } 2483 Instruction *EntryCall = Builder.CreateCall(RTFn, EnterArgs); 2484 2485 Function *ExitRTLFn = 2486 getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_end_critical); 2487 Instruction *ExitCall = Builder.CreateCall(ExitRTLFn, Args); 2488 2489 return EmitOMPInlinedRegion(OMPD, EntryCall, ExitCall, BodyGenCB, FiniCB, 2490 /*Conditional*/ false, /*hasFinalize*/ true); 2491 } 2492 2493 OpenMPIRBuilder::InsertPointTy 2494 OpenMPIRBuilder::createOrderedDepend(const LocationDescription &Loc, 2495 InsertPointTy AllocaIP, unsigned NumLoops, 2496 ArrayRef<llvm::Value *> StoreValues, 2497 const Twine &Name, bool IsDependSource) { 2498 for (size_t I = 0; I < StoreValues.size(); I++) 2499 assert(StoreValues[I]->getType()->isIntegerTy(64) && 2500 "OpenMP runtime requires depend vec with i64 type"); 2501 2502 if (!updateToLocation(Loc)) 2503 return Loc.IP; 2504 2505 // Allocate space for vector and generate alloc instruction. 2506 auto *ArrI64Ty = ArrayType::get(Int64, NumLoops); 2507 Builder.restoreIP(AllocaIP); 2508 AllocaInst *ArgsBase = Builder.CreateAlloca(ArrI64Ty, nullptr, Name); 2509 ArgsBase->setAlignment(Align(8)); 2510 Builder.restoreIP(Loc.IP); 2511 2512 // Store the index value with offset in depend vector. 
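  // For example (illustrative), for "ordered(2)" and the current iteration
  // (i, j), StoreValues is {i, j}: the loop below writes both values into the
  // i64 vector, whose base address is then passed to __kmpc_doacross_post for
  // depend(source), or __kmpc_doacross_wait for depend(sink : vec).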
2513 for (unsigned I = 0; I < NumLoops; ++I) { 2514 Value *DependAddrGEPIter = Builder.CreateInBoundsGEP( 2515 ArrI64Ty, ArgsBase, {Builder.getInt64(0), Builder.getInt64(I)}); 2516 StoreInst *STInst = Builder.CreateStore(StoreValues[I], DependAddrGEPIter); 2517 STInst->setAlignment(Align(8)); 2518 } 2519 2520 Value *DependBaseAddrGEP = Builder.CreateInBoundsGEP( 2521 ArrI64Ty, ArgsBase, {Builder.getInt64(0), Builder.getInt64(0)}); 2522 2523 uint32_t SrcLocStrSize; 2524 Constant *SrcLocStr = getOrCreateSrcLocStr(Loc, SrcLocStrSize); 2525 Value *Ident = getOrCreateIdent(SrcLocStr, SrcLocStrSize); 2526 Value *ThreadId = getOrCreateThreadID(Ident); 2527 Value *Args[] = {Ident, ThreadId, DependBaseAddrGEP}; 2528 2529 Function *RTLFn = nullptr; 2530 if (IsDependSource) 2531 RTLFn = getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_doacross_post); 2532 else 2533 RTLFn = getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_doacross_wait); 2534 Builder.CreateCall(RTLFn, Args); 2535 2536 return Builder.saveIP(); 2537 } 2538 2539 OpenMPIRBuilder::InsertPointTy OpenMPIRBuilder::createOrderedThreadsSimd( 2540 const LocationDescription &Loc, BodyGenCallbackTy BodyGenCB, 2541 FinalizeCallbackTy FiniCB, bool IsThreads) { 2542 if (!updateToLocation(Loc)) 2543 return Loc.IP; 2544 2545 Directive OMPD = Directive::OMPD_ordered; 2546 Instruction *EntryCall = nullptr; 2547 Instruction *ExitCall = nullptr; 2548 2549 if (IsThreads) { 2550 uint32_t SrcLocStrSize; 2551 Constant *SrcLocStr = getOrCreateSrcLocStr(Loc, SrcLocStrSize); 2552 Value *Ident = getOrCreateIdent(SrcLocStr, SrcLocStrSize); 2553 Value *ThreadId = getOrCreateThreadID(Ident); 2554 Value *Args[] = {Ident, ThreadId}; 2555 2556 Function *EntryRTLFn = getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_ordered); 2557 EntryCall = Builder.CreateCall(EntryRTLFn, Args); 2558 2559 Function *ExitRTLFn = 2560 getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_end_ordered); 2561 ExitCall = Builder.CreateCall(ExitRTLFn, Args); 2562 } 2563 2564 return EmitOMPInlinedRegion(OMPD, EntryCall, ExitCall, BodyGenCB, FiniCB, 2565 /*Conditional*/ false, /*hasFinalize*/ true); 2566 } 2567 2568 OpenMPIRBuilder::InsertPointTy OpenMPIRBuilder::EmitOMPInlinedRegion( 2569 Directive OMPD, Instruction *EntryCall, Instruction *ExitCall, 2570 BodyGenCallbackTy BodyGenCB, FinalizeCallbackTy FiniCB, bool Conditional, 2571 bool HasFinalize, bool IsCancellable) { 2572 2573 if (HasFinalize) 2574 FinalizationStack.push_back({FiniCB, OMPD, IsCancellable}); 2575 2576 // Create inlined region's entry and body blocks, in preparation 2577 // for conditional creation 2578 BasicBlock *EntryBB = Builder.GetInsertBlock(); 2579 Instruction *SplitPos = EntryBB->getTerminator(); 2580 if (!isa_and_nonnull<BranchInst>(SplitPos)) 2581 SplitPos = new UnreachableInst(Builder.getContext(), EntryBB); 2582 BasicBlock *ExitBB = EntryBB->splitBasicBlock(SplitPos, "omp_region.end"); 2583 BasicBlock *FiniBB = 2584 EntryBB->splitBasicBlock(EntryBB->getTerminator(), "omp_region.finalize"); 2585 2586 Builder.SetInsertPoint(EntryBB->getTerminator()); 2587 emitCommonDirectiveEntry(OMPD, EntryCall, ExitBB, Conditional); 2588 2589 // generate body 2590 BodyGenCB(/* AllocaIP */ InsertPointTy(), 2591 /* CodeGenIP */ Builder.saveIP(), *FiniBB); 2592 2593 // If we didn't emit a branch to FiniBB during body generation, it means 2594 // FiniBB is unreachable (e.g. while(1);). stop generating all the 2595 // unreachable blocks, and remove anything we are not going to use. 
2596 auto SkipEmittingRegion = FiniBB->hasNPredecessors(0); 2597 if (SkipEmittingRegion) { 2598 FiniBB->eraseFromParent(); 2599 ExitCall->eraseFromParent(); 2600 // Discard finalization if we have it. 2601 if (HasFinalize) { 2602 assert(!FinalizationStack.empty() && 2603 "Unexpected finalization stack state!"); 2604 FinalizationStack.pop_back(); 2605 } 2606 } else { 2607 // emit exit call and do any needed finalization. 2608 auto FinIP = InsertPointTy(FiniBB, FiniBB->getFirstInsertionPt()); 2609 assert(FiniBB->getTerminator()->getNumSuccessors() == 1 && 2610 FiniBB->getTerminator()->getSuccessor(0) == ExitBB && 2611 "Unexpected control flow graph state!!"); 2612 emitCommonDirectiveExit(OMPD, FinIP, ExitCall, HasFinalize); 2613 assert(FiniBB->getUniquePredecessor()->getUniqueSuccessor() == FiniBB && 2614 "Unexpected Control Flow State!"); 2615 MergeBlockIntoPredecessor(FiniBB); 2616 } 2617 2618 // If we are skipping the region of a non conditional, remove the exit 2619 // block, and clear the builder's insertion point. 2620 assert(SplitPos->getParent() == ExitBB && 2621 "Unexpected Insertion point location!"); 2622 if (!Conditional && SkipEmittingRegion) { 2623 ExitBB->eraseFromParent(); 2624 Builder.ClearInsertionPoint(); 2625 } else { 2626 auto merged = MergeBlockIntoPredecessor(ExitBB); 2627 BasicBlock *ExitPredBB = SplitPos->getParent(); 2628 auto InsertBB = merged ? ExitPredBB : ExitBB; 2629 if (!isa_and_nonnull<BranchInst>(SplitPos)) 2630 SplitPos->eraseFromParent(); 2631 Builder.SetInsertPoint(InsertBB); 2632 } 2633 2634 return Builder.saveIP(); 2635 } 2636 2637 OpenMPIRBuilder::InsertPointTy OpenMPIRBuilder::emitCommonDirectiveEntry( 2638 Directive OMPD, Value *EntryCall, BasicBlock *ExitBB, bool Conditional) { 2639 // if nothing to do, Return current insertion point. 2640 if (!Conditional || !EntryCall) 2641 return Builder.saveIP(); 2642 2643 BasicBlock *EntryBB = Builder.GetInsertBlock(); 2644 Value *CallBool = Builder.CreateIsNotNull(EntryCall); 2645 auto *ThenBB = BasicBlock::Create(M.getContext(), "omp_region.body"); 2646 auto *UI = new UnreachableInst(Builder.getContext(), ThenBB); 2647 2648 // Emit thenBB and set the Builder's insertion point there for 2649 // body generation next. Place the block after the current block. 2650 Function *CurFn = EntryBB->getParent(); 2651 CurFn->getBasicBlockList().insertAfter(EntryBB->getIterator(), ThenBB); 2652 2653 // Move Entry branch to end of ThenBB, and replace with conditional 2654 // branch (If-stmt) 2655 Instruction *EntryBBTI = EntryBB->getTerminator(); 2656 Builder.CreateCondBr(CallBool, ThenBB, ExitBB); 2657 EntryBBTI->removeFromParent(); 2658 Builder.SetInsertPoint(UI); 2659 Builder.Insert(EntryBBTI); 2660 UI->eraseFromParent(); 2661 Builder.SetInsertPoint(ThenBB->getTerminator()); 2662 2663 // return an insertion point to ExitBB. 
  return IRBuilder<>::InsertPoint(ExitBB, ExitBB->getFirstInsertionPt());
}

OpenMPIRBuilder::InsertPointTy OpenMPIRBuilder::emitCommonDirectiveExit(
    omp::Directive OMPD, InsertPointTy FinIP, Instruction *ExitCall,
    bool HasFinalize) {

  Builder.restoreIP(FinIP);

  // If there is finalization to do, emit it before the exit call.
  if (HasFinalize) {
    assert(!FinalizationStack.empty() &&
           "Unexpected finalization stack state!");

    FinalizationInfo Fi = FinalizationStack.pop_back_val();
    assert(Fi.DK == OMPD && "Unexpected Directive for Finalization call!");

    Fi.FiniCB(FinIP);

    BasicBlock *FiniBB = FinIP.getBlock();
    Instruction *FiniBBTI = FiniBB->getTerminator();

    // Set the Builder IP for call creation.
    Builder.SetInsertPoint(FiniBBTI);
  }

  if (!ExitCall)
    return Builder.saveIP();

  // Place the exit call as the last instruction before the finalization
  // block's terminator.
  ExitCall->removeFromParent();
  Builder.Insert(ExitCall);

  return IRBuilder<>::InsertPoint(ExitCall->getParent(),
                                  ExitCall->getIterator());
}

OpenMPIRBuilder::InsertPointTy OpenMPIRBuilder::createCopyinClauseBlocks(
    InsertPointTy IP, Value *MasterAddr, Value *PrivateAddr,
    llvm::IntegerType *IntPtrTy, bool BranchtoEnd) {
  if (!IP.isSet())
    return IP;

  IRBuilder<>::InsertPointGuard IPG(Builder);

  // Creates the following CFG structure:
  //   OMP_Entry : (MasterAddr != PrivateAddr)?
  //        F     T
  //        |      \
  //        |     copyin.not.master
  //        |      /
  //        v     /
  //   copyin.not.master.end
  //        |
  //        v
  //   OMP.Entry.Next

  BasicBlock *OMP_Entry = IP.getBlock();
  Function *CurFn = OMP_Entry->getParent();
  BasicBlock *CopyBegin =
      BasicBlock::Create(M.getContext(), "copyin.not.master", CurFn);
  BasicBlock *CopyEnd = nullptr;

  // If the entry block is terminated, split to preserve the branch to the
  // following basic block (i.e. OMP.Entry.Next); otherwise, leave everything
  // as is.
2729 if (isa_and_nonnull<BranchInst>(OMP_Entry->getTerminator())) { 2730 CopyEnd = OMP_Entry->splitBasicBlock(OMP_Entry->getTerminator(), 2731 "copyin.not.master.end"); 2732 OMP_Entry->getTerminator()->eraseFromParent(); 2733 } else { 2734 CopyEnd = 2735 BasicBlock::Create(M.getContext(), "copyin.not.master.end", CurFn); 2736 } 2737 2738 Builder.SetInsertPoint(OMP_Entry); 2739 Value *MasterPtr = Builder.CreatePtrToInt(MasterAddr, IntPtrTy); 2740 Value *PrivatePtr = Builder.CreatePtrToInt(PrivateAddr, IntPtrTy); 2741 Value *cmp = Builder.CreateICmpNE(MasterPtr, PrivatePtr); 2742 Builder.CreateCondBr(cmp, CopyBegin, CopyEnd); 2743 2744 Builder.SetInsertPoint(CopyBegin); 2745 if (BranchtoEnd) 2746 Builder.SetInsertPoint(Builder.CreateBr(CopyEnd)); 2747 2748 return Builder.saveIP(); 2749 } 2750 2751 CallInst *OpenMPIRBuilder::createOMPAlloc(const LocationDescription &Loc, 2752 Value *Size, Value *Allocator, 2753 std::string Name) { 2754 IRBuilder<>::InsertPointGuard IPG(Builder); 2755 Builder.restoreIP(Loc.IP); 2756 2757 uint32_t SrcLocStrSize; 2758 Constant *SrcLocStr = getOrCreateSrcLocStr(Loc, SrcLocStrSize); 2759 Value *Ident = getOrCreateIdent(SrcLocStr, SrcLocStrSize); 2760 Value *ThreadId = getOrCreateThreadID(Ident); 2761 Value *Args[] = {ThreadId, Size, Allocator}; 2762 2763 Function *Fn = getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_alloc); 2764 2765 return Builder.CreateCall(Fn, Args, Name); 2766 } 2767 2768 CallInst *OpenMPIRBuilder::createOMPFree(const LocationDescription &Loc, 2769 Value *Addr, Value *Allocator, 2770 std::string Name) { 2771 IRBuilder<>::InsertPointGuard IPG(Builder); 2772 Builder.restoreIP(Loc.IP); 2773 2774 uint32_t SrcLocStrSize; 2775 Constant *SrcLocStr = getOrCreateSrcLocStr(Loc, SrcLocStrSize); 2776 Value *Ident = getOrCreateIdent(SrcLocStr, SrcLocStrSize); 2777 Value *ThreadId = getOrCreateThreadID(Ident); 2778 Value *Args[] = {ThreadId, Addr, Allocator}; 2779 Function *Fn = getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_free); 2780 return Builder.CreateCall(Fn, Args, Name); 2781 } 2782 2783 CallInst *OpenMPIRBuilder::createCachedThreadPrivate( 2784 const LocationDescription &Loc, llvm::Value *Pointer, 2785 llvm::ConstantInt *Size, const llvm::Twine &Name) { 2786 IRBuilder<>::InsertPointGuard IPG(Builder); 2787 Builder.restoreIP(Loc.IP); 2788 2789 uint32_t SrcLocStrSize; 2790 Constant *SrcLocStr = getOrCreateSrcLocStr(Loc, SrcLocStrSize); 2791 Value *Ident = getOrCreateIdent(SrcLocStr, SrcLocStrSize); 2792 Value *ThreadId = getOrCreateThreadID(Ident); 2793 Constant *ThreadPrivateCache = 2794 getOrCreateOMPInternalVariable(Int8PtrPtr, Name); 2795 llvm::Value *Args[] = {Ident, ThreadId, Pointer, Size, ThreadPrivateCache}; 2796 2797 Function *Fn = 2798 getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_threadprivate_cached); 2799 2800 return Builder.CreateCall(Fn, Args); 2801 } 2802 2803 OpenMPIRBuilder::InsertPointTy 2804 OpenMPIRBuilder::createTargetInit(const LocationDescription &Loc, bool IsSPMD, 2805 bool RequiresFullRuntime) { 2806 if (!updateToLocation(Loc)) 2807 return Loc.IP; 2808 2809 uint32_t SrcLocStrSize; 2810 Constant *SrcLocStr = getOrCreateSrcLocStr(Loc, SrcLocStrSize); 2811 Constant *Ident = getOrCreateIdent(SrcLocStr, SrcLocStrSize); 2812 ConstantInt *IsSPMDVal = ConstantInt::getSigned( 2813 IntegerType::getInt8Ty(Int8->getContext()), 2814 IsSPMD ? 
OMP_TGT_EXEC_MODE_SPMD : OMP_TGT_EXEC_MODE_GENERIC); 2815 ConstantInt *UseGenericStateMachine = 2816 ConstantInt::getBool(Int32->getContext(), !IsSPMD); 2817 ConstantInt *RequiresFullRuntimeVal = 2818 ConstantInt::getBool(Int32->getContext(), RequiresFullRuntime); 2819 2820 Function *Fn = getOrCreateRuntimeFunctionPtr( 2821 omp::RuntimeFunction::OMPRTL___kmpc_target_init); 2822 2823 CallInst *ThreadKind = Builder.CreateCall( 2824 Fn, {Ident, IsSPMDVal, UseGenericStateMachine, RequiresFullRuntimeVal}); 2825 2826 Value *ExecUserCode = Builder.CreateICmpEQ( 2827 ThreadKind, ConstantInt::get(ThreadKind->getType(), -1), 2828 "exec_user_code"); 2829 2830 // ThreadKind = __kmpc_target_init(...) 2831 // if (ThreadKind == -1) 2832 // user_code 2833 // else 2834 // return; 2835 2836 auto *UI = Builder.CreateUnreachable(); 2837 BasicBlock *CheckBB = UI->getParent(); 2838 BasicBlock *UserCodeEntryBB = CheckBB->splitBasicBlock(UI, "user_code.entry"); 2839 2840 BasicBlock *WorkerExitBB = BasicBlock::Create( 2841 CheckBB->getContext(), "worker.exit", CheckBB->getParent()); 2842 Builder.SetInsertPoint(WorkerExitBB); 2843 Builder.CreateRetVoid(); 2844 2845 auto *CheckBBTI = CheckBB->getTerminator(); 2846 Builder.SetInsertPoint(CheckBBTI); 2847 Builder.CreateCondBr(ExecUserCode, UI->getParent(), WorkerExitBB); 2848 2849 CheckBBTI->eraseFromParent(); 2850 UI->eraseFromParent(); 2851 2852 // Continue in the "user_code" block, see diagram above and in 2853 // openmp/libomptarget/deviceRTLs/common/include/target.h . 2854 return InsertPointTy(UserCodeEntryBB, UserCodeEntryBB->getFirstInsertionPt()); 2855 } 2856 2857 void OpenMPIRBuilder::createTargetDeinit(const LocationDescription &Loc, 2858 bool IsSPMD, 2859 bool RequiresFullRuntime) { 2860 if (!updateToLocation(Loc)) 2861 return; 2862 2863 uint32_t SrcLocStrSize; 2864 Constant *SrcLocStr = getOrCreateSrcLocStr(Loc, SrcLocStrSize); 2865 Value *Ident = getOrCreateIdent(SrcLocStr, SrcLocStrSize); 2866 ConstantInt *IsSPMDVal = ConstantInt::getSigned( 2867 IntegerType::getInt8Ty(Int8->getContext()), 2868 IsSPMD ? OMP_TGT_EXEC_MODE_SPMD : OMP_TGT_EXEC_MODE_GENERIC); 2869 ConstantInt *RequiresFullRuntimeVal = 2870 ConstantInt::getBool(Int32->getContext(), RequiresFullRuntime); 2871 2872 Function *Fn = getOrCreateRuntimeFunctionPtr( 2873 omp::RuntimeFunction::OMPRTL___kmpc_target_deinit); 2874 2875 Builder.CreateCall(Fn, {Ident, IsSPMDVal, RequiresFullRuntimeVal}); 2876 } 2877 2878 std::string OpenMPIRBuilder::getNameWithSeparators(ArrayRef<StringRef> Parts, 2879 StringRef FirstSeparator, 2880 StringRef Separator) { 2881 SmallString<128> Buffer; 2882 llvm::raw_svector_ostream OS(Buffer); 2883 StringRef Sep = FirstSeparator; 2884 for (StringRef Part : Parts) { 2885 OS << Sep << Part; 2886 Sep = Separator; 2887 } 2888 return OS.str().str(); 2889 } 2890 2891 Constant *OpenMPIRBuilder::getOrCreateOMPInternalVariable( 2892 llvm::Type *Ty, const llvm::Twine &Name, unsigned AddressSpace) { 2893 // TODO: Replace the twine arg with stringref to get rid of the conversion 2894 // logic. However This is taken from current implementation in clang as is. 2895 // Since this method is used in many places exclusively for OMP internal use 2896 // we will keep it as is for temporarily until we move all users to the 2897 // builder and then, if possible, fix it everywhere in one go. 
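  // (For reference: a typical caller is getOMPCriticalRegionLock() below,
  // which passes names of the form ".gomp_critical_user_<name>.var" built
  // via getNameWithSeparators().)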
  SmallString<256> Buffer;
  llvm::raw_svector_ostream Out(Buffer);
  Out << Name;
  StringRef RuntimeName = Out.str();
  auto &Elem = *InternalVars.try_emplace(RuntimeName, nullptr).first;
  if (Elem.second) {
    assert(Elem.second->getType()->getPointerElementType() == Ty &&
           "OMP internal variable has different type than requested");
  } else {
    // TODO: Investigate the appropriate linkage type used for the global
    // variable; possibly change it to internal or private, or maybe create
    // different versions of the function for different OMP internal
    // variables.
    Elem.second = new llvm::GlobalVariable(
        M, Ty, /*IsConstant=*/false, llvm::GlobalValue::CommonLinkage,
        llvm::Constant::getNullValue(Ty), Elem.first(),
        /*InsertBefore=*/nullptr, llvm::GlobalValue::NotThreadLocal,
        AddressSpace);
  }

  return Elem.second;
}

Value *OpenMPIRBuilder::getOMPCriticalRegionLock(StringRef CriticalName) {
  std::string Prefix = Twine("gomp_critical_user_", CriticalName).str();
  std::string Name = getNameWithSeparators({Prefix, "var"}, ".", ".");
  return getOrCreateOMPInternalVariable(KmpCriticalNameTy, Name);
}

GlobalVariable *
OpenMPIRBuilder::createOffloadMaptypes(SmallVectorImpl<uint64_t> &Mappings,
                                       std::string VarName) {
  llvm::Constant *MaptypesArrayInit =
      llvm::ConstantDataArray::get(M.getContext(), Mappings);
  auto *MaptypesArrayGlobal = new llvm::GlobalVariable(
      M, MaptypesArrayInit->getType(),
      /*isConstant=*/true, llvm::GlobalValue::PrivateLinkage, MaptypesArrayInit,
      VarName);
  MaptypesArrayGlobal->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
  return MaptypesArrayGlobal;
}

void OpenMPIRBuilder::createMapperAllocas(const LocationDescription &Loc,
                                          InsertPointTy AllocaIP,
                                          unsigned NumOperands,
                                          struct MapperAllocas &MapperAllocas) {
  if (!updateToLocation(Loc))
    return;

  auto *ArrI8PtrTy = ArrayType::get(Int8Ptr, NumOperands);
  auto *ArrI64Ty = ArrayType::get(Int64, NumOperands);
  Builder.restoreIP(AllocaIP);
  AllocaInst *ArgsBase = Builder.CreateAlloca(ArrI8PtrTy);
  AllocaInst *Args = Builder.CreateAlloca(ArrI8PtrTy);
  AllocaInst *ArgSizes = Builder.CreateAlloca(ArrI64Ty);
  Builder.restoreIP(Loc.IP);
  MapperAllocas.ArgsBase = ArgsBase;
  MapperAllocas.Args = Args;
  MapperAllocas.ArgSizes = ArgSizes;
}

void OpenMPIRBuilder::emitMapperCall(const LocationDescription &Loc,
                                     Function *MapperFunc, Value *SrcLocInfo,
                                     Value *MaptypesArg, Value *MapnamesArg,
                                     struct MapperAllocas &MapperAllocas,
                                     int64_t DeviceID, unsigned NumOperands) {
  if (!updateToLocation(Loc))
    return;

  auto *ArrI8PtrTy = ArrayType::get(Int8Ptr, NumOperands);
  auto *ArrI64Ty = ArrayType::get(Int64, NumOperands);
  Value *ArgsBaseGEP =
      Builder.CreateInBoundsGEP(ArrI8PtrTy, MapperAllocas.ArgsBase,
                                {Builder.getInt32(0), Builder.getInt32(0)});
  Value *ArgsGEP =
      Builder.CreateInBoundsGEP(ArrI8PtrTy, MapperAllocas.Args,
                                {Builder.getInt32(0), Builder.getInt32(0)});
  Value *ArgSizesGEP =
      Builder.CreateInBoundsGEP(ArrI64Ty, MapperAllocas.ArgSizes,
                                {Builder.getInt32(0), Builder.getInt32(0)});
  Value *NullPtr = Constant::getNullValue(Int8Ptr->getPointerTo());
  Builder.CreateCall(MapperFunc,
                     {SrcLocInfo, Builder.getInt64(DeviceID),
                      Builder.getInt32(NumOperands), ArgsBaseGEP, ArgsGEP,
                      ArgSizesGEP, MaptypesArg, MapnamesArg, NullPtr});
}
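// Determine whether the OpenMP memory model requires an `omp flush` to
// accompany an atomic operation of kind AK under ordering AO, and emit the
// flush if so. Returns true iff a flush runtime call was emitted.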
bool OpenMPIRBuilder::checkAndEmitFlushAfterAtomic(
    const LocationDescription &Loc, llvm::AtomicOrdering AO, AtomicKind AK) {
  assert(!(AO == AtomicOrdering::NotAtomic ||
           AO == llvm::AtomicOrdering::Unordered) &&
         "Unexpected Atomic Ordering.");

  bool Flush = false;
  llvm::AtomicOrdering FlushAO = AtomicOrdering::Monotonic;

  switch (AK) {
  case Read:
    if (AO == AtomicOrdering::Acquire ||
        AO == AtomicOrdering::AcquireRelease ||
        AO == AtomicOrdering::SequentiallyConsistent) {
      FlushAO = AtomicOrdering::Acquire;
      Flush = true;
    }
    break;
  case Write:
  case Update:
    if (AO == AtomicOrdering::Release ||
        AO == AtomicOrdering::AcquireRelease ||
        AO == AtomicOrdering::SequentiallyConsistent) {
      FlushAO = AtomicOrdering::Release;
      Flush = true;
    }
    break;
  case Capture:
    switch (AO) {
    case AtomicOrdering::Acquire:
      FlushAO = AtomicOrdering::Acquire;
      Flush = true;
      break;
    case AtomicOrdering::Release:
      FlushAO = AtomicOrdering::Release;
      Flush = true;
      break;
    case AtomicOrdering::AcquireRelease:
    case AtomicOrdering::SequentiallyConsistent:
      FlushAO = AtomicOrdering::AcquireRelease;
      Flush = true;
      break;
    default:
      // Do nothing - leave silently.
      break;
    }
  }

  if (Flush) {
    // The flush runtime call does not (yet) take a memory ordering argument,
    // so we resolve which ordering would be required here but issue an
    // unordered flush for now.
    // TODO: Pass `FlushAO` once memory-ordering support is added.
    (void)FlushAO;
    emitFlush(Loc);
  }

  // For AO == AtomicOrdering::Monotonic and all other combinations we do
  // nothing.
  return Flush;
}

OpenMPIRBuilder::InsertPointTy
OpenMPIRBuilder::createAtomicRead(const LocationDescription &Loc,
                                  AtomicOpValue &X, AtomicOpValue &V,
                                  AtomicOrdering AO) {
  if (!updateToLocation(Loc))
    return Loc.IP;

  Type *XTy = X.Var->getType();
  assert(XTy->isPointerTy() && "OMP Atomic expects a pointer to target memory");
  Type *XElemTy = XTy->getPointerElementType();
  assert((XElemTy->isFloatingPointTy() || XElemTy->isIntegerTy() ||
          XElemTy->isPointerTy()) &&
         "OMP atomic read expected a scalar type");

  Value *XRead = nullptr;

  if (XElemTy->isIntegerTy()) {
    LoadInst *XLD =
        Builder.CreateLoad(XElemTy, X.Var, X.IsVolatile, "omp.atomic.read");
    XLD->setAtomic(AO);
    XRead = cast<Value>(XLD);
  } else {
    // We need to bitcast and perform the atomic op as an integer.
    unsigned Addrspace = cast<PointerType>(XTy)->getAddressSpace();
    IntegerType *IntCastTy =
        IntegerType::get(M.getContext(), XElemTy->getScalarSizeInBits());
    Value *XBCast = Builder.CreateBitCast(
        X.Var, IntCastTy->getPointerTo(Addrspace), "atomic.src.int.cast");
    LoadInst *XLoad =
        Builder.CreateLoad(IntCastTy, XBCast, X.IsVolatile, "omp.atomic.load");
    XLoad->setAtomic(AO);
    if (XElemTy->isFloatingPointTy()) {
      XRead = Builder.CreateBitCast(XLoad, XElemTy, "atomic.flt.cast");
    } else {
      XRead = Builder.CreateIntToPtr(XLoad, XElemTy, "atomic.ptr.cast");
    }
  }
  checkAndEmitFlushAfterAtomic(Loc, AO, AtomicKind::Read);
  Builder.CreateStore(XRead, V.Var, V.IsVolatile);
  return Builder.saveIP();
}
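// Example usage (illustrative sketch only; `OMPBuilder`, `Loc`, `XAddr`, and
// `VAddr` are hypothetical): emit `v = x;` as an `omp atomic read` with
// acquire ordering. Only the `Var` member needs to be set here; the other
// members keep their defaults.
//   OpenMPIRBuilder::AtomicOpValue X, V;
//   X.Var = XAddr;
//   V.Var = VAddr;
//   OMPBuilder.createAtomicRead(Loc, X, V, AtomicOrdering::Acquire);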
OpenMPIRBuilder::InsertPointTy
OpenMPIRBuilder::createAtomicWrite(const LocationDescription &Loc,
                                   AtomicOpValue &X, Value *Expr,
                                   AtomicOrdering AO) {
  if (!updateToLocation(Loc))
    return Loc.IP;

  Type *XTy = X.Var->getType();
  assert(XTy->isPointerTy() && "OMP Atomic expects a pointer to target memory");
  Type *XElemTy = XTy->getPointerElementType();
  assert((XElemTy->isFloatingPointTy() || XElemTy->isIntegerTy() ||
          XElemTy->isPointerTy()) &&
         "OMP atomic write expected a scalar type");

  if (XElemTy->isIntegerTy()) {
    StoreInst *XSt = Builder.CreateStore(Expr, X.Var, X.IsVolatile);
    XSt->setAtomic(AO);
  } else {
    // We need to bitcast and perform the atomic op as an integer.
    unsigned Addrspace = cast<PointerType>(XTy)->getAddressSpace();
    IntegerType *IntCastTy =
        IntegerType::get(M.getContext(), XElemTy->getScalarSizeInBits());
    Value *XBCast = Builder.CreateBitCast(
        X.Var, IntCastTy->getPointerTo(Addrspace), "atomic.dst.int.cast");
    Value *ExprCast =
        Builder.CreateBitCast(Expr, IntCastTy, "atomic.src.int.cast");
    StoreInst *XSt = Builder.CreateStore(ExprCast, XBCast, X.IsVolatile);
    XSt->setAtomic(AO);
  }

  checkAndEmitFlushAfterAtomic(Loc, AO, AtomicKind::Write);
  return Builder.saveIP();
}

OpenMPIRBuilder::InsertPointTy OpenMPIRBuilder::createAtomicUpdate(
    const LocationDescription &Loc, Instruction *AllocIP, AtomicOpValue &X,
    Value *Expr, AtomicOrdering AO, AtomicRMWInst::BinOp RMWOp,
    AtomicUpdateCallbackTy &UpdateOp, bool IsXBinopExpr) {
  if (!updateToLocation(Loc))
    return Loc.IP;

  LLVM_DEBUG({
    Type *XTy = X.Var->getType();
    assert(XTy->isPointerTy() &&
           "OMP Atomic expects a pointer to target memory");
    Type *XElemTy = XTy->getPointerElementType();
    assert((XElemTy->isFloatingPointTy() || XElemTy->isIntegerTy() ||
            XElemTy->isPointerTy()) &&
           "OMP atomic update expected a scalar type");
    assert((RMWOp != AtomicRMWInst::Max) && (RMWOp != AtomicRMWInst::Min) &&
           (RMWOp != AtomicRMWInst::UMax) && (RMWOp != AtomicRMWInst::UMin) &&
           "OpenMP atomic does not support LT or GT operations");
  });

  emitAtomicUpdate(AllocIP, X.Var, Expr, AO, RMWOp, UpdateOp, X.IsVolatile,
                   IsXBinopExpr);
  checkAndEmitFlushAfterAtomic(Loc, AO, AtomicKind::Update);
  return Builder.saveIP();
}

Value *OpenMPIRBuilder::emitRMWOpAsInstruction(Value *Src1, Value *Src2,
                                               AtomicRMWInst::BinOp RMWOp) {
  switch (RMWOp) {
  case AtomicRMWInst::Add:
    return Builder.CreateAdd(Src1, Src2);
  case AtomicRMWInst::Sub:
    return Builder.CreateSub(Src1, Src2);
  case AtomicRMWInst::And:
    return Builder.CreateAnd(Src1, Src2);
  case AtomicRMWInst::Nand:
    // Nand is the bitwise negation of the conjunction.
    return Builder.CreateNot(Builder.CreateAnd(Src1, Src2));
  case AtomicRMWInst::Or:
    return Builder.CreateOr(Src1, Src2);
  case AtomicRMWInst::Xor:
    return Builder.CreateXor(Src1, Src2);
  case AtomicRMWInst::Xchg:
  case AtomicRMWInst::FAdd:
  case AtomicRMWInst::FSub:
  case AtomicRMWInst::BAD_BINOP:
  case AtomicRMWInst::Max:
  case AtomicRMWInst::Min:
  case AtomicRMWInst::UMax:
  case AtomicRMWInst::UMin:
    llvm_unreachable("Unsupported atomic update operation");
  }
  llvm_unreachable("Unsupported atomic update operation");
}
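// Emit the update part of an atomic construct. A single `atomicrmw` is used
// whenever the operation maps directly onto one; otherwise (floating-point
// add/sub, a user-supplied update expression, or the non-commutative
// `x = expr - x` form) a compare-and-exchange retry loop is built instead.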
std::pair<Value *, Value *>
OpenMPIRBuilder::emitAtomicUpdate(Instruction *AllocIP, Value *X, Value *Expr,
                                  AtomicOrdering AO, AtomicRMWInst::BinOp RMWOp,
                                  AtomicUpdateCallbackTy &UpdateOp,
                                  bool VolatileX, bool IsXBinopExpr) {
  Type *XElemTy = X->getType()->getPointerElementType();

  bool DoCmpExch =
      ((RMWOp == AtomicRMWInst::BAD_BINOP) ||
       (RMWOp == AtomicRMWInst::FAdd)) ||
      (RMWOp == AtomicRMWInst::FSub) ||
      (RMWOp == AtomicRMWInst::Sub && !IsXBinopExpr);

  std::pair<Value *, Value *> Res;
  if (XElemTy->isIntegerTy() && !DoCmpExch) {
    Res.first = Builder.CreateAtomicRMW(RMWOp, X, Expr, llvm::MaybeAlign(), AO);
    // Not needed except in case of postfix captures; generated anyway for
    // consistency with the else branch. Will be removed by any DCE pass.
    Res.second = emitRMWOpAsInstruction(Res.first, Expr, RMWOp);
  } else {
    unsigned Addrspace = cast<PointerType>(X->getType())->getAddressSpace();
    IntegerType *IntCastTy =
        IntegerType::get(M.getContext(), XElemTy->getScalarSizeInBits());
    Value *XBCast =
        Builder.CreateBitCast(X, IntCastTy->getPointerTo(Addrspace));
    LoadInst *OldVal =
        Builder.CreateLoad(IntCastTy, XBCast, X->getName() + ".atomic.load");
    OldVal->setAtomic(AO);
    // CurBB
    //     |     /---\
    // ContBB    |
    //     |     \---/
    // ExitBB
    BasicBlock *CurBB = Builder.GetInsertBlock();
    Instruction *CurBBTI = CurBB->getTerminator();
    CurBBTI = CurBBTI ? CurBBTI : Builder.CreateUnreachable();
    BasicBlock *ExitBB =
        CurBB->splitBasicBlock(CurBBTI, X->getName() + ".atomic.exit");
    BasicBlock *ContBB = CurBB->splitBasicBlock(CurBB->getTerminator(),
                                                X->getName() + ".atomic.cont");
    ContBB->getTerminator()->eraseFromParent();
    Builder.SetInsertPoint(ContBB);
    llvm::PHINode *PHI = Builder.CreatePHI(OldVal->getType(), 2);
    PHI->addIncoming(OldVal, CurBB);
    AllocaInst *NewAtomicAddr = Builder.CreateAlloca(XElemTy);
    NewAtomicAddr->setName(X->getName() + ".new.val");
    NewAtomicAddr->moveBefore(AllocIP);
    IntegerType *NewAtomicCastTy =
        IntegerType::get(M.getContext(), XElemTy->getScalarSizeInBits());
    bool IsIntTy = XElemTy->isIntegerTy();
    Value *NewAtomicIntAddr =
        (IsIntTy)
            ? NewAtomicAddr
            : Builder.CreateBitCast(NewAtomicAddr,
                                    NewAtomicCastTy->getPointerTo(Addrspace));
    Value *OldExprVal = PHI;
    if (!IsIntTy) {
      if (XElemTy->isFloatingPointTy()) {
        OldExprVal = Builder.CreateBitCast(PHI, XElemTy,
                                           X->getName() + ".atomic.fltCast");
      } else {
        OldExprVal = Builder.CreateIntToPtr(PHI, XElemTy,
                                            X->getName() + ".atomic.ptrCast");
      }
    }

    Value *Upd = UpdateOp(OldExprVal, Builder);
    Builder.CreateStore(Upd, NewAtomicAddr);
    LoadInst *DesiredVal = Builder.CreateLoad(IntCastTy, NewAtomicIntAddr);
    Value *XAddr =
        (IsIntTy)
            ? X
            : Builder.CreateBitCast(X, IntCastTy->getPointerTo(Addrspace));
    AtomicOrdering Failure =
        llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO);
    // Compare-exchange on the integer representation; the PHI carries the
    // integer old value, so the types of all cmpxchg operands line up.
    AtomicCmpXchgInst *Result = Builder.CreateAtomicCmpXchg(
        XAddr, PHI, DesiredVal, llvm::MaybeAlign(), AO, Failure);
    Result->setVolatile(VolatileX);
    Value *PreviousVal = Builder.CreateExtractValue(Result, /*Idxs=*/0);
    Value *SuccessFailureVal = Builder.CreateExtractValue(Result, /*Idxs=*/1);
    PHI->addIncoming(PreviousVal, Builder.GetInsertBlock());
    Builder.CreateCondBr(SuccessFailureVal, ExitBB, ContBB);

    Res.first = OldExprVal;
    Res.second = Upd;

    // Set the insertion point in the exit block: if we created the
    // unreachable terminator above, drop it again; otherwise keep the
    // pre-existing terminator and insert before it.
    if (isa<UnreachableInst>(ExitBB->getTerminator())) {
      CurBBTI->eraseFromParent();
      Builder.SetInsertPoint(ExitBB);
    } else {
      Builder.SetInsertPoint(ExitBB->getTerminator());
    }
  }

  return Res;
}
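// Example (illustrative): for the postfix capture form
//   { v = x; x = x binop expr; }
// IsPostfixUpdate is true and the value of 'x' from before the update is
// stored to 'v'; for the prefix form
//   { x = x binop expr; v = x; }
// the updated value is stored instead.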
OpenMPIRBuilder::InsertPointTy OpenMPIRBuilder::createAtomicCapture(
    const LocationDescription &Loc, Instruction *AllocIP, AtomicOpValue &X,
    AtomicOpValue &V, Value *Expr, AtomicOrdering AO,
    AtomicRMWInst::BinOp RMWOp, AtomicUpdateCallbackTy &UpdateOp,
    bool UpdateExpr, bool IsPostfixUpdate, bool IsXBinopExpr) {
  if (!updateToLocation(Loc))
    return Loc.IP;

  LLVM_DEBUG({
    Type *XTy = X.Var->getType();
    assert(XTy->isPointerTy() &&
           "OMP Atomic expects a pointer to target memory");
    Type *XElemTy = XTy->getPointerElementType();
    assert((XElemTy->isFloatingPointTy() || XElemTy->isIntegerTy() ||
            XElemTy->isPointerTy()) &&
           "OMP atomic capture expected a scalar type");
    assert((RMWOp != AtomicRMWInst::Max) && (RMWOp != AtomicRMWInst::Min) &&
           "OpenMP atomic does not support LT or GT operations");
  });

  // If UpdateExpr is false, i.e. 'x' is assigned some `expr` that is not
  // based on 'x', 'x' is simply atomically exchanged with 'expr'.
  AtomicRMWInst::BinOp AtomicOp = (UpdateExpr ? RMWOp : AtomicRMWInst::Xchg);
  std::pair<Value *, Value *> Result = emitAtomicUpdate(
      AllocIP, X.Var, Expr, AO, AtomicOp, UpdateOp, X.IsVolatile, IsXBinopExpr);

  Value *CapturedVal = (IsPostfixUpdate ? Result.first : Result.second);
  Builder.CreateStore(CapturedVal, V.Var, V.IsVolatile);

  checkAndEmitFlushAfterAtomic(Loc, AO, AtomicKind::Capture);
  return Builder.saveIP();
}

GlobalVariable *
OpenMPIRBuilder::createOffloadMapnames(SmallVectorImpl<llvm::Constant *> &Names,
                                       std::string VarName) {
  llvm::Constant *MapNamesArrayInit = llvm::ConstantArray::get(
      llvm::ArrayType::get(
          llvm::Type::getInt8Ty(M.getContext())->getPointerTo(), Names.size()),
      Names);
  auto *MapNamesArrayGlobal = new llvm::GlobalVariable(
      M, MapNamesArrayInit->getType(),
      /*isConstant=*/true, llvm::GlobalValue::PrivateLinkage, MapNamesArrayInit,
      VarName);
  return MapNamesArrayGlobal;
}

// Create all simple and struct types exposed by the runtime and remember
// the llvm::PointerTypes of them for easy access later.
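// For example (illustrative; see OMPKinds.def for the authoritative list), a
// struct entry such as
//   OMP_STRUCT_TYPE(Ident, "struct.ident_t", Int32, Int32, Int32, Int32,
//                   Int8Ptr)
// looks up or creates the named struct type and initializes both the Ident
// and IdentPtr members.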
void OpenMPIRBuilder::initializeTypes(Module &M) {
  LLVMContext &Ctx = M.getContext();
  StructType *T;
#define OMP_TYPE(VarName, InitValue) VarName = InitValue;
#define OMP_ARRAY_TYPE(VarName, ElemTy, ArraySize)                            \
  VarName##Ty = ArrayType::get(ElemTy, ArraySize);                            \
  VarName##PtrTy = PointerType::getUnqual(VarName##Ty);
#define OMP_FUNCTION_TYPE(VarName, IsVarArg, ReturnType, ...)                 \
  VarName = FunctionType::get(ReturnType, {__VA_ARGS__}, IsVarArg);           \
  VarName##Ptr = PointerType::getUnqual(VarName);
#define OMP_STRUCT_TYPE(VarName, StructName, ...)                             \
  T = StructType::getTypeByName(Ctx, StructName);                             \
  if (!T)                                                                     \
    T = StructType::create(Ctx, {__VA_ARGS__}, StructName);                   \
  VarName = T;                                                                \
  VarName##Ptr = PointerType::getUnqual(T);
#include "llvm/Frontend/OpenMP/OMPKinds.def"
}

void OpenMPIRBuilder::OutlineInfo::collectBlocks(
    SmallPtrSetImpl<BasicBlock *> &BlockSet,
    SmallVectorImpl<BasicBlock *> &BlockVector) {
  SmallVector<BasicBlock *, 32> Worklist;
  BlockSet.insert(EntryBB);
  BlockSet.insert(ExitBB);

  Worklist.push_back(EntryBB);
  while (!Worklist.empty()) {
    BasicBlock *BB = Worklist.pop_back_val();
    BlockVector.push_back(BB);
    for (BasicBlock *SuccBB : successors(BB))
      if (BlockSet.insert(SuccBB).second)
        Worklist.push_back(SuccBB);
  }
}

void CanonicalLoopInfo::collectControlBlocks(
    SmallVectorImpl<BasicBlock *> &BBs) {
  // We only count those BBs as control blocks for which we do not need to
  // traverse the CFG, i.e. not the loop body, which can contain arbitrary
  // control flow. For consistency, this also means we do not add the Body
  // block, which is just the entry to the body code.
  BBs.reserve(BBs.size() + 6);
  BBs.append({getPreheader(), Header, Cond, Latch, Exit, getAfter()});
}

BasicBlock *CanonicalLoopInfo::getPreheader() const {
  assert(isValid() && "Requires a valid canonical loop");
  for (BasicBlock *Pred : predecessors(Header)) {
    if (Pred != Latch)
      return Pred;
  }
  llvm_unreachable("Missing preheader");
}

void CanonicalLoopInfo::assertOK() const {
#ifndef NDEBUG
  // No constraints if this object currently does not describe a loop.
  if (!isValid())
    return;

  BasicBlock *Preheader = getPreheader();
  BasicBlock *Body = getBody();
  BasicBlock *After = getAfter();

  // Verify the standard control flow we use for OpenMP loops.
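  // The shape encoded by the assertions below is:
  //
  //   Preheader -> Header -> Cond -(true)-> Body -> ... -> Latch -> Header
  //                          Cond -(false)-> Exit -> After
  //
  // with the Latch -> Header edge being the sole backedge.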
  assert(Preheader);
  assert(isa<BranchInst>(Preheader->getTerminator()) &&
         "Preheader must terminate with unconditional branch");
  assert(Preheader->getSingleSuccessor() == Header &&
         "Preheader must jump to header");

  assert(Header);
  assert(isa<BranchInst>(Header->getTerminator()) &&
         "Header must terminate with unconditional branch");
  assert(Header->getSingleSuccessor() == Cond &&
         "Header must jump to exiting block");

  assert(Cond);
  assert(Cond->getSinglePredecessor() == Header &&
         "Exiting block only reachable from header");

  assert(isa<BranchInst>(Cond->getTerminator()) &&
         "Exiting block must terminate with conditional branch");
  assert(size(successors(Cond)) == 2 &&
         "Exiting block must have two successors");
  assert(cast<BranchInst>(Cond->getTerminator())->getSuccessor(0) == Body &&
         "Exiting block's first successor must jump to the body");
  assert(cast<BranchInst>(Cond->getTerminator())->getSuccessor(1) == Exit &&
         "Exiting block's second successor must exit the loop");

  assert(Body);
  assert(Body->getSinglePredecessor() == Cond &&
         "Body only reachable from exiting block");
  assert(!isa<PHINode>(Body->front()));

  assert(Latch);
  assert(isa<BranchInst>(Latch->getTerminator()) &&
         "Latch must terminate with unconditional branch");
  assert(Latch->getSingleSuccessor() == Header && "Latch must jump to header");
  // TODO: To support simple redirecting of the end of the body code that has
  // multiple exits, introduce another auxiliary basic block like preheader
  // and after.
  assert(Latch->getSinglePredecessor() != nullptr);
  assert(!isa<PHINode>(Latch->front()));

  assert(Exit);
  assert(isa<BranchInst>(Exit->getTerminator()) &&
         "Exit block must terminate with unconditional branch");
  assert(Exit->getSingleSuccessor() == After &&
         "Exit block must jump to after block");

  assert(After);
  assert(After->getSinglePredecessor() == Exit &&
         "After block only reachable from exit block");
  assert(After->empty() || !isa<PHINode>(After->front()));

  Instruction *IndVar = getIndVar();
  assert(IndVar && "Canonical induction variable not found?");
  assert(isa<IntegerType>(IndVar->getType()) &&
         "Induction variable must be an integer");
  assert(cast<PHINode>(IndVar)->getParent() == Header &&
         "Induction variable must be a PHI in the loop header");
  assert(cast<PHINode>(IndVar)->getIncomingBlock(0) == Preheader);
  assert(
      cast<ConstantInt>(cast<PHINode>(IndVar)->getIncomingValue(0))->isZero());
  assert(cast<PHINode>(IndVar)->getIncomingBlock(1) == Latch);

  auto *NextIndVar = cast<PHINode>(IndVar)->getIncomingValue(1);
  assert(cast<Instruction>(NextIndVar)->getParent() == Latch);
  assert(cast<BinaryOperator>(NextIndVar)->getOpcode() == BinaryOperator::Add);
  assert(cast<BinaryOperator>(NextIndVar)->getOperand(0) == IndVar);
  assert(cast<ConstantInt>(cast<BinaryOperator>(NextIndVar)->getOperand(1))
             ->isOne());

  Value *TripCount = getTripCount();
  assert(TripCount && "Loop trip count not found?");
  assert(IndVar->getType() == TripCount->getType() &&
         "Trip count and induction variable must have the same type");

  auto *CmpI = cast<CmpInst>(&Cond->front());
  assert(CmpI->getPredicate() == CmpInst::ICMP_ULT &&
         "Exit condition must be an unsigned less-than comparison");
  assert(CmpI->getOperand(0) == IndVar &&
         "Exit condition must compare the induction variable");
  assert(CmpI->getOperand(1) == TripCount &&
         "Exit condition must compare with the trip count");
#endif
}
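// Mark this object as no longer describing a canonical loop; isValid() will
// return false afterwards and the block accessors must not be used anymore.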
void CanonicalLoopInfo::invalidate() {
  Header = nullptr;
  Cond = nullptr;
  Latch = nullptr;
  Exit = nullptr;
}