//===---- CGOpenMPRuntimeGPU.cpp - Interface to OpenMP GPU Runtimes ----===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This provides a generalized class for OpenMP runtime code generation
// specialized by GPU targets NVPTX and AMDGCN.
//
//===----------------------------------------------------------------------===//

#include "CGOpenMPRuntimeGPU.h"
#include "CodeGenFunction.h"
#include "clang/AST/Attr.h"
#include "clang/AST/DeclOpenMP.h"
#include "clang/AST/StmtOpenMP.h"
#include "clang/AST/StmtVisitor.h"
#include "clang/Basic/Cuda.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/Frontend/OpenMP/OMPGridValues.h"
#include "llvm/Support/MathExtras.h"

using namespace clang;
using namespace CodeGen;
using namespace llvm::omp;

namespace {
/// Pre(post)-action for different OpenMP constructs specialized for NVPTX.
class NVPTXActionTy final : public PrePostActionTy {
  llvm::FunctionCallee EnterCallee = nullptr;
  ArrayRef<llvm::Value *> EnterArgs;
  llvm::FunctionCallee ExitCallee = nullptr;
  ArrayRef<llvm::Value *> ExitArgs;
  bool Conditional = false;
  llvm::BasicBlock *ContBlock = nullptr;

public:
  NVPTXActionTy(llvm::FunctionCallee EnterCallee,
                ArrayRef<llvm::Value *> EnterArgs,
                llvm::FunctionCallee ExitCallee,
                ArrayRef<llvm::Value *> ExitArgs, bool Conditional = false)
      : EnterCallee(EnterCallee), EnterArgs(EnterArgs), ExitCallee(ExitCallee),
        ExitArgs(ExitArgs), Conditional(Conditional) {}
  void Enter(CodeGenFunction &CGF) override {
    llvm::Value *EnterRes = CGF.EmitRuntimeCall(EnterCallee, EnterArgs);
    if (Conditional) {
      llvm::Value *CallBool = CGF.Builder.CreateIsNotNull(EnterRes);
      auto *ThenBlock = CGF.createBasicBlock("omp_if.then");
      ContBlock = CGF.createBasicBlock("omp_if.end");
      // Generate the branch (If-stmt)
      CGF.Builder.CreateCondBr(CallBool, ThenBlock, ContBlock);
      CGF.EmitBlock(ThenBlock);
    }
  }
  void Done(CodeGenFunction &CGF) {
    // Emit the rest of blocks/branches
    CGF.EmitBranch(ContBlock);
    CGF.EmitBlock(ContBlock, true);
  }
  void Exit(CodeGenFunction &CGF) override {
    CGF.EmitRuntimeCall(ExitCallee, ExitArgs);
  }
};

/// A class to track the execution mode when codegening directives within
/// a target region. The appropriate mode (SPMD|NON-SPMD) is set on entry
/// to the target region and used by containing directives such as 'parallel'
/// to emit optimized code.
class ExecutionRuntimeModesRAII {
private:
  CGOpenMPRuntimeGPU::ExecutionMode SavedExecMode =
      CGOpenMPRuntimeGPU::EM_Unknown;
  CGOpenMPRuntimeGPU::ExecutionMode &ExecMode;
  bool SavedRuntimeMode = false;
  bool *RuntimeMode = nullptr;

public:
  /// Constructor for Non-SPMD mode.
  ExecutionRuntimeModesRAII(CGOpenMPRuntimeGPU::ExecutionMode &ExecMode)
      : ExecMode(ExecMode) {
    SavedExecMode = ExecMode;
    ExecMode = CGOpenMPRuntimeGPU::EM_NonSPMD;
  }
  /// Constructor for SPMD mode.
  ExecutionRuntimeModesRAII(CGOpenMPRuntimeGPU::ExecutionMode &ExecMode,
                            bool &RuntimeMode, bool FullRuntimeMode)
      : ExecMode(ExecMode), RuntimeMode(&RuntimeMode) {
    SavedExecMode = ExecMode;
    SavedRuntimeMode = RuntimeMode;
    ExecMode = CGOpenMPRuntimeGPU::EM_SPMD;
    RuntimeMode = FullRuntimeMode;
  }
  ~ExecutionRuntimeModesRAII() {
    ExecMode = SavedExecMode;
    if (RuntimeMode)
      *RuntimeMode = SavedRuntimeMode;
  }
};

/// GPU Configuration: This information can be derived from cuda registers,
/// however, providing compile time constants helps generate more efficient
/// code. For all practical purposes this is fine because the configuration
/// is the same for all known NVPTX architectures.
enum MachineConfiguration : unsigned {
  /// See "llvm/Frontend/OpenMP/OMPGridValues.h" for various related target
  /// specific Grid Values like GV_Warp_Size, GV_Slot_Size

  /// Global memory alignment for performance.
  GlobalMemoryAlignment = 128,

  /// Maximal size of the shared memory buffer.
  SharedMemorySize = 128,
};

static const ValueDecl *getPrivateItem(const Expr *RefExpr) {
  RefExpr = RefExpr->IgnoreParens();
  if (const auto *ASE = dyn_cast<ArraySubscriptExpr>(RefExpr)) {
    const Expr *Base = ASE->getBase()->IgnoreParenImpCasts();
    while (const auto *TempASE = dyn_cast<ArraySubscriptExpr>(Base))
      Base = TempASE->getBase()->IgnoreParenImpCasts();
    RefExpr = Base;
  } else if (auto *OASE = dyn_cast<OMPArraySectionExpr>(RefExpr)) {
    const Expr *Base = OASE->getBase()->IgnoreParenImpCasts();
    while (const auto *TempOASE = dyn_cast<OMPArraySectionExpr>(Base))
      Base = TempOASE->getBase()->IgnoreParenImpCasts();
    while (const auto *TempASE = dyn_cast<ArraySubscriptExpr>(Base))
      Base = TempASE->getBase()->IgnoreParenImpCasts();
    RefExpr = Base;
  }
  RefExpr = RefExpr->IgnoreParenImpCasts();
  if (const auto *DE = dyn_cast<DeclRefExpr>(RefExpr))
    return cast<ValueDecl>(DE->getDecl()->getCanonicalDecl());
  const auto *ME = cast<MemberExpr>(RefExpr);
  return cast<ValueDecl>(ME->getMemberDecl()->getCanonicalDecl());
}

static RecordDecl *buildRecordForGlobalizedVars(
    ASTContext &C, ArrayRef<const ValueDecl *> EscapedDecls,
    ArrayRef<const ValueDecl *> EscapedDeclsForTeams,
    llvm::SmallDenseMap<const ValueDecl *, const FieldDecl *>
        &MappedDeclsFields, int BufSize) {
  using VarsDataTy = std::pair<CharUnits /*Align*/, const ValueDecl *>;
  if (EscapedDecls.empty() && EscapedDeclsForTeams.empty())
    return nullptr;
  SmallVector<VarsDataTy, 4> GlobalizedVars;
  for (const ValueDecl *D : EscapedDecls)
    GlobalizedVars.emplace_back(
        CharUnits::fromQuantity(std::max(
            C.getDeclAlign(D).getQuantity(),
            static_cast<CharUnits::QuantityType>(GlobalMemoryAlignment))),
        D);
  for (const ValueDecl *D : EscapedDeclsForTeams)
    GlobalizedVars.emplace_back(C.getDeclAlign(D), D);
  llvm::stable_sort(GlobalizedVars, [](VarsDataTy L, VarsDataTy R) {
    return L.first > R.first;
  });

  // Build struct _globalized_locals_ty {
  //         /* globalized vars */[WarpSize] align (max(decl_align,
  //         GlobalMemoryAlignment))
  //         /* globalized vars */ for EscapedDeclsForTeams
  //       };
  RecordDecl *GlobalizedRD = C.buildImplicitRecord("_globalized_locals_ty");
  GlobalizedRD->startDefinition();
  llvm::SmallPtrSet<const ValueDecl *, 16> SingleEscaped(
      EscapedDeclsForTeams.begin(), EscapedDeclsForTeams.end());
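  // Create one field per globalized variable: a variable that escapes only to
  // the teams region keeps a single slot with its declared alignment, while
  // any other escapee gets an array of BufSize elements (callers pass the warp
  // size) aligned to at least GlobalMemoryAlignment.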
  for (const auto &Pair : GlobalizedVars) {
    const ValueDecl *VD = Pair.second;
    QualType Type = VD->getType();
    if (Type->isLValueReferenceType())
      Type = C.getPointerType(Type.getNonReferenceType());
    else
      Type = Type.getNonReferenceType();
    SourceLocation Loc = VD->getLocation();
    FieldDecl *Field;
    if (SingleEscaped.count(VD)) {
      Field = FieldDecl::Create(
          C, GlobalizedRD, Loc, Loc, VD->getIdentifier(), Type,
          C.getTrivialTypeSourceInfo(Type, SourceLocation()),
          /*BW=*/nullptr, /*Mutable=*/false,
          /*InitStyle=*/ICIS_NoInit);
      Field->setAccess(AS_public);
      if (VD->hasAttrs()) {
        for (specific_attr_iterator<AlignedAttr> I(VD->getAttrs().begin()),
             E(VD->getAttrs().end());
             I != E; ++I)
          Field->addAttr(*I);
      }
    } else {
      llvm::APInt ArraySize(32, BufSize);
      Type = C.getConstantArrayType(Type, ArraySize, nullptr,
                                    ArrayType::Normal, 0);
      Field = FieldDecl::Create(
          C, GlobalizedRD, Loc, Loc, VD->getIdentifier(), Type,
          C.getTrivialTypeSourceInfo(Type, SourceLocation()),
          /*BW=*/nullptr, /*Mutable=*/false,
          /*InitStyle=*/ICIS_NoInit);
      Field->setAccess(AS_public);
      llvm::APInt Align(32, std::max(C.getDeclAlign(VD).getQuantity(),
                                     static_cast<CharUnits::QuantityType>(
                                         GlobalMemoryAlignment)));
      Field->addAttr(AlignedAttr::CreateImplicit(
          C, /*IsAlignmentExpr=*/true,
          IntegerLiteral::Create(C, Align,
                                 C.getIntTypeForBitwidth(32, /*Signed=*/0),
                                 SourceLocation()),
          {}, AttributeCommonInfo::AS_GNU, AlignedAttr::GNU_aligned));
    }
    GlobalizedRD->addDecl(Field);
    MappedDeclsFields.try_emplace(VD, Field);
  }
  GlobalizedRD->completeDefinition();
  return GlobalizedRD;
}

/// Get the list of variables that can escape their declaration context.
class CheckVarsEscapingDeclContext final
    : public ConstStmtVisitor<CheckVarsEscapingDeclContext> {
  CodeGenFunction &CGF;
  llvm::SetVector<const ValueDecl *> EscapedDecls;
  llvm::SetVector<const ValueDecl *> EscapedVariableLengthDecls;
  llvm::SmallPtrSet<const Decl *, 4> EscapedParameters;
  RecordDecl *GlobalizedRD = nullptr;
  llvm::SmallDenseMap<const ValueDecl *, const FieldDecl *> MappedDeclsFields;
  bool AllEscaped = false;
  bool IsForCombinedParallelRegion = false;

  void markAsEscaped(const ValueDecl *VD) {
    // Do not globalize declare target variables.
    if (!isa<VarDecl>(VD) ||
        OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD))
      return;
    VD = cast<ValueDecl>(VD->getCanonicalDecl());
    // Use user-specified allocation.
    if (VD->hasAttrs() && VD->hasAttr<OMPAllocateDeclAttr>())
      return;
    // Variables captured by value must be globalized.
    if (auto *CSI = CGF.CapturedStmtInfo) {
      if (const FieldDecl *FD = CSI->lookup(cast<VarDecl>(VD))) {
        // Check if need to capture the variable that was already captured by
        // value in the outer region.
        if (!IsForCombinedParallelRegion) {
          if (!FD->hasAttrs())
            return;
          const auto *Attr = FD->getAttr<OMPCaptureKindAttr>();
          if (!Attr)
            return;
          if (((Attr->getCaptureKind() != OMPC_map) &&
               !isOpenMPPrivate(Attr->getCaptureKind())) ||
              ((Attr->getCaptureKind() == OMPC_map) &&
               !FD->getType()->isAnyPointerType()))
            return;
        }
        if (!FD->getType()->isReferenceType()) {
          assert(!VD->getType()->isVariablyModifiedType() &&
                 "Parameter captured by value with variably modified type");
          EscapedParameters.insert(VD);
        } else if (!IsForCombinedParallelRegion) {
          return;
        }
      }
    }
    if ((!CGF.CapturedStmtInfo ||
         (IsForCombinedParallelRegion && CGF.CapturedStmtInfo)) &&
        VD->getType()->isReferenceType())
      // Do not globalize variables with reference type.
      return;
    if (VD->getType()->isVariablyModifiedType())
      EscapedVariableLengthDecls.insert(VD);
    else
      EscapedDecls.insert(VD);
  }

  void VisitValueDecl(const ValueDecl *VD) {
    if (VD->getType()->isLValueReferenceType())
      markAsEscaped(VD);
    if (const auto *VarD = dyn_cast<VarDecl>(VD)) {
      if (!isa<ParmVarDecl>(VarD) && VarD->hasInit()) {
        const bool SavedAllEscaped = AllEscaped;
        AllEscaped = VD->getType()->isLValueReferenceType();
        Visit(VarD->getInit());
        AllEscaped = SavedAllEscaped;
      }
    }
  }
  void VisitOpenMPCapturedStmt(const CapturedStmt *S,
                               ArrayRef<OMPClause *> Clauses,
                               bool IsCombinedParallelRegion) {
    if (!S)
      return;
    for (const CapturedStmt::Capture &C : S->captures()) {
      if (C.capturesVariable() && !C.capturesVariableByCopy()) {
        const ValueDecl *VD = C.getCapturedVar();
        bool SavedIsForCombinedParallelRegion = IsForCombinedParallelRegion;
        if (IsCombinedParallelRegion) {
          // Check if the variable is privatized in the combined construct and
          // those private copies must be shared in the inner parallel
          // directive.
          IsForCombinedParallelRegion = false;
          for (const OMPClause *C : Clauses) {
            if (!isOpenMPPrivate(C->getClauseKind()) ||
                C->getClauseKind() == OMPC_reduction ||
                C->getClauseKind() == OMPC_linear ||
                C->getClauseKind() == OMPC_private)
              continue;
            ArrayRef<const Expr *> Vars;
            if (const auto *PC = dyn_cast<OMPFirstprivateClause>(C))
              Vars = PC->getVarRefs();
            else if (const auto *PC = dyn_cast<OMPLastprivateClause>(C))
              Vars = PC->getVarRefs();
            else
              llvm_unreachable("Unexpected clause.");
            for (const auto *E : Vars) {
              const Decl *D =
                  cast<DeclRefExpr>(E)->getDecl()->getCanonicalDecl();
              if (D == VD->getCanonicalDecl()) {
                IsForCombinedParallelRegion = true;
                break;
              }
            }
            if (IsForCombinedParallelRegion)
              break;
          }
        }
        markAsEscaped(VD);
        if (isa<OMPCapturedExprDecl>(VD))
          VisitValueDecl(VD);
        IsForCombinedParallelRegion = SavedIsForCombinedParallelRegion;
      }
    }
  }

  void buildRecordForGlobalizedVars(bool IsInTTDRegion) {
    assert(!GlobalizedRD &&
           "Record for globalized variables is built already.");
    ArrayRef<const ValueDecl *> EscapedDeclsForParallel, EscapedDeclsForTeams;
    unsigned WarpSize = CGF.getTarget().getGridValue().GV_Warp_Size;
    if (IsInTTDRegion)
      EscapedDeclsForTeams = EscapedDecls.getArrayRef();
    else
      EscapedDeclsForParallel = EscapedDecls.getArrayRef();
    GlobalizedRD = ::buildRecordForGlobalizedVars(
        CGF.getContext(), EscapedDeclsForParallel, EscapedDeclsForTeams,
        MappedDeclsFields, WarpSize);
  }

public:
  CheckVarsEscapingDeclContext(CodeGenFunction &CGF,
                               ArrayRef<const ValueDecl *> TeamsReductions)
      : CGF(CGF), EscapedDecls(TeamsReductions.begin(), TeamsReductions.end()) {
  }
  virtual ~CheckVarsEscapingDeclContext() = default;
  void VisitDeclStmt(const DeclStmt *S) {
    if (!S)
      return;
    for (const Decl *D : S->decls())
      if (const auto *VD = dyn_cast_or_null<ValueDecl>(D))
        VisitValueDecl(VD);
  }
  void VisitOMPExecutableDirective(const OMPExecutableDirective *D) {
    if (!D)
      return;
    if (!D->hasAssociatedStmt())
      return;
    if (const auto *S =
            dyn_cast_or_null<CapturedStmt>(D->getAssociatedStmt())) {
      // Do not analyze directives that do not actually require capturing,
      // like `omp for` or `omp simd` directives.
      llvm::SmallVector<OpenMPDirectiveKind, 4> CaptureRegions;
      getOpenMPCaptureRegions(CaptureRegions, D->getDirectiveKind());
      if (CaptureRegions.size() == 1 && CaptureRegions.back() == OMPD_unknown) {
        VisitStmt(S->getCapturedStmt());
        return;
      }
      VisitOpenMPCapturedStmt(
          S, D->clauses(),
          CaptureRegions.back() == OMPD_parallel &&
              isOpenMPDistributeDirective(D->getDirectiveKind()));
    }
  }
  void VisitCapturedStmt(const CapturedStmt *S) {
    if (!S)
      return;
    for (const CapturedStmt::Capture &C : S->captures()) {
      if (C.capturesVariable() && !C.capturesVariableByCopy()) {
        const ValueDecl *VD = C.getCapturedVar();
        markAsEscaped(VD);
        if (isa<OMPCapturedExprDecl>(VD))
          VisitValueDecl(VD);
      }
    }
  }
  void VisitLambdaExpr(const LambdaExpr *E) {
    if (!E)
      return;
    for (const LambdaCapture &C : E->captures()) {
      if (C.capturesVariable()) {
        if (C.getCaptureKind() == LCK_ByRef) {
          const ValueDecl *VD = C.getCapturedVar();
          markAsEscaped(VD);
          if (E->isInitCapture(&C) || isa<OMPCapturedExprDecl>(VD))
            VisitValueDecl(VD);
        }
      }
    }
  }
  void VisitBlockExpr(const BlockExpr *E) {
    if (!E)
      return;
    for (const BlockDecl::Capture &C : E->getBlockDecl()->captures()) {
      if (C.isByRef()) {
        const VarDecl *VD = C.getVariable();
        markAsEscaped(VD);
        if (isa<OMPCapturedExprDecl>(VD) || VD->isInitCapture())
          VisitValueDecl(VD);
      }
    }
  }
  void VisitCallExpr(const CallExpr *E) {
    if (!E)
      return;
    for (const Expr *Arg : E->arguments()) {
      if (!Arg)
        continue;
      if (Arg->isLValue()) {
        const bool SavedAllEscaped = AllEscaped;
        AllEscaped = true;
        Visit(Arg);
        AllEscaped = SavedAllEscaped;
      } else {
        Visit(Arg);
      }
    }
    Visit(E->getCallee());
  }
  void VisitDeclRefExpr(const DeclRefExpr *E) {
    if (!E)
      return;
    const ValueDecl *VD = E->getDecl();
    if (AllEscaped)
      markAsEscaped(VD);
    if (isa<OMPCapturedExprDecl>(VD))
      VisitValueDecl(VD);
    else if (const auto *VarD = dyn_cast<VarDecl>(VD))
      if (VarD->isInitCapture())
        VisitValueDecl(VD);
  }
  void VisitUnaryOperator(const UnaryOperator *E) {
    if (!E)
      return;
    if (E->getOpcode() == UO_AddrOf) {
      const bool SavedAllEscaped = AllEscaped;
      AllEscaped = true;
      Visit(E->getSubExpr());
      AllEscaped = SavedAllEscaped;
    } else {
      Visit(E->getSubExpr());
    }
  }
  void VisitImplicitCastExpr(const ImplicitCastExpr *E) {
    if (!E)
      return;
    if (E->getCastKind() == CK_ArrayToPointerDecay) {
      const bool SavedAllEscaped = AllEscaped;
      AllEscaped = true;
      Visit(E->getSubExpr());
      AllEscaped = SavedAllEscaped;
    } else {
      Visit(E->getSubExpr());
    }
  }
  void VisitExpr(const Expr *E) {
    if (!E)
      return;
    bool SavedAllEscaped = AllEscaped;
    if (!E->isLValue())
      AllEscaped = false;
    for (const Stmt *Child : E->children())
      if (Child)
        Visit(Child);
    AllEscaped = SavedAllEscaped;
  }
  void VisitStmt(const Stmt *S) {
    if (!S)
      return;
    for (const Stmt *Child : S->children())
      if (Child)
        Visit(Child);
  }

  /// Returns the record that handles all the escaped local variables and is
  /// used instead of their original storage.
  const RecordDecl *getGlobalizedRecord(bool IsInTTDRegion) {
    if (!GlobalizedRD)
      buildRecordForGlobalizedVars(IsInTTDRegion);
    return GlobalizedRD;
  }

  /// Returns the field in the globalized record for the escaped variable.
  const FieldDecl *getFieldForGlobalizedVar(const ValueDecl *VD) const {
    assert(GlobalizedRD &&
           "Record for globalized variables must be generated already.");
    auto I = MappedDeclsFields.find(VD);
    if (I == MappedDeclsFields.end())
      return nullptr;
    return I->getSecond();
  }

  /// Returns the list of the escaped local variables/parameters.
  ArrayRef<const ValueDecl *> getEscapedDecls() const {
    return EscapedDecls.getArrayRef();
  }

  /// Checks if the escaped local variable is actually a parameter passed by
  /// value.
  const llvm::SmallPtrSetImpl<const Decl *> &getEscapedParameters() const {
    return EscapedParameters;
  }

  /// Returns the list of the escaped variables with the variably modified
  /// types.
  ArrayRef<const ValueDecl *> getEscapedVariableLengthDecls() const {
    return EscapedVariableLengthDecls.getArrayRef();
  }
};
} // anonymous namespace

/// Get the id of the warp in the block.
/// We assume that the warp size is 32, which is always the case
/// on the NVPTX device, to generate more efficient code.
static llvm::Value *getNVPTXWarpID(CodeGenFunction &CGF) {
  CGBuilderTy &Bld = CGF.Builder;
  unsigned LaneIDBits =
      llvm::Log2_32(CGF.getTarget().getGridValue().GV_Warp_Size);
  auto &RT = static_cast<CGOpenMPRuntimeGPU &>(CGF.CGM.getOpenMPRuntime());
  return Bld.CreateAShr(RT.getGPUThreadID(CGF), LaneIDBits, "nvptx_warp_id");
}

/// Get the id of the current lane in the Warp.
/// We assume that the warp size is 32, which is always the case
/// on the NVPTX device, to generate more efficient code.
static llvm::Value *getNVPTXLaneID(CodeGenFunction &CGF) {
  CGBuilderTy &Bld = CGF.Builder;
  unsigned LaneIDBits =
      llvm::Log2_32(CGF.getTarget().getGridValue().GV_Warp_Size);
  unsigned LaneIDMask = ~0u >> (32u - LaneIDBits);
  auto &RT = static_cast<CGOpenMPRuntimeGPU &>(CGF.CGM.getOpenMPRuntime());
  return Bld.CreateAnd(RT.getGPUThreadID(CGF), Bld.getInt32(LaneIDMask),
                       "nvptx_lane_id");
}

CGOpenMPRuntimeGPU::ExecutionMode
CGOpenMPRuntimeGPU::getExecutionMode() const {
  return CurrentExecutionMode;
}

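/// Data-sharing mode query: CUDA mode is selected with -fopenmp-cuda-mode,
/// otherwise the generic data-sharing scheme is used.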
static CGOpenMPRuntimeGPU::DataSharingMode
getDataSharingMode(CodeGenModule &CGM) {
  return CGM.getLangOpts().OpenMPCUDAMode ? CGOpenMPRuntimeGPU::CUDA
                                          : CGOpenMPRuntimeGPU::Generic;
}

/// Check for inner (nested) SPMD construct, if any
static bool hasNestedSPMDDirective(ASTContext &Ctx,
                                   const OMPExecutableDirective &D) {
  const auto *CS = D.getInnermostCapturedStmt();
  const auto *Body =
      CS->getCapturedStmt()->IgnoreContainers(/*IgnoreCaptured=*/true);
  const Stmt *ChildStmt = CGOpenMPRuntime::getSingleCompoundChild(Ctx, Body);

  if (const auto *NestedDir =
          dyn_cast_or_null<OMPExecutableDirective>(ChildStmt)) {
    OpenMPDirectiveKind DKind = NestedDir->getDirectiveKind();
    switch (D.getDirectiveKind()) {
    case OMPD_target:
      if (isOpenMPParallelDirective(DKind))
        return true;
      if (DKind == OMPD_teams) {
        Body = NestedDir->getInnermostCapturedStmt()->IgnoreContainers(
            /*IgnoreCaptured=*/true);
        if (!Body)
          return false;
        ChildStmt = CGOpenMPRuntime::getSingleCompoundChild(Ctx, Body);
        if (const auto *NND =
                dyn_cast_or_null<OMPExecutableDirective>(ChildStmt)) {
          DKind = NND->getDirectiveKind();
          if (isOpenMPParallelDirective(DKind))
            return true;
        }
      }
      return false;
    case OMPD_target_teams:
      return isOpenMPParallelDirective(DKind);
    case OMPD_target_simd:
    case OMPD_target_parallel:
    case OMPD_target_parallel_for:
    case OMPD_target_parallel_for_simd:
    case OMPD_target_teams_distribute:
    case OMPD_target_teams_distribute_simd:
    case OMPD_target_teams_distribute_parallel_for:
    case OMPD_target_teams_distribute_parallel_for_simd:
    case OMPD_parallel:
    case OMPD_for:
    case OMPD_parallel_for:
    case OMPD_parallel_master:
    case OMPD_parallel_sections:
    case OMPD_for_simd:
    case OMPD_parallel_for_simd:
    case OMPD_cancel:
    case OMPD_cancellation_point:
    case OMPD_ordered:
    case OMPD_threadprivate:
    case OMPD_allocate:
    case OMPD_task:
    case OMPD_simd:
    case OMPD_sections:
    case OMPD_section:
    case OMPD_single:
    case OMPD_master:
    case OMPD_critical:
    case OMPD_taskyield:
    case OMPD_barrier:
    case OMPD_taskwait:
    case OMPD_taskgroup:
    case OMPD_atomic:
    case OMPD_flush:
    case OMPD_depobj:
    case OMPD_scan:
    case OMPD_teams:
    case OMPD_target_data:
    case OMPD_target_exit_data:
    case OMPD_target_enter_data:
    case OMPD_distribute:
    case OMPD_distribute_simd:
    case OMPD_distribute_parallel_for:
    case OMPD_distribute_parallel_for_simd:
    case OMPD_teams_distribute:
    case OMPD_teams_distribute_simd:
    case OMPD_teams_distribute_parallel_for:
    case OMPD_teams_distribute_parallel_for_simd:
    case OMPD_target_update:
    case OMPD_declare_simd:
    case OMPD_declare_variant:
    case OMPD_begin_declare_variant:
    case OMPD_end_declare_variant:
    case OMPD_declare_target:
    case OMPD_end_declare_target:
    case OMPD_declare_reduction:
    case OMPD_declare_mapper:
    case OMPD_taskloop:
    case OMPD_taskloop_simd:
    case OMPD_master_taskloop:
    case OMPD_master_taskloop_simd:
    case OMPD_parallel_master_taskloop:
    case OMPD_parallel_master_taskloop_simd:
    case OMPD_requires:
    case OMPD_unknown:
    default:
      llvm_unreachable("Unexpected directive.");
    }
  }

  return false;
}

static bool supportsSPMDExecutionMode(ASTContext &Ctx,
                                      const OMPExecutableDirective &D) {
  OpenMPDirectiveKind DirectiveKind = D.getDirectiveKind();
  switch (DirectiveKind) {
  case OMPD_target:
  case OMPD_target_teams:
    return hasNestedSPMDDirective(Ctx, D);
  case OMPD_target_parallel:
  case OMPD_target_parallel_for:
  case OMPD_target_parallel_for_simd:
  case OMPD_target_teams_distribute_parallel_for:
  case OMPD_target_teams_distribute_parallel_for_simd:
  case OMPD_target_simd:
  case OMPD_target_teams_distribute_simd:
    return true;
  case OMPD_target_teams_distribute:
    return false;
  case OMPD_parallel:
  case OMPD_for:
  case OMPD_parallel_for:
  case OMPD_parallel_master:
  case OMPD_parallel_sections:
  case OMPD_for_simd:
  case OMPD_parallel_for_simd:
  case OMPD_cancel:
  case OMPD_cancellation_point:
  case OMPD_ordered:
  case OMPD_threadprivate:
  case OMPD_allocate:
  case OMPD_task:
  case OMPD_simd:
  case OMPD_sections:
  case OMPD_section:
  case OMPD_single:
  case OMPD_master:
  case OMPD_critical:
  case OMPD_taskyield:
  case OMPD_barrier:
  case OMPD_taskwait:
  case OMPD_taskgroup:
  case OMPD_atomic:
  case OMPD_flush:
  case OMPD_depobj:
  case OMPD_scan:
  case OMPD_teams:
  case OMPD_target_data:
  case OMPD_target_exit_data:
  case OMPD_target_enter_data:
  case OMPD_distribute:
  case OMPD_distribute_simd:
  case OMPD_distribute_parallel_for:
  case OMPD_distribute_parallel_for_simd:
  case OMPD_teams_distribute:
  case OMPD_teams_distribute_simd:
  case OMPD_teams_distribute_parallel_for:
  case OMPD_teams_distribute_parallel_for_simd:
  case OMPD_target_update:
  case OMPD_declare_simd:
  case OMPD_declare_variant:
  case OMPD_begin_declare_variant:
  case OMPD_end_declare_variant:
  case OMPD_declare_target:
  case OMPD_end_declare_target:
  case OMPD_declare_reduction:
  case OMPD_declare_mapper:
  case OMPD_taskloop:
  case OMPD_taskloop_simd:
  case OMPD_master_taskloop:
  case OMPD_master_taskloop_simd:
  case OMPD_parallel_master_taskloop:
  case OMPD_parallel_master_taskloop_simd:
  case OMPD_requires:
  case OMPD_unknown:
  default:
    break;
  }
  llvm_unreachable(
      "Unknown programming model for OpenMP directive on NVPTX target.");
}

/// Check if the loop-based directive has no ordered clause and either static
/// scheduling or no schedule clause at all.
static bool hasStaticScheduling(const OMPExecutableDirective &D) {
  assert(isOpenMPWorksharingDirective(D.getDirectiveKind()) &&
         isOpenMPLoopDirective(D.getDirectiveKind()) &&
         "Expected loop-based directive.");
  return !D.hasClausesOfKind<OMPOrderedClause>() &&
         (!D.hasClausesOfKind<OMPScheduleClause>() ||
          llvm::any_of(D.getClausesOfKind<OMPScheduleClause>(),
                       [](const OMPScheduleClause *C) {
                         return C->getScheduleKind() == OMPC_SCHEDULE_static;
                       }));
}

/// Check for inner (nested) lightweight runtime construct, if any
static bool hasNestedLightweightDirective(ASTContext &Ctx,
                                          const OMPExecutableDirective &D) {
  assert(supportsSPMDExecutionMode(Ctx, D) && "Expected SPMD mode directive.");
  const auto *CS = D.getInnermostCapturedStmt();
  const auto *Body =
      CS->getCapturedStmt()->IgnoreContainers(/*IgnoreCaptured=*/true);
  const Stmt *ChildStmt = CGOpenMPRuntime::getSingleCompoundChild(Ctx, Body);

  if (const auto *NestedDir =
          dyn_cast_or_null<OMPExecutableDirective>(ChildStmt)) {
    OpenMPDirectiveKind DKind = NestedDir->getDirectiveKind();
    switch (D.getDirectiveKind()) {
    case OMPD_target:
      if (isOpenMPParallelDirective(DKind) &&
          isOpenMPWorksharingDirective(DKind) && isOpenMPLoopDirective(DKind) &&
          hasStaticScheduling(*NestedDir))
        return true;
      if (DKind == OMPD_teams_distribute_simd || DKind == OMPD_simd)
        return true;
      if (DKind == OMPD_parallel) {
        Body = NestedDir->getInnermostCapturedStmt()->IgnoreContainers(
            /*IgnoreCaptured=*/true);
        if (!Body)
          return false;
        ChildStmt = CGOpenMPRuntime::getSingleCompoundChild(Ctx, Body);
        if (const auto *NND =
                dyn_cast_or_null<OMPExecutableDirective>(ChildStmt)) {
          DKind = NND->getDirectiveKind();
          if (isOpenMPWorksharingDirective(DKind) &&
              isOpenMPLoopDirective(DKind) && hasStaticScheduling(*NND))
            return true;
        }
      } else if (DKind == OMPD_teams) {
        Body = NestedDir->getInnermostCapturedStmt()->IgnoreContainers(
            /*IgnoreCaptured=*/true);
        if (!Body)
          return false;
        ChildStmt = CGOpenMPRuntime::getSingleCompoundChild(Ctx, Body);
        if (const auto *NND =
                dyn_cast_or_null<OMPExecutableDirective>(ChildStmt)) {
          DKind = NND->getDirectiveKind();
          if (isOpenMPParallelDirective(DKind) &&
              isOpenMPWorksharingDirective(DKind) &&
              isOpenMPLoopDirective(DKind) && hasStaticScheduling(*NND))
            return true;
          if (DKind == OMPD_parallel) {
            Body = NND->getInnermostCapturedStmt()->IgnoreContainers(
                /*IgnoreCaptured=*/true);
            if (!Body)
              return false;
            ChildStmt = CGOpenMPRuntime::getSingleCompoundChild(Ctx, Body);
            if (const auto *NND =
                    dyn_cast_or_null<OMPExecutableDirective>(ChildStmt)) {
              DKind = NND->getDirectiveKind();
              if (isOpenMPWorksharingDirective(DKind) &&
                  isOpenMPLoopDirective(DKind) && hasStaticScheduling(*NND))
                return true;
            }
          }
        }
      }
      return false;
    case OMPD_target_teams:
      if (isOpenMPParallelDirective(DKind) &&
          isOpenMPWorksharingDirective(DKind) && isOpenMPLoopDirective(DKind) &&
          hasStaticScheduling(*NestedDir))
        return true;
      if (DKind == OMPD_distribute_simd || DKind == OMPD_simd)
        return true;
      if (DKind == OMPD_parallel) {
        Body = NestedDir->getInnermostCapturedStmt()->IgnoreContainers(
            /*IgnoreCaptured=*/true);
        if (!Body)
          return false;
        ChildStmt = CGOpenMPRuntime::getSingleCompoundChild(Ctx, Body);
        if (const auto *NND =
                dyn_cast_or_null<OMPExecutableDirective>(ChildStmt)) {
          DKind = NND->getDirectiveKind();
          if (isOpenMPWorksharingDirective(DKind) &&
              isOpenMPLoopDirective(DKind) && hasStaticScheduling(*NND))
            return true;
        }
      }
      return false;
    case OMPD_target_parallel:
      if (DKind == OMPD_simd)
        return true;
      return isOpenMPWorksharingDirective(DKind) &&
             isOpenMPLoopDirective(DKind) && hasStaticScheduling(*NestedDir);
    case OMPD_target_teams_distribute:
    case OMPD_target_simd:
    case OMPD_target_parallel_for:
    case OMPD_target_parallel_for_simd:
    case OMPD_target_teams_distribute_simd:
    case OMPD_target_teams_distribute_parallel_for:
    case OMPD_target_teams_distribute_parallel_for_simd:
    case OMPD_parallel:
    case OMPD_for:
    case OMPD_parallel_for:
    case OMPD_parallel_master:
    case OMPD_parallel_sections:
    case OMPD_for_simd:
    case OMPD_parallel_for_simd:
    case OMPD_cancel:
    case OMPD_cancellation_point:
    case OMPD_ordered:
    case OMPD_threadprivate:
    case OMPD_allocate:
    case OMPD_task:
    case OMPD_simd:
    case OMPD_sections:
    case OMPD_section:
    case OMPD_single:
    case OMPD_master:
    case OMPD_critical:
    case OMPD_taskyield:
    case OMPD_barrier:
    case OMPD_taskwait:
    case OMPD_taskgroup:
    case OMPD_atomic:
    case OMPD_flush:
    case OMPD_depobj:
    case OMPD_scan:
    case OMPD_teams:
    case OMPD_target_data:
    case OMPD_target_exit_data:
    case OMPD_target_enter_data:
    case OMPD_distribute:
    case OMPD_distribute_simd:
    case OMPD_distribute_parallel_for:
    case OMPD_distribute_parallel_for_simd:
    case OMPD_teams_distribute:
    case OMPD_teams_distribute_simd:
    case OMPD_teams_distribute_parallel_for:
    case OMPD_teams_distribute_parallel_for_simd:
    case OMPD_target_update:
    case OMPD_declare_simd:
    case OMPD_declare_variant:
    case OMPD_begin_declare_variant:
    case OMPD_end_declare_variant:
    case OMPD_declare_target:
    case OMPD_end_declare_target:
    case OMPD_declare_reduction:
    case OMPD_declare_mapper:
    case OMPD_taskloop:
    case OMPD_taskloop_simd:
    case OMPD_master_taskloop:
    case OMPD_master_taskloop_simd:
    case OMPD_parallel_master_taskloop:
    case OMPD_parallel_master_taskloop_simd:
    case OMPD_requires:
    case OMPD_unknown:
    default:
      llvm_unreachable("Unexpected directive.");
    }
  }

  return false;
}

/// Checks if the construct supports lightweight runtime. It must be SPMD
/// construct + inner loop-based construct with static scheduling.
static bool supportsLightweightRuntime(ASTContext &Ctx,
                                       const OMPExecutableDirective &D) {
  if (!supportsSPMDExecutionMode(Ctx, D))
    return false;
  OpenMPDirectiveKind DirectiveKind = D.getDirectiveKind();
  switch (DirectiveKind) {
  case OMPD_target:
  case OMPD_target_teams:
  case OMPD_target_parallel:
    return hasNestedLightweightDirective(Ctx, D);
  case OMPD_target_parallel_for:
  case OMPD_target_parallel_for_simd:
  case OMPD_target_teams_distribute_parallel_for:
  case OMPD_target_teams_distribute_parallel_for_simd:
    // (Last|First)-privates must be shared in parallel region.
    return hasStaticScheduling(D);
  case OMPD_target_simd:
  case OMPD_target_teams_distribute_simd:
    return true;
  case OMPD_target_teams_distribute:
    return false;
  case OMPD_parallel:
  case OMPD_for:
  case OMPD_parallel_for:
  case OMPD_parallel_master:
  case OMPD_parallel_sections:
  case OMPD_for_simd:
  case OMPD_parallel_for_simd:
  case OMPD_cancel:
  case OMPD_cancellation_point:
  case OMPD_ordered:
  case OMPD_threadprivate:
  case OMPD_allocate:
  case OMPD_task:
  case OMPD_simd:
  case OMPD_sections:
  case OMPD_section:
  case OMPD_single:
  case OMPD_master:
  case OMPD_critical:
  case OMPD_taskyield:
  case OMPD_barrier:
  case OMPD_taskwait:
  case OMPD_taskgroup:
  case OMPD_atomic:
  case OMPD_flush:
  case OMPD_depobj:
  case OMPD_scan:
  case OMPD_teams:
  case OMPD_target_data:
  case OMPD_target_exit_data:
  case OMPD_target_enter_data:
  case OMPD_distribute:
  case OMPD_distribute_simd:
  case OMPD_distribute_parallel_for:
  case OMPD_distribute_parallel_for_simd:
  case OMPD_teams_distribute:
  case OMPD_teams_distribute_simd:
  case OMPD_teams_distribute_parallel_for:
  case OMPD_teams_distribute_parallel_for_simd:
  case OMPD_target_update:
  case OMPD_declare_simd:
  case OMPD_declare_variant:
  case OMPD_begin_declare_variant:
  case OMPD_end_declare_variant:
  case OMPD_declare_target:
  case OMPD_end_declare_target:
  case OMPD_declare_reduction:
  case OMPD_declare_mapper:
  case OMPD_taskloop:
  case OMPD_taskloop_simd:
  case OMPD_master_taskloop:
  case OMPD_master_taskloop_simd:
  case OMPD_parallel_master_taskloop:
  case OMPD_parallel_master_taskloop_simd:
  case OMPD_requires:
  case OMPD_unknown:
  default:
    break;
  }
  llvm_unreachable(
      "Unknown programming model for OpenMP directive on NVPTX target.");
}

void CGOpenMPRuntimeGPU::emitNonSPMDKernel(const OMPExecutableDirective &D,
                                           StringRef ParentName,
                                           llvm::Function *&OutlinedFn,
                                           llvm::Constant *&OutlinedFnID,
                                           bool IsOffloadEntry,
                                           const RegionCodeGenTy &CodeGen) {
  ExecutionRuntimeModesRAII ModeRAII(CurrentExecutionMode);
  EntryFunctionState EST;
  WrapperFunctionsMap.clear();

  // Emit target region as a standalone region.
  class NVPTXPrePostActionTy : public PrePostActionTy {
    CGOpenMPRuntimeGPU::EntryFunctionState &EST;

  public:
    NVPTXPrePostActionTy(CGOpenMPRuntimeGPU::EntryFunctionState &EST)
        : EST(EST) {}
    void Enter(CodeGenFunction &CGF) override {
      auto &RT =
          static_cast<CGOpenMPRuntimeGPU &>(CGF.CGM.getOpenMPRuntime());
      RT.emitKernelInit(CGF, EST, /* IsSPMD */ false);
      // Skip target region initialization.
      RT.setLocThreadIdInsertPt(CGF, /*AtCurrentPoint=*/true);
    }
    void Exit(CodeGenFunction &CGF) override {
      auto &RT =
          static_cast<CGOpenMPRuntimeGPU &>(CGF.CGM.getOpenMPRuntime());
      RT.clearLocThreadIdInsertPt(CGF);
      RT.emitKernelDeinit(CGF, EST, /* IsSPMD */ false);
    }
  } Action(EST);
  CodeGen.setAction(Action);
  IsInTTDRegion = true;
  emitTargetOutlinedFunctionHelper(D, ParentName, OutlinedFn, OutlinedFnID,
                                   IsOffloadEntry, CodeGen);
  IsInTTDRegion = false;
}

void CGOpenMPRuntimeGPU::emitKernelInit(CodeGenFunction &CGF,
                                        EntryFunctionState &EST, bool IsSPMD) {
  CGBuilderTy &Bld = CGF.Builder;
  Bld.restoreIP(OMPBuilder.createTargetInit(Bld, IsSPMD, requiresFullRuntime()));
  IsInTargetMasterThreadRegion = IsSPMD;
  if (!IsSPMD)
    emitGenericVarsProlog(CGF, EST.Loc);
}

void CGOpenMPRuntimeGPU::emitKernelDeinit(CodeGenFunction &CGF,
                                          EntryFunctionState &EST,
                                          bool IsSPMD) {
  if (!IsSPMD)
    emitGenericVarsEpilog(CGF);

  CGBuilderTy &Bld = CGF.Builder;
  OMPBuilder.createTargetDeinit(Bld, IsSPMD, requiresFullRuntime());
}

void CGOpenMPRuntimeGPU::emitSPMDKernel(const OMPExecutableDirective &D,
                                        StringRef ParentName,
                                        llvm::Function *&OutlinedFn,
                                        llvm::Constant *&OutlinedFnID,
                                        bool IsOffloadEntry,
                                        const RegionCodeGenTy &CodeGen) {
  ExecutionRuntimeModesRAII ModeRAII(
      CurrentExecutionMode, RequiresFullRuntime,
      CGM.getLangOpts().OpenMPCUDAForceFullRuntime ||
          !supportsLightweightRuntime(CGM.getContext(), D));
  EntryFunctionState EST;

  // Emit target region as a standalone region.
  class NVPTXPrePostActionTy : public PrePostActionTy {
    CGOpenMPRuntimeGPU &RT;
    CGOpenMPRuntimeGPU::EntryFunctionState &EST;

  public:
    NVPTXPrePostActionTy(CGOpenMPRuntimeGPU &RT,
                         CGOpenMPRuntimeGPU::EntryFunctionState &EST)
        : RT(RT), EST(EST) {}
    void Enter(CodeGenFunction &CGF) override {
      RT.emitKernelInit(CGF, EST, /* IsSPMD */ true);
      // Skip target region initialization.
      RT.setLocThreadIdInsertPt(CGF, /*AtCurrentPoint=*/true);
    }
    void Exit(CodeGenFunction &CGF) override {
      RT.clearLocThreadIdInsertPt(CGF);
      RT.emitKernelDeinit(CGF, EST, /* IsSPMD */ true);
    }
  } Action(*this, EST);
  CodeGen.setAction(Action);
  IsInTTDRegion = true;
  emitTargetOutlinedFunctionHelper(D, ParentName, OutlinedFn, OutlinedFnID,
                                   IsOffloadEntry, CodeGen);
  IsInTTDRegion = false;
}

// Create a unique global variable to indicate the execution mode of this target
// region. The execution mode is either 'generic', or 'spmd' depending on the
// target directive. This variable is picked up by the offload library to setup
// the device appropriately before kernel launch. If the execution mode is
// 'generic', the runtime reserves one warp for the master, otherwise, all
// warps participate in parallel work.
static void setPropertyExecutionMode(CodeGenModule &CGM, StringRef Name,
                                     bool Mode) {
  auto *GVMode = new llvm::GlobalVariable(
      CGM.getModule(), CGM.Int8Ty, /*isConstant=*/true,
      llvm::GlobalValue::WeakAnyLinkage,
      llvm::ConstantInt::get(CGM.Int8Ty, Mode ? OMP_TGT_EXEC_MODE_SPMD
                                              : OMP_TGT_EXEC_MODE_GENERIC),
      Twine(Name, "_exec_mode"));
  CGM.addCompilerUsedGlobal(GVMode);
}

void CGOpenMPRuntimeGPU::createOffloadEntry(llvm::Constant *ID,
                                            llvm::Constant *Addr,
                                            uint64_t Size, int32_t,
                                            llvm::GlobalValue::LinkageTypes) {
  // TODO: Add support for global variables on the device after declare target
  // support.
  if (!isa<llvm::Function>(Addr))
    return;
  llvm::Module &M = CGM.getModule();
  llvm::LLVMContext &Ctx = CGM.getLLVMContext();

  // Get "nvvm.annotations" metadata node
  llvm::NamedMDNode *MD = M.getOrInsertNamedMetadata("nvvm.annotations");

  llvm::Metadata *MDVals[] = {
      llvm::ConstantAsMetadata::get(Addr), llvm::MDString::get(Ctx, "kernel"),
      llvm::ConstantAsMetadata::get(
          llvm::ConstantInt::get(llvm::Type::getInt32Ty(Ctx), 1))};
  // Append metadata to nvvm.annotations
  MD->addOperand(llvm::MDNode::get(Ctx, MDVals));
}

void CGOpenMPRuntimeGPU::emitTargetOutlinedFunction(
    const OMPExecutableDirective &D, StringRef ParentName,
    llvm::Function *&OutlinedFn, llvm::Constant *&OutlinedFnID,
    bool IsOffloadEntry, const RegionCodeGenTy &CodeGen) {
  if (!IsOffloadEntry) // Nothing to do.
    return;

  assert(!ParentName.empty() && "Invalid target region parent name!");

  bool Mode = supportsSPMDExecutionMode(CGM.getContext(), D);
  if (Mode)
    emitSPMDKernel(D, ParentName, OutlinedFn, OutlinedFnID, IsOffloadEntry,
                   CodeGen);
  else
    emitNonSPMDKernel(D, ParentName, OutlinedFn, OutlinedFnID, IsOffloadEntry,
                      CodeGen);

  setPropertyExecutionMode(CGM, OutlinedFn->getName(), Mode);
}

namespace {
LLVM_ENABLE_BITMASK_ENUMS_IN_NAMESPACE();
/// Enum for accessing the reserved_2 field of the ident_t struct.
enum ModeFlagsTy : unsigned {
  /// Bit set to 1 when in SPMD mode.
  KMP_IDENT_SPMD_MODE = 0x01,
  /// Bit set to 1 when a simplified runtime is used.
  KMP_IDENT_SIMPLE_RT_MODE = 0x02,
  LLVM_MARK_AS_BITMASK_ENUM(/*LargestValue=*/KMP_IDENT_SIMPLE_RT_MODE)
};

/// Special mode Undefined. It is the combination of Non-SPMD mode + SimpleRuntime.
static const ModeFlagsTy UndefinedMode =
    (~KMP_IDENT_SPMD_MODE) & KMP_IDENT_SIMPLE_RT_MODE;
} // anonymous namespace

unsigned CGOpenMPRuntimeGPU::getDefaultLocationReserved2Flags() const {
  switch (getExecutionMode()) {
  case EM_SPMD:
    if (requiresFullRuntime())
      return KMP_IDENT_SPMD_MODE & (~KMP_IDENT_SIMPLE_RT_MODE);
    return KMP_IDENT_SPMD_MODE | KMP_IDENT_SIMPLE_RT_MODE;
  case EM_NonSPMD:
    assert(requiresFullRuntime() && "Expected full runtime.");
    return (~KMP_IDENT_SPMD_MODE) & (~KMP_IDENT_SIMPLE_RT_MODE);
  case EM_Unknown:
    return UndefinedMode;
  }
  llvm_unreachable("Unknown flags are requested.");
}

CGOpenMPRuntimeGPU::CGOpenMPRuntimeGPU(CodeGenModule &CGM)
    : CGOpenMPRuntime(CGM, "_", "$") {
  if (!CGM.getLangOpts().OpenMPIsDevice)
    llvm_unreachable("OpenMP can only handle device code.");

  llvm::OpenMPIRBuilder &OMPBuilder = getOMPBuilder();
  if (CGM.getLangOpts().OpenMPTargetNewRuntime) {
    OMPBuilder.createGlobalFlag(CGM.getLangOpts().OpenMPTargetDebug,
                                "__omp_rtl_debug_kind");
    OMPBuilder.createGlobalFlag(CGM.getLangOpts().OpenMPTeamSubscription,
                                "__omp_rtl_assume_teams_oversubscription");
    OMPBuilder.createGlobalFlag(CGM.getLangOpts().OpenMPThreadSubscription,
                                "__omp_rtl_assume_threads_oversubscription");
  }
}

void CGOpenMPRuntimeGPU::emitProcBindClause(CodeGenFunction &CGF,
                                            ProcBindKind ProcBind,
                                            SourceLocation Loc) {
  // Do nothing in case of SPMD mode and L0 parallel.
  if (getExecutionMode() == CGOpenMPRuntimeGPU::EM_SPMD)
    return;

  CGOpenMPRuntime::emitProcBindClause(CGF, ProcBind, Loc);
}

void CGOpenMPRuntimeGPU::emitNumThreadsClause(CodeGenFunction &CGF,
                                              llvm::Value *NumThreads,
                                              SourceLocation Loc) {
  // Nothing to do.
}

void CGOpenMPRuntimeGPU::emitNumTeamsClause(CodeGenFunction &CGF,
                                            const Expr *NumTeams,
                                            const Expr *ThreadLimit,
                                            SourceLocation Loc) {}

llvm::Function *CGOpenMPRuntimeGPU::emitParallelOutlinedFunction(
    const OMPExecutableDirective &D, const VarDecl *ThreadIDVar,
    OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen) {
  // Emit target region as a standalone region.
  class NVPTXPrePostActionTy : public PrePostActionTy {
    bool &IsInParallelRegion;
    bool PrevIsInParallelRegion;

  public:
    NVPTXPrePostActionTy(bool &IsInParallelRegion)
        : IsInParallelRegion(IsInParallelRegion) {}
    void Enter(CodeGenFunction &CGF) override {
      PrevIsInParallelRegion = IsInParallelRegion;
      IsInParallelRegion = true;
    }
    void Exit(CodeGenFunction &CGF) override {
      IsInParallelRegion = PrevIsInParallelRegion;
    }
  } Action(IsInParallelRegion);
  CodeGen.setAction(Action);
  bool PrevIsInTTDRegion = IsInTTDRegion;
  IsInTTDRegion = false;
  bool PrevIsInTargetMasterThreadRegion = IsInTargetMasterThreadRegion;
  IsInTargetMasterThreadRegion = false;
  auto *OutlinedFun =
      cast<llvm::Function>(CGOpenMPRuntime::emitParallelOutlinedFunction(
          D, ThreadIDVar, InnermostKind, CodeGen));
  IsInTargetMasterThreadRegion = PrevIsInTargetMasterThreadRegion;
  IsInTTDRegion = PrevIsInTTDRegion;
  if (getExecutionMode() != CGOpenMPRuntimeGPU::EM_SPMD &&
      !IsInParallelRegion) {
    llvm::Function *WrapperFun =
        createParallelDataSharingWrapper(OutlinedFun, D);
    WrapperFunctionsMap[OutlinedFun] = WrapperFun;
  }

  return OutlinedFun;
}

/// Get list of lastprivate variables from the teams distribute ... or
/// teams {distribute ...} directives.
static void
getDistributeLastprivateVars(ASTContext &Ctx, const OMPExecutableDirective &D,
                             llvm::SmallVectorImpl<const ValueDecl *> &Vars) {
  assert(isOpenMPTeamsDirective(D.getDirectiveKind()) &&
         "expected teams directive.");
  const OMPExecutableDirective *Dir = &D;
  if (!isOpenMPDistributeDirective(D.getDirectiveKind())) {
    if (const Stmt *S = CGOpenMPRuntime::getSingleCompoundChild(
            Ctx,
            D.getInnermostCapturedStmt()->getCapturedStmt()->IgnoreContainers(
                /*IgnoreCaptured=*/true))) {
      Dir = dyn_cast_or_null<OMPExecutableDirective>(S);
      if (Dir && !isOpenMPDistributeDirective(Dir->getDirectiveKind()))
        Dir = nullptr;
    }
  }
  if (!Dir)
    return;
  for (const auto *C : Dir->getClausesOfKind<OMPLastprivateClause>()) {
    for (const Expr *E : C->getVarRefs())
      Vars.push_back(getPrivateItem(E));
  }
}

/// Get list of reduction variables from the teams ... directives.
static void
getTeamsReductionVars(ASTContext &Ctx, const OMPExecutableDirective &D,
                      llvm::SmallVectorImpl<const ValueDecl *> &Vars) {
  assert(isOpenMPTeamsDirective(D.getDirectiveKind()) &&
         "expected teams directive.");
  for (const auto *C : D.getClausesOfKind<OMPReductionClause>()) {
    for (const Expr *E : C->privates())
      Vars.push_back(getPrivateItem(E));
  }
}

llvm::Function *CGOpenMPRuntimeGPU::emitTeamsOutlinedFunction(
    const OMPExecutableDirective &D, const VarDecl *ThreadIDVar,
    OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen) {
  SourceLocation Loc = D.getBeginLoc();

  const RecordDecl *GlobalizedRD = nullptr;
  llvm::SmallVector<const ValueDecl *, 4> LastPrivatesReductions;
  llvm::SmallDenseMap<const ValueDecl *, const FieldDecl *> MappedDeclsFields;
  unsigned WarpSize = CGM.getTarget().getGridValue().GV_Warp_Size;
  // Globalize team reductions variable unconditionally in all modes.
  if (getExecutionMode() != CGOpenMPRuntimeGPU::EM_SPMD)
    getTeamsReductionVars(CGM.getContext(), D, LastPrivatesReductions);
  if (getExecutionMode() == CGOpenMPRuntimeGPU::EM_SPMD) {
    getDistributeLastprivateVars(CGM.getContext(), D, LastPrivatesReductions);
    if (!LastPrivatesReductions.empty()) {
      GlobalizedRD = ::buildRecordForGlobalizedVars(
          CGM.getContext(), llvm::None, LastPrivatesReductions,
          MappedDeclsFields, WarpSize);
    }
  } else if (!LastPrivatesReductions.empty()) {
    assert(!TeamAndReductions.first &&
           "Previous team declaration is not expected.");
    TeamAndReductions.first = D.getCapturedStmt(OMPD_teams)->getCapturedDecl();
    std::swap(TeamAndReductions.second, LastPrivatesReductions);
  }

  // Emit target region as a standalone region.
  class NVPTXPrePostActionTy : public PrePostActionTy {
    SourceLocation &Loc;
    const RecordDecl *GlobalizedRD;
    llvm::SmallDenseMap<const ValueDecl *, const FieldDecl *>
        &MappedDeclsFields;

  public:
    NVPTXPrePostActionTy(
        SourceLocation &Loc, const RecordDecl *GlobalizedRD,
        llvm::SmallDenseMap<const ValueDecl *, const FieldDecl *>
            &MappedDeclsFields)
        : Loc(Loc), GlobalizedRD(GlobalizedRD),
          MappedDeclsFields(MappedDeclsFields) {}
    void Enter(CodeGenFunction &CGF) override {
      auto &Rt =
          static_cast<CGOpenMPRuntimeGPU &>(CGF.CGM.getOpenMPRuntime());
      if (GlobalizedRD) {
        auto I = Rt.FunctionGlobalizedDecls.try_emplace(CGF.CurFn).first;
        I->getSecond().MappedParams =
            std::make_unique<CodeGenFunction::OMPMapVars>();
        DeclToAddrMapTy &Data = I->getSecond().LocalVarData;
        for (const auto &Pair : MappedDeclsFields) {
          assert(Pair.getFirst()->isCanonicalDecl() &&
                 "Expected canonical declaration");
          Data.insert(std::make_pair(Pair.getFirst(), MappedVarData()));
        }
      }
      Rt.emitGenericVarsProlog(CGF, Loc);
    }
    void Exit(CodeGenFunction &CGF) override {
      static_cast<CGOpenMPRuntimeGPU &>(CGF.CGM.getOpenMPRuntime())
          .emitGenericVarsEpilog(CGF);
    }
  } Action(Loc, GlobalizedRD, MappedDeclsFields);
  CodeGen.setAction(Action);
  llvm::Function *OutlinedFun = CGOpenMPRuntime::emitTeamsOutlinedFunction(
      D, ThreadIDVar, InnermostKind, CodeGen);

  return OutlinedFun;
}

void CGOpenMPRuntimeGPU::emitGenericVarsProlog(CodeGenFunction &CGF,
                                               SourceLocation Loc,
                                               bool WithSPMDCheck) {
  if (getDataSharingMode(CGM) != CGOpenMPRuntimeGPU::Generic &&
      getExecutionMode() != CGOpenMPRuntimeGPU::EM_SPMD)
    return;

  CGBuilderTy &Bld = CGF.Builder;

  const auto I = FunctionGlobalizedDecls.find(CGF.CurFn);
  if (I == FunctionGlobalizedDecls.end())
    return;

  for (auto &Rec : I->getSecond().LocalVarData) {
    const auto *VD = cast<VarDecl>(Rec.first);
    bool EscapedParam = I->getSecond().EscapedParameters.count(Rec.first);
    QualType VarTy = VD->getType();

    // Get the local allocation of a firstprivate variable before sharing
    llvm::Value *ParValue;
    if (EscapedParam) {
      LValue ParLVal =
          CGF.MakeAddrLValue(CGF.GetAddrOfLocalVar(VD), VD->getType());
      ParValue = CGF.EmitLoadOfScalar(ParLVal, Loc);
    }

    // Allocate space for the variable to be globalized
    llvm::Value *AllocArgs[] = {CGF.getTypeSize(VD->getType())};
    llvm::Instruction *VoidPtr =
        CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
                                CGM.getModule(), OMPRTL___kmpc_alloc_shared),
                            AllocArgs, VD->getName());

    // Cast the void pointer and get the address of the globalized variable.
    llvm::PointerType *VarPtrTy = CGF.ConvertTypeForMem(VarTy)->getPointerTo();
    llvm::Value *CastedVoidPtr = Bld.CreatePointerBitCastOrAddrSpaceCast(
        VoidPtr, VarPtrTy, VD->getName() + "_on_stack");
    LValue VarAddr = CGF.MakeNaturalAlignAddrLValue(CastedVoidPtr, VarTy);
    Rec.second.PrivateAddr = VarAddr.getAddress(CGF);
    Rec.second.GlobalizedVal = VoidPtr;

    // Assign the local allocation to the newly globalized location.
    if (EscapedParam) {
      CGF.EmitStoreOfScalar(ParValue, VarAddr);
      I->getSecond().MappedParams->setVarAddr(CGF, VD, VarAddr.getAddress(CGF));
    }
    if (auto *DI = CGF.getDebugInfo())
      VoidPtr->setDebugLoc(DI->SourceLocToDebugLoc(VD->getLocation()));
  }
  for (const auto *VD : I->getSecond().EscapedVariableLengthDecls) {
    // Use actual memory size of the VLA object including the padding
    // for alignment purposes.
    llvm::Value *Size = CGF.getTypeSize(VD->getType());
    CharUnits Align = CGM.getContext().getDeclAlign(VD);
    Size = Bld.CreateNUWAdd(
        Size, llvm::ConstantInt::get(CGF.SizeTy, Align.getQuantity() - 1));
    llvm::Value *AlignVal =
        llvm::ConstantInt::get(CGF.SizeTy, Align.getQuantity());

    Size = Bld.CreateUDiv(Size, AlignVal);
    Size = Bld.CreateNUWMul(Size, AlignVal);

    // Allocate space for this VLA object to be globalized.
    llvm::Value *AllocArgs[] = {CGF.getTypeSize(VD->getType())};
    llvm::Instruction *VoidPtr =
        CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
                                CGM.getModule(), OMPRTL___kmpc_alloc_shared),
                            AllocArgs, VD->getName());

    I->getSecond().EscapedVariableLengthDeclsAddrs.emplace_back(
        std::pair<llvm::Value *, llvm::Value *>(
            {VoidPtr, CGF.getTypeSize(VD->getType())}));
    LValue Base = CGF.MakeAddrLValue(VoidPtr, VD->getType(),
                                     CGM.getContext().getDeclAlign(VD),
                                     AlignmentSource::Decl);
    I->getSecond().MappedParams->setVarAddr(CGF, cast<VarDecl>(VD),
                                            Base.getAddress(CGF));
  }
  I->getSecond().MappedParams->apply(CGF);
}

void CGOpenMPRuntimeGPU::emitGenericVarsEpilog(CodeGenFunction &CGF,
                                               bool WithSPMDCheck) {
  if (getDataSharingMode(CGM) != CGOpenMPRuntimeGPU::Generic &&
      getExecutionMode() != CGOpenMPRuntimeGPU::EM_SPMD)
    return;

  const auto I = FunctionGlobalizedDecls.find(CGF.CurFn);
  if (I != FunctionGlobalizedDecls.end()) {
    // Deallocate the memory for each globalized VLA object
    for (auto AddrSizePair :
         llvm::reverse(I->getSecond().EscapedVariableLengthDeclsAddrs)) {
      CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
                              CGM.getModule(), OMPRTL___kmpc_free_shared),
                          {AddrSizePair.first, AddrSizePair.second});
    }
    // Deallocate the memory for each globalized value
    for (auto &Rec : llvm::reverse(I->getSecond().LocalVarData)) {
      const auto *VD = cast<VarDecl>(Rec.first);
      I->getSecond().MappedParams->restore(CGF);

      llvm::Value *FreeArgs[] = {Rec.second.GlobalizedVal,
                                 CGF.getTypeSize(VD->getType())};
      CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
                              CGM.getModule(), OMPRTL___kmpc_free_shared),
                          FreeArgs);
    }
  }
}

void CGOpenMPRuntimeGPU::emitTeamsCall(CodeGenFunction &CGF,
                                       const OMPExecutableDirective &D,
                                       SourceLocation Loc,
                                       llvm::Function *OutlinedFn,
                                       ArrayRef<llvm::Value *> CapturedVars) {
  if (!CGF.HaveInsertPoint())
    return;

  Address ZeroAddr = CGF.CreateDefaultAlignTempAlloca(CGF.Int32Ty,
                                                      /*Name=*/".zero.addr");
  CGF.Builder.CreateStore(CGF.Builder.getInt32(/*C*/ 0), ZeroAddr);
  llvm::SmallVector<llvm::Value *, 16> OutlinedFnArgs;
  OutlinedFnArgs.push_back(emitThreadIDAddress(CGF, Loc).getPointer());
  OutlinedFnArgs.push_back(ZeroAddr.getPointer());
  OutlinedFnArgs.append(CapturedVars.begin(), CapturedVars.end());
  emitOutlinedFunctionCall(CGF, Loc, OutlinedFn, OutlinedFnArgs);
}

void CGOpenMPRuntimeGPU::emitParallelCall(CodeGenFunction &CGF,
                                          SourceLocation Loc,
                                          llvm::Function *OutlinedFn,
                                          ArrayRef<llvm::Value *> CapturedVars,
                                          const Expr *IfCond,
                                          llvm::Value *NumThreads) {
  if (!CGF.HaveInsertPoint())
    return;

  auto &&ParallelGen = [this, Loc, OutlinedFn, CapturedVars, IfCond,
                        NumThreads](CodeGenFunction &CGF,
                                    PrePostActionTy &Action) {
    CGBuilderTy &Bld = CGF.Builder;
    llvm::Value *NumThreadsVal = NumThreads;
    llvm::Function *WFn = WrapperFunctionsMap[OutlinedFn];
    llvm::Value *ID = llvm::ConstantPointerNull::get(CGM.Int8PtrTy);
    if (WFn)
      ID = Bld.CreateBitOrPointerCast(WFn, CGM.Int8PtrTy);
    llvm::Value *FnPtr = Bld.CreateBitOrPointerCast(OutlinedFn, CGM.Int8PtrTy);

    // Create a private scope that will globalize the arguments
    // passed from the outside of the target region.
    // TODO: Is that needed?
    CodeGenFunction::OMPPrivateScope PrivateArgScope(CGF);

    Address CapturedVarsAddrs = CGF.CreateDefaultAlignTempAlloca(
        llvm::ArrayType::get(CGM.VoidPtrTy, CapturedVars.size()),
        "captured_vars_addrs");
    // There's something to share.
    if (!CapturedVars.empty()) {
      // Prepare for parallel region. Indicate the outlined function.
      ASTContext &Ctx = CGF.getContext();
      unsigned Idx = 0;
      for (llvm::Value *V : CapturedVars) {
        Address Dst = Bld.CreateConstArrayGEP(CapturedVarsAddrs, Idx);
        llvm::Value *PtrV;
        if (V->getType()->isIntegerTy())
          PtrV = Bld.CreateIntToPtr(V, CGF.VoidPtrTy);
        else
          PtrV = Bld.CreatePointerBitCastOrAddrSpaceCast(V, CGF.VoidPtrTy);
        CGF.EmitStoreOfScalar(PtrV, Dst, /*Volatile=*/false,
                              Ctx.getPointerType(Ctx.VoidPtrTy));
        ++Idx;
      }
    }

    llvm::Value *IfCondVal = nullptr;
    if (IfCond)
      IfCondVal = Bld.CreateIntCast(CGF.EvaluateExprAsBool(IfCond), CGF.Int32Ty,
                                    /* isSigned */ false);
    else
      IfCondVal = llvm::ConstantInt::get(CGF.Int32Ty, 1);

    if (!NumThreadsVal)
      NumThreadsVal = llvm::ConstantInt::get(CGF.Int32Ty, -1);
    else
      NumThreadsVal = Bld.CreateZExtOrTrunc(NumThreadsVal, CGF.Int32Ty);

    assert(IfCondVal && "Expected a value");
    llvm::Value *RTLoc = emitUpdateLocation(CGF, Loc);
    llvm::Value *Args[] = {
        RTLoc,
        getThreadID(CGF, Loc),
        IfCondVal,
        NumThreadsVal,
        llvm::ConstantInt::get(CGF.Int32Ty, -1),
        FnPtr,
        ID,
        Bld.CreateBitOrPointerCast(CapturedVarsAddrs.getPointer(),
                                   CGF.VoidPtrPtrTy),
        llvm::ConstantInt::get(CGM.SizeTy, CapturedVars.size())};
    CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
                            CGM.getModule(), OMPRTL___kmpc_parallel_51),
                        Args);
  };

  RegionCodeGenTy RCG(ParallelGen);
  RCG(CGF);
}

void CGOpenMPRuntimeGPU::syncCTAThreads(CodeGenFunction &CGF) {
  // Always emit simple barriers!
1587 if (!CGF.HaveInsertPoint()) 1588 return; 1589 // Build call __kmpc_barrier_simple_spmd(nullptr, 0); 1590 // This function does not use parameters, so we can emit just default values. 1591 llvm::Value *Args[] = { 1592 llvm::ConstantPointerNull::get( 1593 cast<llvm::PointerType>(getIdentTyPointerTy())), 1594 llvm::ConstantInt::get(CGF.Int32Ty, /*V=*/0, /*isSigned=*/true)}; 1595 CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction( 1596 CGM.getModule(), OMPRTL___kmpc_barrier_simple_spmd), 1597 Args); 1598 } 1599 1600 void CGOpenMPRuntimeGPU::emitBarrierCall(CodeGenFunction &CGF, 1601 SourceLocation Loc, 1602 OpenMPDirectiveKind Kind, bool, 1603 bool) { 1604 // Always emit simple barriers! 1605 if (!CGF.HaveInsertPoint()) 1606 return; 1607 // Build call __kmpc_barrier(loc, thread_id); 1608 unsigned Flags = getDefaultFlagsForBarriers(Kind); 1609 llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc, Flags), 1610 getThreadID(CGF, Loc)}; 1611 1612 CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction( 1613 CGM.getModule(), OMPRTL___kmpc_barrier), 1614 Args); 1615 } 1616 1617 void CGOpenMPRuntimeGPU::emitCriticalRegion( 1618 CodeGenFunction &CGF, StringRef CriticalName, 1619 const RegionCodeGenTy &CriticalOpGen, SourceLocation Loc, 1620 const Expr *Hint) { 1621 llvm::BasicBlock *LoopBB = CGF.createBasicBlock("omp.critical.loop"); 1622 llvm::BasicBlock *TestBB = CGF.createBasicBlock("omp.critical.test"); 1623 llvm::BasicBlock *SyncBB = CGF.createBasicBlock("omp.critical.sync"); 1624 llvm::BasicBlock *BodyBB = CGF.createBasicBlock("omp.critical.body"); 1625 llvm::BasicBlock *ExitBB = CGF.createBasicBlock("omp.critical.exit"); 1626 1627 auto &RT = static_cast<CGOpenMPRuntimeGPU &>(CGF.CGM.getOpenMPRuntime()); 1628 1629 // Get the mask of active threads in the warp. 1630 llvm::Value *Mask = CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction( 1631 CGM.getModule(), OMPRTL___kmpc_warp_active_thread_mask)); 1632 // Fetch team-local id of the thread. 1633 llvm::Value *ThreadID = RT.getGPUThreadID(CGF); 1634 1635 // Get the width of the team. 1636 llvm::Value *TeamWidth = RT.getGPUNumThreads(CGF); 1637 1638 // Initialize the counter variable for the loop. 1639 QualType Int32Ty = 1640 CGF.getContext().getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/0); 1641 Address Counter = CGF.CreateMemTemp(Int32Ty, "critical_counter"); 1642 LValue CounterLVal = CGF.MakeAddrLValue(Counter, Int32Ty); 1643 CGF.EmitStoreOfScalar(llvm::Constant::getNullValue(CGM.Int32Ty), CounterLVal, 1644 /*isInit=*/true); 1645 1646 // Block checks if loop counter exceeds upper bound. 1647 CGF.EmitBlock(LoopBB); 1648 llvm::Value *CounterVal = CGF.EmitLoadOfScalar(CounterLVal, Loc); 1649 llvm::Value *CmpLoopBound = CGF.Builder.CreateICmpSLT(CounterVal, TeamWidth); 1650 CGF.Builder.CreateCondBr(CmpLoopBound, TestBB, ExitBB); 1651 1652 // Block tests which single thread should execute the region, and which threads 1653 // should go straight to the synchronisation point. 1654 CGF.EmitBlock(TestBB); 1655 CounterVal = CGF.EmitLoadOfScalar(CounterLVal, Loc); 1656 llvm::Value *CmpThreadToCounter = 1657 CGF.Builder.CreateICmpEQ(ThreadID, CounterVal); 1658 CGF.Builder.CreateCondBr(CmpThreadToCounter, BodyBB, SyncBB); 1659 1660 // Block emits the body of the critical region. 1661 CGF.EmitBlock(BodyBB); 1662 1663 // Output the critical statement.
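  // A rough sketch (illustration only, not the authoritative lowering) of the
  // control flow stitched together by the blocks above and below:
  //
  //   for (counter = 0; counter < team_width; ++counter) {  // LoopBB
  //     if (thread_id == counter)                            // TestBB
  //       <critical region body>;                            // BodyBB
  //     __kmpc_syncwarp(mask);                               // SyncBB
  //   }                                                      // ExitBB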
1664 CGOpenMPRuntime::emitCriticalRegion(CGF, CriticalName, CriticalOpGen, Loc, 1665 Hint); 1666 1667 // After the body surrounded by the critical region, the single executing 1668 // thread will jump to the synchronisation point. 1669 // Block waits for all threads in current team to finish then increments the 1670 // counter variable and returns to the loop. 1671 CGF.EmitBlock(SyncBB); 1672 // Reconverge active threads in the warp. 1673 (void)CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction( 1674 CGM.getModule(), OMPRTL___kmpc_syncwarp), 1675 Mask); 1676 1677 llvm::Value *IncCounterVal = 1678 CGF.Builder.CreateNSWAdd(CounterVal, CGF.Builder.getInt32(1)); 1679 CGF.EmitStoreOfScalar(IncCounterVal, CounterLVal); 1680 CGF.EmitBranch(LoopBB); 1681 1682 // Block that is reached when all threads in the team complete the region. 1683 CGF.EmitBlock(ExitBB, /*IsFinished=*/true); 1684 } 1685 1686 /// Cast value to the specified type. 1687 static llvm::Value *castValueToType(CodeGenFunction &CGF, llvm::Value *Val, 1688 QualType ValTy, QualType CastTy, 1689 SourceLocation Loc) { 1690 assert(!CGF.getContext().getTypeSizeInChars(CastTy).isZero() && 1691 "Cast type must be sized."); 1692 assert(!CGF.getContext().getTypeSizeInChars(ValTy).isZero() && 1693 "Val type must be sized."); 1694 llvm::Type *LLVMCastTy = CGF.ConvertTypeForMem(CastTy); 1695 if (ValTy == CastTy) 1696 return Val; 1697 if (CGF.getContext().getTypeSizeInChars(ValTy) == 1698 CGF.getContext().getTypeSizeInChars(CastTy)) 1699 return CGF.Builder.CreateBitCast(Val, LLVMCastTy); 1700 if (CastTy->isIntegerType() && ValTy->isIntegerType()) 1701 return CGF.Builder.CreateIntCast(Val, LLVMCastTy, 1702 CastTy->hasSignedIntegerRepresentation()); 1703 Address CastItem = CGF.CreateMemTemp(CastTy); 1704 Address ValCastItem = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast( 1705 CastItem, Val->getType()->getPointerTo(CastItem.getAddressSpace())); 1706 CGF.EmitStoreOfScalar(Val, ValCastItem, /*Volatile=*/false, ValTy, 1707 LValueBaseInfo(AlignmentSource::Type), 1708 TBAAAccessInfo()); 1709 return CGF.EmitLoadOfScalar(CastItem, /*Volatile=*/false, CastTy, Loc, 1710 LValueBaseInfo(AlignmentSource::Type), 1711 TBAAAccessInfo()); 1712 } 1713 1714 /// This function creates calls to one of two shuffle functions to copy 1715 /// variables between lanes in a warp. 1716 static llvm::Value *createRuntimeShuffleFunction(CodeGenFunction &CGF, 1717 llvm::Value *Elem, 1718 QualType ElemType, 1719 llvm::Value *Offset, 1720 SourceLocation Loc) { 1721 CodeGenModule &CGM = CGF.CGM; 1722 CGBuilderTy &Bld = CGF.Builder; 1723 CGOpenMPRuntimeGPU &RT = 1724 *(static_cast<CGOpenMPRuntimeGPU *>(&CGM.getOpenMPRuntime())); 1725 llvm::OpenMPIRBuilder &OMPBuilder = RT.getOMPBuilder(); 1726 1727 CharUnits Size = CGF.getContext().getTypeSizeInChars(ElemType); 1728 assert(Size.getQuantity() <= 8 && 1729 "Unsupported bitwidth in shuffle instruction."); 1730 1731 RuntimeFunction ShuffleFn = Size.getQuantity() <= 4 1732 ? OMPRTL___kmpc_shuffle_int32 1733 : OMPRTL___kmpc_shuffle_int64; 1734 1735 // Cast all types to 32- or 64-bit values before calling shuffle routines. 1736 QualType CastTy = CGF.getContext().getIntTypeForBitwidth( 1737 Size.getQuantity() <= 4 ?
32 : 64, /*Signed=*/1); 1738 llvm::Value *ElemCast = castValueToType(CGF, Elem, ElemType, CastTy, Loc); 1739 llvm::Value *WarpSize = 1740 Bld.CreateIntCast(RT.getGPUWarpSize(CGF), CGM.Int16Ty, /*isSigned=*/true); 1741 1742 llvm::Value *ShuffledVal = CGF.EmitRuntimeCall( 1743 OMPBuilder.getOrCreateRuntimeFunction(CGM.getModule(), ShuffleFn), 1744 {ElemCast, Offset, WarpSize}); 1745 1746 return castValueToType(CGF, ShuffledVal, CastTy, ElemType, Loc); 1747 } 1748 1749 static void shuffleAndStore(CodeGenFunction &CGF, Address SrcAddr, 1750 Address DestAddr, QualType ElemType, 1751 llvm::Value *Offset, SourceLocation Loc) { 1752 CGBuilderTy &Bld = CGF.Builder; 1753 1754 CharUnits Size = CGF.getContext().getTypeSizeInChars(ElemType); 1755 // Create the loop over the big sized data. 1756 // ptr = (void*)Elem; 1757 // ptrEnd = (void*) Elem + 1; 1758 // Step = 8; 1759 // while (ptr + Step < ptrEnd) 1760 // shuffle((int64_t)*ptr); 1761 // Step = 4; 1762 // while (ptr + Step < ptrEnd) 1763 // shuffle((int32_t)*ptr); 1764 // ... 1765 Address ElemPtr = DestAddr; 1766 Address Ptr = SrcAddr; 1767 Address PtrEnd = Bld.CreatePointerBitCastOrAddrSpaceCast( 1768 Bld.CreateConstGEP(SrcAddr, 1), CGF.VoidPtrTy); 1769 for (int IntSize = 8; IntSize >= 1; IntSize /= 2) { 1770 if (Size < CharUnits::fromQuantity(IntSize)) 1771 continue; 1772 QualType IntType = CGF.getContext().getIntTypeForBitwidth( 1773 CGF.getContext().toBits(CharUnits::fromQuantity(IntSize)), 1774 /*Signed=*/1); 1775 llvm::Type *IntTy = CGF.ConvertTypeForMem(IntType); 1776 Ptr = Bld.CreatePointerBitCastOrAddrSpaceCast(Ptr, IntTy->getPointerTo()); 1777 ElemPtr = 1778 Bld.CreatePointerBitCastOrAddrSpaceCast(ElemPtr, IntTy->getPointerTo()); 1779 if (Size.getQuantity() / IntSize > 1) { 1780 llvm::BasicBlock *PreCondBB = CGF.createBasicBlock(".shuffle.pre_cond"); 1781 llvm::BasicBlock *ThenBB = CGF.createBasicBlock(".shuffle.then"); 1782 llvm::BasicBlock *ExitBB = CGF.createBasicBlock(".shuffle.exit"); 1783 llvm::BasicBlock *CurrentBB = Bld.GetInsertBlock(); 1784 CGF.EmitBlock(PreCondBB); 1785 llvm::PHINode *PhiSrc = 1786 Bld.CreatePHI(Ptr.getType(), /*NumReservedValues=*/2); 1787 PhiSrc->addIncoming(Ptr.getPointer(), CurrentBB); 1788 llvm::PHINode *PhiDest = 1789 Bld.CreatePHI(ElemPtr.getType(), /*NumReservedValues=*/2); 1790 PhiDest->addIncoming(ElemPtr.getPointer(), CurrentBB); 1791 Ptr = Address(PhiSrc, Ptr.getAlignment()); 1792 ElemPtr = Address(PhiDest, ElemPtr.getAlignment()); 1793 llvm::Value *PtrDiff = Bld.CreatePtrDiff( 1794 PtrEnd.getPointer(), Bld.CreatePointerBitCastOrAddrSpaceCast( 1795 Ptr.getPointer(), CGF.VoidPtrTy)); 1796 Bld.CreateCondBr(Bld.CreateICmpSGT(PtrDiff, Bld.getInt64(IntSize - 1)), 1797 ThenBB, ExitBB); 1798 CGF.EmitBlock(ThenBB); 1799 llvm::Value *Res = createRuntimeShuffleFunction( 1800 CGF, 1801 CGF.EmitLoadOfScalar(Ptr, /*Volatile=*/false, IntType, Loc, 1802 LValueBaseInfo(AlignmentSource::Type), 1803 TBAAAccessInfo()), 1804 IntType, Offset, Loc); 1805 CGF.EmitStoreOfScalar(Res, ElemPtr, /*Volatile=*/false, IntType, 1806 LValueBaseInfo(AlignmentSource::Type), 1807 TBAAAccessInfo()); 1808 Address LocalPtr = Bld.CreateConstGEP(Ptr, 1); 1809 Address LocalElemPtr = Bld.CreateConstGEP(ElemPtr, 1); 1810 PhiSrc->addIncoming(LocalPtr.getPointer(), ThenBB); 1811 PhiDest->addIncoming(LocalElemPtr.getPointer(), ThenBB); 1812 CGF.EmitBranch(PreCondBB); 1813 CGF.EmitBlock(ExitBB); 1814 } else { 1815 llvm::Value *Res = createRuntimeShuffleFunction( 1816 CGF, 1817 CGF.EmitLoadOfScalar(Ptr, /*Volatile=*/false, IntType, Loc, 1818 
LValueBaseInfo(AlignmentSource::Type), 1819 TBAAAccessInfo()), 1820 IntType, Offset, Loc); 1821 CGF.EmitStoreOfScalar(Res, ElemPtr, /*Volatile=*/false, IntType, 1822 LValueBaseInfo(AlignmentSource::Type), 1823 TBAAAccessInfo()); 1824 Ptr = Bld.CreateConstGEP(Ptr, 1); 1825 ElemPtr = Bld.CreateConstGEP(ElemPtr, 1); 1826 } 1827 Size = Size % IntSize; 1828 } 1829 } 1830 1831 namespace { 1832 enum CopyAction : unsigned { 1833 // RemoteLaneToThread: Copy over a Reduce list from a remote lane in 1834 // the warp using shuffle instructions. 1835 RemoteLaneToThread, 1836 // ThreadCopy: Make a copy of a Reduce list on the thread's stack. 1837 ThreadCopy, 1838 // ThreadToScratchpad: Copy a team-reduced array to the scratchpad. 1839 ThreadToScratchpad, 1840 // ScratchpadToThread: Copy from a scratchpad array in global memory 1841 // containing team-reduced data to a thread's stack. 1842 ScratchpadToThread, 1843 }; 1844 } // namespace 1845 1846 struct CopyOptionsTy { 1847 llvm::Value *RemoteLaneOffset; 1848 llvm::Value *ScratchpadIndex; 1849 llvm::Value *ScratchpadWidth; 1850 }; 1851 1852 /// Emit instructions to copy a Reduce list, which contains partially 1853 /// aggregated values, in the specified direction. 1854 static void emitReductionListCopy( 1855 CopyAction Action, CodeGenFunction &CGF, QualType ReductionArrayTy, 1856 ArrayRef<const Expr *> Privates, Address SrcBase, Address DestBase, 1857 CopyOptionsTy CopyOptions = {nullptr, nullptr, nullptr}) { 1858 1859 CodeGenModule &CGM = CGF.CGM; 1860 ASTContext &C = CGM.getContext(); 1861 CGBuilderTy &Bld = CGF.Builder; 1862 1863 llvm::Value *RemoteLaneOffset = CopyOptions.RemoteLaneOffset; 1864 llvm::Value *ScratchpadIndex = CopyOptions.ScratchpadIndex; 1865 llvm::Value *ScratchpadWidth = CopyOptions.ScratchpadWidth; 1866 1867 // Iterates, element-by-element, through the source Reduce list and 1868 // make a copy. 1869 unsigned Idx = 0; 1870 unsigned Size = Privates.size(); 1871 for (const Expr *Private : Privates) { 1872 Address SrcElementAddr = Address::invalid(); 1873 Address DestElementAddr = Address::invalid(); 1874 Address DestElementPtrAddr = Address::invalid(); 1875 // Should we shuffle in an element from a remote lane? 1876 bool ShuffleInElement = false; 1877 // Set to true to update the pointer in the dest Reduce list to a 1878 // newly created element. 1879 bool UpdateDestListPtr = false; 1880 // Increment the src or dest pointer to the scratchpad, for each 1881 // new element. 1882 bool IncrScratchpadSrc = false; 1883 bool IncrScratchpadDest = false; 1884 1885 switch (Action) { 1886 case RemoteLaneToThread: { 1887 // Step 1.1: Get the address for the src element in the Reduce list. 1888 Address SrcElementPtrAddr = Bld.CreateConstArrayGEP(SrcBase, Idx); 1889 SrcElementAddr = CGF.EmitLoadOfPointer( 1890 SrcElementPtrAddr, 1891 C.getPointerType(Private->getType())->castAs<PointerType>()); 1892 1893 // Step 1.2: Create a temporary to store the element in the destination 1894 // Reduce list. 1895 DestElementPtrAddr = Bld.CreateConstArrayGEP(DestBase, Idx); 1896 DestElementAddr = 1897 CGF.CreateMemTemp(Private->getType(), ".omp.reduction.element"); 1898 ShuffleInElement = true; 1899 UpdateDestListPtr = true; 1900 break; 1901 } 1902 case ThreadCopy: { 1903 // Step 1.1: Get the address for the src element in the Reduce list. 
1904 Address SrcElementPtrAddr = Bld.CreateConstArrayGEP(SrcBase, Idx); 1905 SrcElementAddr = CGF.EmitLoadOfPointer( 1906 SrcElementPtrAddr, 1907 C.getPointerType(Private->getType())->castAs<PointerType>()); 1908 1909 // Step 1.2: Get the address for dest element. The destination 1910 // element has already been created on the thread's stack. 1911 DestElementPtrAddr = Bld.CreateConstArrayGEP(DestBase, Idx); 1912 DestElementAddr = CGF.EmitLoadOfPointer( 1913 DestElementPtrAddr, 1914 C.getPointerType(Private->getType())->castAs<PointerType>()); 1915 break; 1916 } 1917 case ThreadToScratchpad: { 1918 // Step 1.1: Get the address for the src element in the Reduce list. 1919 Address SrcElementPtrAddr = Bld.CreateConstArrayGEP(SrcBase, Idx); 1920 SrcElementAddr = CGF.EmitLoadOfPointer( 1921 SrcElementPtrAddr, 1922 C.getPointerType(Private->getType())->castAs<PointerType>()); 1923 1924 // Step 1.2: Get the address for dest element: 1925 // address = base + index * ElementSizeInChars. 1926 llvm::Value *ElementSizeInChars = CGF.getTypeSize(Private->getType()); 1927 llvm::Value *CurrentOffset = 1928 Bld.CreateNUWMul(ElementSizeInChars, ScratchpadIndex); 1929 llvm::Value *ScratchPadElemAbsolutePtrVal = 1930 Bld.CreateNUWAdd(DestBase.getPointer(), CurrentOffset); 1931 ScratchPadElemAbsolutePtrVal = 1932 Bld.CreateIntToPtr(ScratchPadElemAbsolutePtrVal, CGF.VoidPtrTy); 1933 DestElementAddr = Address(ScratchPadElemAbsolutePtrVal, 1934 C.getTypeAlignInChars(Private->getType())); 1935 IncrScratchpadDest = true; 1936 break; 1937 } 1938 case ScratchpadToThread: { 1939 // Step 1.1: Get the address for the src element in the scratchpad. 1940 // address = base + index * ElementSizeInChars. 1941 llvm::Value *ElementSizeInChars = CGF.getTypeSize(Private->getType()); 1942 llvm::Value *CurrentOffset = 1943 Bld.CreateNUWMul(ElementSizeInChars, ScratchpadIndex); 1944 llvm::Value *ScratchPadElemAbsolutePtrVal = 1945 Bld.CreateNUWAdd(SrcBase.getPointer(), CurrentOffset); 1946 ScratchPadElemAbsolutePtrVal = 1947 Bld.CreateIntToPtr(ScratchPadElemAbsolutePtrVal, CGF.VoidPtrTy); 1948 SrcElementAddr = Address(ScratchPadElemAbsolutePtrVal, 1949 C.getTypeAlignInChars(Private->getType())); 1950 IncrScratchpadSrc = true; 1951 1952 // Step 1.2: Create a temporary to store the element in the destination 1953 // Reduce list. 1954 DestElementPtrAddr = Bld.CreateConstArrayGEP(DestBase, Idx); 1955 DestElementAddr = 1956 CGF.CreateMemTemp(Private->getType(), ".omp.reduction.element"); 1957 UpdateDestListPtr = true; 1958 break; 1959 } 1960 } 1961 1962 // Regardless of src and dest of copy, we emit the load of src 1963 // element as this is required in all directions 1964 SrcElementAddr = Bld.CreateElementBitCast( 1965 SrcElementAddr, CGF.ConvertTypeForMem(Private->getType())); 1966 DestElementAddr = Bld.CreateElementBitCast(DestElementAddr, 1967 SrcElementAddr.getElementType()); 1968 1969 // Now that all active lanes have read the element in the 1970 // Reduce list, shuffle over the value from the remote lane. 1971 if (ShuffleInElement) { 1972 shuffleAndStore(CGF, SrcElementAddr, DestElementAddr, Private->getType(), 1973 RemoteLaneOffset, Private->getExprLoc()); 1974 } else { 1975 switch (CGF.getEvaluationKind(Private->getType())) { 1976 case TEK_Scalar: { 1977 llvm::Value *Elem = CGF.EmitLoadOfScalar( 1978 SrcElementAddr, /*Volatile=*/false, Private->getType(), 1979 Private->getExprLoc(), LValueBaseInfo(AlignmentSource::Type), 1980 TBAAAccessInfo()); 1981 // Store the source element value to the dest element address. 
1982 CGF.EmitStoreOfScalar( 1983 Elem, DestElementAddr, /*Volatile=*/false, Private->getType(), 1984 LValueBaseInfo(AlignmentSource::Type), TBAAAccessInfo()); 1985 break; 1986 } 1987 case TEK_Complex: { 1988 CodeGenFunction::ComplexPairTy Elem = CGF.EmitLoadOfComplex( 1989 CGF.MakeAddrLValue(SrcElementAddr, Private->getType()), 1990 Private->getExprLoc()); 1991 CGF.EmitStoreOfComplex( 1992 Elem, CGF.MakeAddrLValue(DestElementAddr, Private->getType()), 1993 /*isInit=*/false); 1994 break; 1995 } 1996 case TEK_Aggregate: 1997 CGF.EmitAggregateCopy( 1998 CGF.MakeAddrLValue(DestElementAddr, Private->getType()), 1999 CGF.MakeAddrLValue(SrcElementAddr, Private->getType()), 2000 Private->getType(), AggValueSlot::DoesNotOverlap); 2001 break; 2002 } 2003 } 2004 2005 // Step 3.1: Modify reference in dest Reduce list as needed. 2006 // Modifying the reference in Reduce list to point to the newly 2007 // created element. The element is live in the current function 2008 // scope and that of functions it invokes (i.e., reduce_function). 2009 // RemoteReduceData[i] = (void*)&RemoteElem 2010 if (UpdateDestListPtr) { 2011 CGF.EmitStoreOfScalar(Bld.CreatePointerBitCastOrAddrSpaceCast( 2012 DestElementAddr.getPointer(), CGF.VoidPtrTy), 2013 DestElementPtrAddr, /*Volatile=*/false, 2014 C.VoidPtrTy); 2015 } 2016 2017 // Step 4.1: Increment SrcBase/DestBase so that it points to the starting 2018 // address of the next element in scratchpad memory, unless we're currently 2019 // processing the last one. Memory alignment is also taken care of here. 2020 if ((IncrScratchpadDest || IncrScratchpadSrc) && (Idx + 1 < Size)) { 2021 llvm::Value *ScratchpadBasePtr = 2022 IncrScratchpadDest ? DestBase.getPointer() : SrcBase.getPointer(); 2023 llvm::Value *ElementSizeInChars = CGF.getTypeSize(Private->getType()); 2024 ScratchpadBasePtr = Bld.CreateNUWAdd( 2025 ScratchpadBasePtr, 2026 Bld.CreateNUWMul(ScratchpadWidth, ElementSizeInChars)); 2027 2028 // Take care of global memory alignment for performance 2029 ScratchpadBasePtr = Bld.CreateNUWSub( 2030 ScratchpadBasePtr, llvm::ConstantInt::get(CGM.SizeTy, 1)); 2031 ScratchpadBasePtr = Bld.CreateUDiv( 2032 ScratchpadBasePtr, 2033 llvm::ConstantInt::get(CGM.SizeTy, GlobalMemoryAlignment)); 2034 ScratchpadBasePtr = Bld.CreateNUWAdd( 2035 ScratchpadBasePtr, llvm::ConstantInt::get(CGM.SizeTy, 1)); 2036 ScratchpadBasePtr = Bld.CreateNUWMul( 2037 ScratchpadBasePtr, 2038 llvm::ConstantInt::get(CGM.SizeTy, GlobalMemoryAlignment)); 2039 2040 if (IncrScratchpadDest) 2041 DestBase = Address(ScratchpadBasePtr, CGF.getPointerAlign()); 2042 else /* IncrScratchpadSrc = true */ 2043 SrcBase = Address(ScratchpadBasePtr, CGF.getPointerAlign()); 2044 } 2045 2046 ++Idx; 2047 } 2048 } 2049 2050 /// This function emits a helper that gathers Reduce lists from the first 2051 /// lane of every active warp to lanes in the first warp. 2052 /// 2053 /// void inter_warp_copy_func(void* reduce_data, num_warps) 2054 /// shared smem[warp_size]; 2055 /// For all data entries D in reduce_data: 2056 /// sync 2057 /// If (I am the first lane in each warp) 2058 /// Copy my local D to smem[warp_id] 2059 /// sync 2060 /// if (I am the first warp) 2061 /// Copy smem[thread_id] to my local D 2062 static llvm::Value *emitInterWarpCopyFunction(CodeGenModule &CGM, 2063 ArrayRef<const Expr *> Privates, 2064 QualType ReductionArrayTy, 2065 SourceLocation Loc) { 2066 ASTContext &C = CGM.getContext(); 2067 llvm::Module &M = CGM.getModule(); 2068 2069 // ReduceList: thread local Reduce list. 
2070 // At the stage of the computation when this function is called, partially 2071 // aggregated values reside in the first lane of every active warp. 2072 ImplicitParamDecl ReduceListArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, 2073 C.VoidPtrTy, ImplicitParamDecl::Other); 2074 // NumWarps: number of warps active in the parallel region. This could 2075 // be smaller than 32 (max warps in a CTA) for partial block reduction. 2076 ImplicitParamDecl NumWarpsArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, 2077 C.getIntTypeForBitwidth(32, /* Signed */ true), 2078 ImplicitParamDecl::Other); 2079 FunctionArgList Args; 2080 Args.push_back(&ReduceListArg); 2081 Args.push_back(&NumWarpsArg); 2082 2083 const CGFunctionInfo &CGFI = 2084 CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args); 2085 auto *Fn = llvm::Function::Create(CGM.getTypes().GetFunctionType(CGFI), 2086 llvm::GlobalValue::InternalLinkage, 2087 "_omp_reduction_inter_warp_copy_func", &M); 2088 CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, CGFI); 2089 Fn->setDoesNotRecurse(); 2090 CodeGenFunction CGF(CGM); 2091 CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, CGFI, Args, Loc, Loc); 2092 2093 CGBuilderTy &Bld = CGF.Builder; 2094 2095 // This array is used as a medium to transfer, one reduce element at a time, 2096 // the data from the first lane of every warp to lanes in the first warp 2097 // in order to perform the final step of a reduction in a parallel region 2098 // (reduction across warps). The array is placed in NVPTX __shared__ memory 2099 // for reduced latency, as well as to have a distinct copy for concurrently 2100 // executing target regions. The array is declared with common linkage so 2101 // as to be shared across compilation units. 2102 StringRef TransferMediumName = 2103 "__openmp_nvptx_data_transfer_temporary_storage"; 2104 llvm::GlobalVariable *TransferMedium = 2105 M.getGlobalVariable(TransferMediumName); 2106 unsigned WarpSize = CGF.getTarget().getGridValue().GV_Warp_Size; 2107 if (!TransferMedium) { 2108 auto *Ty = llvm::ArrayType::get(CGM.Int32Ty, WarpSize); 2109 unsigned SharedAddressSpace = C.getTargetAddressSpace(LangAS::cuda_shared); 2110 TransferMedium = new llvm::GlobalVariable( 2111 M, Ty, /*isConstant=*/false, llvm::GlobalVariable::WeakAnyLinkage, 2112 llvm::UndefValue::get(Ty), TransferMediumName, 2113 /*InsertBefore=*/nullptr, llvm::GlobalVariable::NotThreadLocal, 2114 SharedAddressSpace); 2115 CGM.addCompilerUsedGlobal(TransferMedium); 2116 } 2117 2118 auto &RT = static_cast<CGOpenMPRuntimeGPU &>(CGF.CGM.getOpenMPRuntime()); 2119 // Get the CUDA thread id of the current OpenMP thread on the GPU. 2120 llvm::Value *ThreadID = RT.getGPUThreadID(CGF); 2121 // nvptx_lane_id = nvptx_id % warpsize 2122 llvm::Value *LaneID = getNVPTXLaneID(CGF); 2123 // nvptx_warp_id = nvptx_id / warpsize 2124 llvm::Value *WarpID = getNVPTXWarpID(CGF); 2125 2126 Address AddrReduceListArg = CGF.GetAddrOfLocalVar(&ReduceListArg); 2127 Address LocalReduceList( 2128 Bld.CreatePointerBitCastOrAddrSpaceCast( 2129 CGF.EmitLoadOfScalar( 2130 AddrReduceListArg, /*Volatile=*/false, C.VoidPtrTy, Loc, 2131 LValueBaseInfo(AlignmentSource::Type), TBAAAccessInfo()), 2132 CGF.ConvertTypeForMem(ReductionArrayTy)->getPointerTo()), 2133 CGF.getPointerAlign()); 2134 2135 unsigned Idx = 0; 2136 for (const Expr *Private : Privates) { 2137 // 2138 // Warp master copies reduce element to transfer medium in __shared__ 2139 // memory. 
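    // Illustration only (the actual control flow is emitted below), per
    // TySize-sized chunk of the element:
    //   barrier();
    //   if (lane_id == 0)                       // warp master
    //     transfer_medium[warp_id] = *chunk;
    //   barrier();
    //   if (thread_id < num_warps)              // lanes of warp 0
    //     *chunk = transfer_medium[thread_id];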
2140 // 2141 unsigned RealTySize = 2142 C.getTypeSizeInChars(Private->getType()) 2143 .alignTo(C.getTypeAlignInChars(Private->getType())) 2144 .getQuantity(); 2145 for (unsigned TySize = 4; TySize > 0 && RealTySize > 0; TySize /=2) { 2146 unsigned NumIters = RealTySize / TySize; 2147 if (NumIters == 0) 2148 continue; 2149 QualType CType = C.getIntTypeForBitwidth( 2150 C.toBits(CharUnits::fromQuantity(TySize)), /*Signed=*/1); 2151 llvm::Type *CopyType = CGF.ConvertTypeForMem(CType); 2152 CharUnits Align = CharUnits::fromQuantity(TySize); 2153 llvm::Value *Cnt = nullptr; 2154 Address CntAddr = Address::invalid(); 2155 llvm::BasicBlock *PrecondBB = nullptr; 2156 llvm::BasicBlock *ExitBB = nullptr; 2157 if (NumIters > 1) { 2158 CntAddr = CGF.CreateMemTemp(C.IntTy, ".cnt.addr"); 2159 CGF.EmitStoreOfScalar(llvm::Constant::getNullValue(CGM.IntTy), CntAddr, 2160 /*Volatile=*/false, C.IntTy); 2161 PrecondBB = CGF.createBasicBlock("precond"); 2162 ExitBB = CGF.createBasicBlock("exit"); 2163 llvm::BasicBlock *BodyBB = CGF.createBasicBlock("body"); 2164 // There is no need to emit line number for unconditional branch. 2165 (void)ApplyDebugLocation::CreateEmpty(CGF); 2166 CGF.EmitBlock(PrecondBB); 2167 Cnt = CGF.EmitLoadOfScalar(CntAddr, /*Volatile=*/false, C.IntTy, Loc); 2168 llvm::Value *Cmp = 2169 Bld.CreateICmpULT(Cnt, llvm::ConstantInt::get(CGM.IntTy, NumIters)); 2170 Bld.CreateCondBr(Cmp, BodyBB, ExitBB); 2171 CGF.EmitBlock(BodyBB); 2172 } 2173 // kmpc_barrier. 2174 CGM.getOpenMPRuntime().emitBarrierCall(CGF, Loc, OMPD_unknown, 2175 /*EmitChecks=*/false, 2176 /*ForceSimpleCall=*/true); 2177 llvm::BasicBlock *ThenBB = CGF.createBasicBlock("then"); 2178 llvm::BasicBlock *ElseBB = CGF.createBasicBlock("else"); 2179 llvm::BasicBlock *MergeBB = CGF.createBasicBlock("ifcont"); 2180 2181 // if (lane_id == 0) 2182 llvm::Value *IsWarpMaster = Bld.CreateIsNull(LaneID, "warp_master"); 2183 Bld.CreateCondBr(IsWarpMaster, ThenBB, ElseBB); 2184 CGF.EmitBlock(ThenBB); 2185 2186 // Reduce element = LocalReduceList[i] 2187 Address ElemPtrPtrAddr = Bld.CreateConstArrayGEP(LocalReduceList, Idx); 2188 llvm::Value *ElemPtrPtr = CGF.EmitLoadOfScalar( 2189 ElemPtrPtrAddr, /*Volatile=*/false, C.VoidPtrTy, SourceLocation()); 2190 // elemptr = ((CopyType*)(elemptrptr)) + I 2191 Address ElemPtr = Address(ElemPtrPtr, Align); 2192 ElemPtr = Bld.CreateElementBitCast(ElemPtr, CopyType); 2193 if (NumIters > 1) { 2194 ElemPtr = Address(Bld.CreateGEP(ElemPtr.getElementType(), 2195 ElemPtr.getPointer(), Cnt), 2196 ElemPtr.getAlignment()); 2197 } 2198 2199 // Get pointer to location in transfer medium. 2200 // MediumPtr = &medium[warp_id] 2201 llvm::Value *MediumPtrVal = Bld.CreateInBoundsGEP( 2202 TransferMedium->getValueType(), TransferMedium, 2203 {llvm::Constant::getNullValue(CGM.Int64Ty), WarpID}); 2204 Address MediumPtr(MediumPtrVal, Align); 2205 // Casting to actual data type. 2206 // MediumPtr = (CopyType*)MediumPtrAddr; 2207 MediumPtr = Bld.CreateElementBitCast(MediumPtr, CopyType); 2208 2209 // elem = *elemptr 2210 //*MediumPtr = elem 2211 llvm::Value *Elem = CGF.EmitLoadOfScalar( 2212 ElemPtr, /*Volatile=*/false, CType, Loc, 2213 LValueBaseInfo(AlignmentSource::Type), TBAAAccessInfo()); 2214 // Store the source element value to the dest element address. 
2215 CGF.EmitStoreOfScalar(Elem, MediumPtr, /*Volatile=*/true, CType, 2216 LValueBaseInfo(AlignmentSource::Type), 2217 TBAAAccessInfo()); 2218 2219 Bld.CreateBr(MergeBB); 2220 2221 CGF.EmitBlock(ElseBB); 2222 Bld.CreateBr(MergeBB); 2223 2224 CGF.EmitBlock(MergeBB); 2225 2226 // kmpc_barrier. 2227 CGM.getOpenMPRuntime().emitBarrierCall(CGF, Loc, OMPD_unknown, 2228 /*EmitChecks=*/false, 2229 /*ForceSimpleCall=*/true); 2230 2231 // 2232 // Warp 0 copies reduce element from transfer medium. 2233 // 2234 llvm::BasicBlock *W0ThenBB = CGF.createBasicBlock("then"); 2235 llvm::BasicBlock *W0ElseBB = CGF.createBasicBlock("else"); 2236 llvm::BasicBlock *W0MergeBB = CGF.createBasicBlock("ifcont"); 2237 2238 Address AddrNumWarpsArg = CGF.GetAddrOfLocalVar(&NumWarpsArg); 2239 llvm::Value *NumWarpsVal = CGF.EmitLoadOfScalar( 2240 AddrNumWarpsArg, /*Volatile=*/false, C.IntTy, Loc); 2241 2242 // Up to 32 threads in warp 0 are active. 2243 llvm::Value *IsActiveThread = 2244 Bld.CreateICmpULT(ThreadID, NumWarpsVal, "is_active_thread"); 2245 Bld.CreateCondBr(IsActiveThread, W0ThenBB, W0ElseBB); 2246 2247 CGF.EmitBlock(W0ThenBB); 2248 2249 // SrcMediumPtr = &medium[tid] 2250 llvm::Value *SrcMediumPtrVal = Bld.CreateInBoundsGEP( 2251 TransferMedium->getValueType(), TransferMedium, 2252 {llvm::Constant::getNullValue(CGM.Int64Ty), ThreadID}); 2253 Address SrcMediumPtr(SrcMediumPtrVal, Align); 2254 // SrcMediumVal = *SrcMediumPtr; 2255 SrcMediumPtr = Bld.CreateElementBitCast(SrcMediumPtr, CopyType); 2256 2257 // TargetElemPtr = (CopyType*)(SrcDataAddr[i]) + I 2258 Address TargetElemPtrPtr = Bld.CreateConstArrayGEP(LocalReduceList, Idx); 2259 llvm::Value *TargetElemPtrVal = CGF.EmitLoadOfScalar( 2260 TargetElemPtrPtr, /*Volatile=*/false, C.VoidPtrTy, Loc); 2261 Address TargetElemPtr = Address(TargetElemPtrVal, Align); 2262 TargetElemPtr = Bld.CreateElementBitCast(TargetElemPtr, CopyType); 2263 if (NumIters > 1) { 2264 TargetElemPtr = Address(Bld.CreateGEP(TargetElemPtr.getElementType(), 2265 TargetElemPtr.getPointer(), Cnt), 2266 TargetElemPtr.getAlignment()); 2267 } 2268 2269 // *TargetElemPtr = SrcMediumVal; 2270 llvm::Value *SrcMediumValue = 2271 CGF.EmitLoadOfScalar(SrcMediumPtr, /*Volatile=*/true, CType, Loc); 2272 CGF.EmitStoreOfScalar(SrcMediumValue, TargetElemPtr, /*Volatile=*/false, 2273 CType); 2274 Bld.CreateBr(W0MergeBB); 2275 2276 CGF.EmitBlock(W0ElseBB); 2277 Bld.CreateBr(W0MergeBB); 2278 2279 CGF.EmitBlock(W0MergeBB); 2280 2281 if (NumIters > 1) { 2282 Cnt = Bld.CreateNSWAdd(Cnt, llvm::ConstantInt::get(CGM.IntTy, /*V=*/1)); 2283 CGF.EmitStoreOfScalar(Cnt, CntAddr, /*Volatile=*/false, C.IntTy); 2284 CGF.EmitBranch(PrecondBB); 2285 (void)ApplyDebugLocation::CreateEmpty(CGF); 2286 CGF.EmitBlock(ExitBB); 2287 } 2288 RealTySize %= TySize; 2289 } 2290 ++Idx; 2291 } 2292 2293 CGF.FinishFunction(); 2294 return Fn; 2295 } 2296 2297 /// Emit a helper that reduces data across two OpenMP threads (lanes) 2298 /// in the same warp. It uses shuffle instructions to copy over data from 2299 /// a remote lane's stack. The reduction algorithm performed is specified 2300 /// by the fourth parameter. 2301 /// 2302 /// Algorithm Versions. 2303 /// Full Warp Reduce (argument value 0): 2304 /// This algorithm assumes that all 32 lanes are active and gathers 2305 /// data from these 32 lanes, producing a single resultant value. 2306 /// Contiguous Partial Warp Reduce (argument value 1): 2307 /// This algorithm assumes that only a *contiguous* subset of lanes 2308 /// are active. 
This happens for the last warp in a parallel region 2309 /// when the user specified num_threads is not an integer multiple of 2310 /// 32. This contiguous subset always starts with the zeroth lane. 2311 /// Partial Warp Reduce (argument value 2): 2312 /// This algorithm gathers data from any number of lanes at any position. 2313 /// All reduced values are stored in the lowest possible lane. The set 2314 /// of problems every algorithm addresses is a super set of those 2315 /// addressable by algorithms with a lower version number. Overhead 2316 /// increases as algorithm version increases. 2317 /// 2318 /// Terminology 2319 /// Reduce element: 2320 /// Reduce element refers to the individual data field with primitive 2321 /// data types to be combined and reduced across threads. 2322 /// Reduce list: 2323 /// Reduce list refers to a collection of local, thread-private 2324 /// reduce elements. 2325 /// Remote Reduce list: 2326 /// Remote Reduce list refers to a collection of remote (relative to 2327 /// the current thread) reduce elements. 2328 /// 2329 /// We distinguish between three states of threads that are important to 2330 /// the implementation of this function. 2331 /// Alive threads: 2332 /// Threads in a warp executing the SIMT instruction, as distinguished from 2333 /// threads that are inactive due to divergent control flow. 2334 /// Active threads: 2335 /// The minimal set of threads that has to be alive upon entry to this 2336 /// function. The computation is correct iff active threads are alive. 2337 /// Some threads are alive but they are not active because they do not 2338 /// contribute to the computation in any useful manner. Turning them off 2339 /// may introduce control flow overheads without any tangible benefits. 2340 /// Effective threads: 2341 /// In order to comply with the argument requirements of the shuffle 2342 /// function, we must keep all lanes holding data alive. But at most 2343 /// half of them perform value aggregation; we refer to this half of 2344 /// threads as effective. The other half is simply handing off their 2345 /// data. 2346 /// 2347 /// Procedure 2348 /// Value shuffle: 2349 /// In this step active threads transfer data from higher lane positions 2350 /// in the warp to lower lane positions, creating Remote Reduce list. 2351 /// Value aggregation: 2352 /// In this step, effective threads combine their thread local Reduce list 2353 /// with Remote Reduce list and store the result in the thread local 2354 /// Reduce list. 2355 /// Value copy: 2356 /// In this step, we deal with the assumption made by algorithm 2 2357 /// (i.e. contiguity assumption). When we have an odd number of lanes 2358 /// active, say 2k+1, only k threads will be effective and therefore k 2359 /// new values will be produced. However, the Reduce list owned by the 2360 /// (2k+1)th thread is ignored in the value aggregation. Therefore 2361 /// we copy the Reduce list from the (2k+1)th lane to (k+1)th lane so 2362 /// that the contiguity assumption still holds. 2363 static llvm::Function *emitShuffleAndReduceFunction( 2364 CodeGenModule &CGM, ArrayRef<const Expr *> Privates, 2365 QualType ReductionArrayTy, llvm::Function *ReduceFn, SourceLocation Loc) { 2366 ASTContext &C = CGM.getContext(); 2367 2368 // Thread local Reduce list used to host the values of data to be reduced. 2369 ImplicitParamDecl ReduceListArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, 2370 C.VoidPtrTy, ImplicitParamDecl::Other); 2371 // Current lane id; could be logical. 
2372 ImplicitParamDecl LaneIDArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.ShortTy, 2373 ImplicitParamDecl::Other); 2374 // Offset of the remote source lane relative to the current lane. 2375 ImplicitParamDecl RemoteLaneOffsetArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, 2376 C.ShortTy, ImplicitParamDecl::Other); 2377 // Algorithm version. This is expected to be known at compile time. 2378 ImplicitParamDecl AlgoVerArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, 2379 C.ShortTy, ImplicitParamDecl::Other); 2380 FunctionArgList Args; 2381 Args.push_back(&ReduceListArg); 2382 Args.push_back(&LaneIDArg); 2383 Args.push_back(&RemoteLaneOffsetArg); 2384 Args.push_back(&AlgoVerArg); 2385 2386 const CGFunctionInfo &CGFI = 2387 CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args); 2388 auto *Fn = llvm::Function::Create( 2389 CGM.getTypes().GetFunctionType(CGFI), llvm::GlobalValue::InternalLinkage, 2390 "_omp_reduction_shuffle_and_reduce_func", &CGM.getModule()); 2391 CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, CGFI); 2392 Fn->setDoesNotRecurse(); 2393 2394 CodeGenFunction CGF(CGM); 2395 CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, CGFI, Args, Loc, Loc); 2396 2397 CGBuilderTy &Bld = CGF.Builder; 2398 2399 Address AddrReduceListArg = CGF.GetAddrOfLocalVar(&ReduceListArg); 2400 Address LocalReduceList( 2401 Bld.CreatePointerBitCastOrAddrSpaceCast( 2402 CGF.EmitLoadOfScalar(AddrReduceListArg, /*Volatile=*/false, 2403 C.VoidPtrTy, SourceLocation()), 2404 CGF.ConvertTypeForMem(ReductionArrayTy)->getPointerTo()), 2405 CGF.getPointerAlign()); 2406 2407 Address AddrLaneIDArg = CGF.GetAddrOfLocalVar(&LaneIDArg); 2408 llvm::Value *LaneIDArgVal = CGF.EmitLoadOfScalar( 2409 AddrLaneIDArg, /*Volatile=*/false, C.ShortTy, SourceLocation()); 2410 2411 Address AddrRemoteLaneOffsetArg = CGF.GetAddrOfLocalVar(&RemoteLaneOffsetArg); 2412 llvm::Value *RemoteLaneOffsetArgVal = CGF.EmitLoadOfScalar( 2413 AddrRemoteLaneOffsetArg, /*Volatile=*/false, C.ShortTy, SourceLocation()); 2414 2415 Address AddrAlgoVerArg = CGF.GetAddrOfLocalVar(&AlgoVerArg); 2416 llvm::Value *AlgoVerArgVal = CGF.EmitLoadOfScalar( 2417 AddrAlgoVerArg, /*Volatile=*/false, C.ShortTy, SourceLocation()); 2418 2419 // Create a local thread-private variable to host the Reduce list 2420 // from a remote lane. 2421 Address RemoteReduceList = 2422 CGF.CreateMemTemp(ReductionArrayTy, ".omp.reduction.remote_reduce_list"); 2423 2424 // This loop iterates through the list of reduce elements and copies, 2425 // element by element, from a remote lane in the warp to RemoteReduceList, 2426 // hosted on the thread's stack. 2427 emitReductionListCopy(RemoteLaneToThread, CGF, ReductionArrayTy, Privates, 2428 LocalReduceList, RemoteReduceList, 2429 {/*RemoteLaneOffset=*/RemoteLaneOffsetArgVal, 2430 /*ScratchpadIndex=*/nullptr, 2431 /*ScratchpadWidth=*/nullptr}); 2432 2433 // The actions to be performed on the Remote Reduce list is dependent 2434 // on the algorithm version. 2435 // 2436 // if (AlgoVer==0) || (AlgoVer==1 && (LaneId < Offset)) || (AlgoVer==2 && 2437 // LaneId % 2 == 0 && Offset > 0): 2438 // do the reduction value aggregation 2439 // 2440 // The thread local variable Reduce list is mutated in place to host the 2441 // reduced data, which is the aggregated value produced from local and 2442 // remote lanes. 2443 // 2444 // Note that AlgoVer is expected to be a constant integer known at compile 2445 // time. 2446 // When AlgoVer==0, the first conjunction evaluates to true, making 2447 // the entire predicate true during compile time. 
2448 // When AlgoVer==1, the second conjunction has only the second part to be 2449 // evaluated during runtime. Other conjunctions evaluates to false 2450 // during compile time. 2451 // When AlgoVer==2, the third conjunction has only the second part to be 2452 // evaluated during runtime. Other conjunctions evaluates to false 2453 // during compile time. 2454 llvm::Value *CondAlgo0 = Bld.CreateIsNull(AlgoVerArgVal); 2455 2456 llvm::Value *Algo1 = Bld.CreateICmpEQ(AlgoVerArgVal, Bld.getInt16(1)); 2457 llvm::Value *CondAlgo1 = Bld.CreateAnd( 2458 Algo1, Bld.CreateICmpULT(LaneIDArgVal, RemoteLaneOffsetArgVal)); 2459 2460 llvm::Value *Algo2 = Bld.CreateICmpEQ(AlgoVerArgVal, Bld.getInt16(2)); 2461 llvm::Value *CondAlgo2 = Bld.CreateAnd( 2462 Algo2, Bld.CreateIsNull(Bld.CreateAnd(LaneIDArgVal, Bld.getInt16(1)))); 2463 CondAlgo2 = Bld.CreateAnd( 2464 CondAlgo2, Bld.CreateICmpSGT(RemoteLaneOffsetArgVal, Bld.getInt16(0))); 2465 2466 llvm::Value *CondReduce = Bld.CreateOr(CondAlgo0, CondAlgo1); 2467 CondReduce = Bld.CreateOr(CondReduce, CondAlgo2); 2468 2469 llvm::BasicBlock *ThenBB = CGF.createBasicBlock("then"); 2470 llvm::BasicBlock *ElseBB = CGF.createBasicBlock("else"); 2471 llvm::BasicBlock *MergeBB = CGF.createBasicBlock("ifcont"); 2472 Bld.CreateCondBr(CondReduce, ThenBB, ElseBB); 2473 2474 CGF.EmitBlock(ThenBB); 2475 // reduce_function(LocalReduceList, RemoteReduceList) 2476 llvm::Value *LocalReduceListPtr = Bld.CreatePointerBitCastOrAddrSpaceCast( 2477 LocalReduceList.getPointer(), CGF.VoidPtrTy); 2478 llvm::Value *RemoteReduceListPtr = Bld.CreatePointerBitCastOrAddrSpaceCast( 2479 RemoteReduceList.getPointer(), CGF.VoidPtrTy); 2480 CGM.getOpenMPRuntime().emitOutlinedFunctionCall( 2481 CGF, Loc, ReduceFn, {LocalReduceListPtr, RemoteReduceListPtr}); 2482 Bld.CreateBr(MergeBB); 2483 2484 CGF.EmitBlock(ElseBB); 2485 Bld.CreateBr(MergeBB); 2486 2487 CGF.EmitBlock(MergeBB); 2488 2489 // if (AlgoVer==1 && (LaneId >= Offset)) copy Remote Reduce list to local 2490 // Reduce list. 2491 Algo1 = Bld.CreateICmpEQ(AlgoVerArgVal, Bld.getInt16(1)); 2492 llvm::Value *CondCopy = Bld.CreateAnd( 2493 Algo1, Bld.CreateICmpUGE(LaneIDArgVal, RemoteLaneOffsetArgVal)); 2494 2495 llvm::BasicBlock *CpyThenBB = CGF.createBasicBlock("then"); 2496 llvm::BasicBlock *CpyElseBB = CGF.createBasicBlock("else"); 2497 llvm::BasicBlock *CpyMergeBB = CGF.createBasicBlock("ifcont"); 2498 Bld.CreateCondBr(CondCopy, CpyThenBB, CpyElseBB); 2499 2500 CGF.EmitBlock(CpyThenBB); 2501 emitReductionListCopy(ThreadCopy, CGF, ReductionArrayTy, Privates, 2502 RemoteReduceList, LocalReduceList); 2503 Bld.CreateBr(CpyMergeBB); 2504 2505 CGF.EmitBlock(CpyElseBB); 2506 Bld.CreateBr(CpyMergeBB); 2507 2508 CGF.EmitBlock(CpyMergeBB); 2509 2510 CGF.FinishFunction(); 2511 return Fn; 2512 } 2513 2514 /// This function emits a helper that copies all the reduction variables from 2515 /// the team into the provided global buffer for the reduction variables. 2516 /// 2517 /// void list_to_global_copy_func(void *buffer, int Idx, void *reduce_data) 2518 /// For all data entries D in reduce_data: 2519 /// Copy local D to buffer.D[Idx] 2520 static llvm::Value *emitListToGlobalCopyFunction( 2521 CodeGenModule &CGM, ArrayRef<const Expr *> Privates, 2522 QualType ReductionArrayTy, SourceLocation Loc, 2523 const RecordDecl *TeamReductionRec, 2524 const llvm::SmallDenseMap<const ValueDecl *, const FieldDecl *> 2525 &VarFieldMap) { 2526 ASTContext &C = CGM.getContext(); 2527 2528 // Buffer: global reduction buffer. 
2529 ImplicitParamDecl BufferArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, 2530 C.VoidPtrTy, ImplicitParamDecl::Other); 2531 // Idx: index of the buffer. 2532 ImplicitParamDecl IdxArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.IntTy, 2533 ImplicitParamDecl::Other); 2534 // ReduceList: thread local Reduce list. 2535 ImplicitParamDecl ReduceListArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, 2536 C.VoidPtrTy, ImplicitParamDecl::Other); 2537 FunctionArgList Args; 2538 Args.push_back(&BufferArg); 2539 Args.push_back(&IdxArg); 2540 Args.push_back(&ReduceListArg); 2541 2542 const CGFunctionInfo &CGFI = 2543 CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args); 2544 auto *Fn = llvm::Function::Create( 2545 CGM.getTypes().GetFunctionType(CGFI), llvm::GlobalValue::InternalLinkage, 2546 "_omp_reduction_list_to_global_copy_func", &CGM.getModule()); 2547 CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, CGFI); 2548 Fn->setDoesNotRecurse(); 2549 CodeGenFunction CGF(CGM); 2550 CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, CGFI, Args, Loc, Loc); 2551 2552 CGBuilderTy &Bld = CGF.Builder; 2553 2554 Address AddrReduceListArg = CGF.GetAddrOfLocalVar(&ReduceListArg); 2555 Address AddrBufferArg = CGF.GetAddrOfLocalVar(&BufferArg); 2556 Address LocalReduceList( 2557 Bld.CreatePointerBitCastOrAddrSpaceCast( 2558 CGF.EmitLoadOfScalar(AddrReduceListArg, /*Volatile=*/false, 2559 C.VoidPtrTy, Loc), 2560 CGF.ConvertTypeForMem(ReductionArrayTy)->getPointerTo()), 2561 CGF.getPointerAlign()); 2562 QualType StaticTy = C.getRecordType(TeamReductionRec); 2563 llvm::Type *LLVMReductionsBufferTy = 2564 CGM.getTypes().ConvertTypeForMem(StaticTy); 2565 llvm::Value *BufferArrPtr = Bld.CreatePointerBitCastOrAddrSpaceCast( 2566 CGF.EmitLoadOfScalar(AddrBufferArg, /*Volatile=*/false, C.VoidPtrTy, Loc), 2567 LLVMReductionsBufferTy->getPointerTo()); 2568 llvm::Value *Idxs[] = {llvm::ConstantInt::getNullValue(CGF.Int32Ty), 2569 CGF.EmitLoadOfScalar(CGF.GetAddrOfLocalVar(&IdxArg), 2570 /*Volatile=*/false, C.IntTy, 2571 Loc)}; 2572 unsigned Idx = 0; 2573 for (const Expr *Private : Privates) { 2574 // Reduce element = LocalReduceList[i] 2575 Address ElemPtrPtrAddr = Bld.CreateConstArrayGEP(LocalReduceList, Idx); 2576 llvm::Value *ElemPtrPtr = CGF.EmitLoadOfScalar( 2577 ElemPtrPtrAddr, /*Volatile=*/false, C.VoidPtrTy, SourceLocation()); 2578 // elemptr = ((CopyType*)(elemptrptr)) + I 2579 ElemPtrPtr = Bld.CreatePointerBitCastOrAddrSpaceCast( 2580 ElemPtrPtr, CGF.ConvertTypeForMem(Private->getType())->getPointerTo()); 2581 Address ElemPtr = 2582 Address(ElemPtrPtr, C.getTypeAlignInChars(Private->getType())); 2583 const ValueDecl *VD = cast<DeclRefExpr>(Private)->getDecl(); 2584 // Global = Buffer.VD[Idx]; 2585 const FieldDecl *FD = VarFieldMap.lookup(VD); 2586 LValue GlobLVal = CGF.EmitLValueForField( 2587 CGF.MakeNaturalAlignAddrLValue(BufferArrPtr, StaticTy), FD); 2588 Address GlobAddr = GlobLVal.getAddress(CGF); 2589 llvm::Value *BufferPtr = Bld.CreateInBoundsGEP( 2590 GlobAddr.getElementType(), GlobAddr.getPointer(), Idxs); 2591 GlobLVal.setAddress(Address(BufferPtr, GlobAddr.getAlignment())); 2592 switch (CGF.getEvaluationKind(Private->getType())) { 2593 case TEK_Scalar: { 2594 llvm::Value *V = CGF.EmitLoadOfScalar( 2595 ElemPtr, /*Volatile=*/false, Private->getType(), Loc, 2596 LValueBaseInfo(AlignmentSource::Type), TBAAAccessInfo()); 2597 CGF.EmitStoreOfScalar(V, GlobLVal); 2598 break; 2599 } 2600 case TEK_Complex: { 2601 CodeGenFunction::ComplexPairTy V = CGF.EmitLoadOfComplex( 2602 CGF.MakeAddrLValue(ElemPtr, 
Private->getType()), Loc); 2603 CGF.EmitStoreOfComplex(V, GlobLVal, /*isInit=*/false); 2604 break; 2605 } 2606 case TEK_Aggregate: 2607 CGF.EmitAggregateCopy(GlobLVal, 2608 CGF.MakeAddrLValue(ElemPtr, Private->getType()), 2609 Private->getType(), AggValueSlot::DoesNotOverlap); 2610 break; 2611 } 2612 ++Idx; 2613 } 2614 2615 CGF.FinishFunction(); 2616 return Fn; 2617 } 2618 2619 /// This function emits a helper that reduces all the reduction variables from 2620 /// the team into the provided global buffer for the reduction variables. 2621 /// 2622 /// void list_to_global_reduce_func(void *buffer, int Idx, void *reduce_data) 2623 /// void *GlobPtrs[]; 2624 /// GlobPtrs[0] = (void*)&buffer.D0[Idx]; 2625 /// ... 2626 /// GlobPtrs[N] = (void*)&buffer.DN[Idx]; 2627 /// reduce_function(GlobPtrs, reduce_data); 2628 static llvm::Value *emitListToGlobalReduceFunction( 2629 CodeGenModule &CGM, ArrayRef<const Expr *> Privates, 2630 QualType ReductionArrayTy, SourceLocation Loc, 2631 const RecordDecl *TeamReductionRec, 2632 const llvm::SmallDenseMap<const ValueDecl *, const FieldDecl *> 2633 &VarFieldMap, 2634 llvm::Function *ReduceFn) { 2635 ASTContext &C = CGM.getContext(); 2636 2637 // Buffer: global reduction buffer. 2638 ImplicitParamDecl BufferArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, 2639 C.VoidPtrTy, ImplicitParamDecl::Other); 2640 // Idx: index of the buffer. 2641 ImplicitParamDecl IdxArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.IntTy, 2642 ImplicitParamDecl::Other); 2643 // ReduceList: thread local Reduce list. 2644 ImplicitParamDecl ReduceListArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, 2645 C.VoidPtrTy, ImplicitParamDecl::Other); 2646 FunctionArgList Args; 2647 Args.push_back(&BufferArg); 2648 Args.push_back(&IdxArg); 2649 Args.push_back(&ReduceListArg); 2650 2651 const CGFunctionInfo &CGFI = 2652 CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args); 2653 auto *Fn = llvm::Function::Create( 2654 CGM.getTypes().GetFunctionType(CGFI), llvm::GlobalValue::InternalLinkage, 2655 "_omp_reduction_list_to_global_reduce_func", &CGM.getModule()); 2656 CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, CGFI); 2657 Fn->setDoesNotRecurse(); 2658 CodeGenFunction CGF(CGM); 2659 CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, CGFI, Args, Loc, Loc); 2660 2661 CGBuilderTy &Bld = CGF.Builder; 2662 2663 Address AddrBufferArg = CGF.GetAddrOfLocalVar(&BufferArg); 2664 QualType StaticTy = C.getRecordType(TeamReductionRec); 2665 llvm::Type *LLVMReductionsBufferTy = 2666 CGM.getTypes().ConvertTypeForMem(StaticTy); 2667 llvm::Value *BufferArrPtr = Bld.CreatePointerBitCastOrAddrSpaceCast( 2668 CGF.EmitLoadOfScalar(AddrBufferArg, /*Volatile=*/false, C.VoidPtrTy, Loc), 2669 LLVMReductionsBufferTy->getPointerTo()); 2670 2671 // 1. Build a list of reduction variables. 
2672 // void *RedList[<n>] = {<ReductionVars>[0], ..., <ReductionVars>[<n>-1]}; 2673 Address ReductionList = 2674 CGF.CreateMemTemp(ReductionArrayTy, ".omp.reduction.red_list"); 2675 auto IPriv = Privates.begin(); 2676 llvm::Value *Idxs[] = {llvm::ConstantInt::getNullValue(CGF.Int32Ty), 2677 CGF.EmitLoadOfScalar(CGF.GetAddrOfLocalVar(&IdxArg), 2678 /*Volatile=*/false, C.IntTy, 2679 Loc)}; 2680 unsigned Idx = 0; 2681 for (unsigned I = 0, E = Privates.size(); I < E; ++I, ++IPriv, ++Idx) { 2682 Address Elem = CGF.Builder.CreateConstArrayGEP(ReductionList, Idx); 2683 // Global = Buffer.VD[Idx]; 2684 const ValueDecl *VD = cast<DeclRefExpr>(*IPriv)->getDecl(); 2685 const FieldDecl *FD = VarFieldMap.lookup(VD); 2686 LValue GlobLVal = CGF.EmitLValueForField( 2687 CGF.MakeNaturalAlignAddrLValue(BufferArrPtr, StaticTy), FD); 2688 Address GlobAddr = GlobLVal.getAddress(CGF); 2689 llvm::Value *BufferPtr = Bld.CreateInBoundsGEP( 2690 GlobAddr.getElementType(), GlobAddr.getPointer(), Idxs); 2691 llvm::Value *Ptr = CGF.EmitCastToVoidPtr(BufferPtr); 2692 CGF.EmitStoreOfScalar(Ptr, Elem, /*Volatile=*/false, C.VoidPtrTy); 2693 if ((*IPriv)->getType()->isVariablyModifiedType()) { 2694 // Store array size. 2695 ++Idx; 2696 Elem = CGF.Builder.CreateConstArrayGEP(ReductionList, Idx); 2697 llvm::Value *Size = CGF.Builder.CreateIntCast( 2698 CGF.getVLASize( 2699 CGF.getContext().getAsVariableArrayType((*IPriv)->getType())) 2700 .NumElts, 2701 CGF.SizeTy, /*isSigned=*/false); 2702 CGF.Builder.CreateStore(CGF.Builder.CreateIntToPtr(Size, CGF.VoidPtrTy), 2703 Elem); 2704 } 2705 } 2706 2707 // Call reduce_function(GlobalReduceList, ReduceList) 2708 llvm::Value *GlobalReduceList = 2709 CGF.EmitCastToVoidPtr(ReductionList.getPointer()); 2710 Address AddrReduceListArg = CGF.GetAddrOfLocalVar(&ReduceListArg); 2711 llvm::Value *ReducedPtr = CGF.EmitLoadOfScalar( 2712 AddrReduceListArg, /*Volatile=*/false, C.VoidPtrTy, Loc); 2713 CGM.getOpenMPRuntime().emitOutlinedFunctionCall( 2714 CGF, Loc, ReduceFn, {GlobalReduceList, ReducedPtr}); 2715 CGF.FinishFunction(); 2716 return Fn; 2717 } 2718 2719 /// This function emits a helper that copies all the reduction variables from 2720 /// the provided global buffer for the reduction variables back into the team. 2721 /// 2722 /// void global_to_list_copy_func(void *buffer, int Idx, void *reduce_data) 2723 /// For all data entries D in reduce_data: 2724 /// Copy buffer.D[Idx] to local D; 2725 static llvm::Value *emitGlobalToListCopyFunction( 2726 CodeGenModule &CGM, ArrayRef<const Expr *> Privates, 2727 QualType ReductionArrayTy, SourceLocation Loc, 2728 const RecordDecl *TeamReductionRec, 2729 const llvm::SmallDenseMap<const ValueDecl *, const FieldDecl *> 2730 &VarFieldMap) { 2731 ASTContext &C = CGM.getContext(); 2732 2733 // Buffer: global reduction buffer. 2734 ImplicitParamDecl BufferArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, 2735 C.VoidPtrTy, ImplicitParamDecl::Other); 2736 // Idx: index of the buffer. 2737 ImplicitParamDecl IdxArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.IntTy, 2738 ImplicitParamDecl::Other); 2739 // ReduceList: thread local Reduce list.
2740 ImplicitParamDecl ReduceListArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, 2741 C.VoidPtrTy, ImplicitParamDecl::Other); 2742 FunctionArgList Args; 2743 Args.push_back(&BufferArg); 2744 Args.push_back(&IdxArg); 2745 Args.push_back(&ReduceListArg); 2746 2747 const CGFunctionInfo &CGFI = 2748 CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args); 2749 auto *Fn = llvm::Function::Create( 2750 CGM.getTypes().GetFunctionType(CGFI), llvm::GlobalValue::InternalLinkage, 2751 "_omp_reduction_global_to_list_copy_func", &CGM.getModule()); 2752 CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, CGFI); 2753 Fn->setDoesNotRecurse(); 2754 CodeGenFunction CGF(CGM); 2755 CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, CGFI, Args, Loc, Loc); 2756 2757 CGBuilderTy &Bld = CGF.Builder; 2758 2759 Address AddrReduceListArg = CGF.GetAddrOfLocalVar(&ReduceListArg); 2760 Address AddrBufferArg = CGF.GetAddrOfLocalVar(&BufferArg); 2761 Address LocalReduceList( 2762 Bld.CreatePointerBitCastOrAddrSpaceCast( 2763 CGF.EmitLoadOfScalar(AddrReduceListArg, /*Volatile=*/false, 2764 C.VoidPtrTy, Loc), 2765 CGF.ConvertTypeForMem(ReductionArrayTy)->getPointerTo()), 2766 CGF.getPointerAlign()); 2767 QualType StaticTy = C.getRecordType(TeamReductionRec); 2768 llvm::Type *LLVMReductionsBufferTy = 2769 CGM.getTypes().ConvertTypeForMem(StaticTy); 2770 llvm::Value *BufferArrPtr = Bld.CreatePointerBitCastOrAddrSpaceCast( 2771 CGF.EmitLoadOfScalar(AddrBufferArg, /*Volatile=*/false, C.VoidPtrTy, Loc), 2772 LLVMReductionsBufferTy->getPointerTo()); 2773 2774 llvm::Value *Idxs[] = {llvm::ConstantInt::getNullValue(CGF.Int32Ty), 2775 CGF.EmitLoadOfScalar(CGF.GetAddrOfLocalVar(&IdxArg), 2776 /*Volatile=*/false, C.IntTy, 2777 Loc)}; 2778 unsigned Idx = 0; 2779 for (const Expr *Private : Privates) { 2780 // Reduce element = LocalReduceList[i] 2781 Address ElemPtrPtrAddr = Bld.CreateConstArrayGEP(LocalReduceList, Idx); 2782 llvm::Value *ElemPtrPtr = CGF.EmitLoadOfScalar( 2783 ElemPtrPtrAddr, /*Volatile=*/false, C.VoidPtrTy, SourceLocation()); 2784 // elemptr = ((CopyType*)(elemptrptr)) + I 2785 ElemPtrPtr = Bld.CreatePointerBitCastOrAddrSpaceCast( 2786 ElemPtrPtr, CGF.ConvertTypeForMem(Private->getType())->getPointerTo()); 2787 Address ElemPtr = 2788 Address(ElemPtrPtr, C.getTypeAlignInChars(Private->getType())); 2789 const ValueDecl *VD = cast<DeclRefExpr>(Private)->getDecl(); 2790 // Global = Buffer.VD[Idx]; 2791 const FieldDecl *FD = VarFieldMap.lookup(VD); 2792 LValue GlobLVal = CGF.EmitLValueForField( 2793 CGF.MakeNaturalAlignAddrLValue(BufferArrPtr, StaticTy), FD); 2794 Address GlobAddr = GlobLVal.getAddress(CGF); 2795 llvm::Value *BufferPtr = Bld.CreateInBoundsGEP( 2796 GlobAddr.getElementType(), GlobAddr.getPointer(), Idxs); 2797 GlobLVal.setAddress(Address(BufferPtr, GlobAddr.getAlignment())); 2798 switch (CGF.getEvaluationKind(Private->getType())) { 2799 case TEK_Scalar: { 2800 llvm::Value *V = CGF.EmitLoadOfScalar(GlobLVal, Loc); 2801 CGF.EmitStoreOfScalar(V, ElemPtr, /*Volatile=*/false, Private->getType(), 2802 LValueBaseInfo(AlignmentSource::Type), 2803 TBAAAccessInfo()); 2804 break; 2805 } 2806 case TEK_Complex: { 2807 CodeGenFunction::ComplexPairTy V = CGF.EmitLoadOfComplex(GlobLVal, Loc); 2808 CGF.EmitStoreOfComplex(V, CGF.MakeAddrLValue(ElemPtr, Private->getType()), 2809 /*isInit=*/false); 2810 break; 2811 } 2812 case TEK_Aggregate: 2813 CGF.EmitAggregateCopy(CGF.MakeAddrLValue(ElemPtr, Private->getType()), 2814 GlobLVal, Private->getType(), 2815 AggValueSlot::DoesNotOverlap); 2816 break; 2817 } 2818 ++Idx; 2819 } 2820 
2821 CGF.FinishFunction(); 2822 return Fn; 2823 } 2824 2825 /// This function emits a helper that reduces all the reduction variables from 2826 /// the team into the provided global buffer for the reduction variables. 2827 /// 2828 /// void global_to_list_reduce_func(void *buffer, int Idx, void *reduce_data) 2829 /// void *GlobPtrs[]; 2830 /// GlobPtrs[0] = (void*)&buffer.D0[Idx]; 2831 /// ... 2832 /// GlobPtrs[N] = (void*)&buffer.DN[Idx]; 2833 /// reduce_function(reduce_data, GlobPtrs); 2834 static llvm::Value *emitGlobalToListReduceFunction( 2835 CodeGenModule &CGM, ArrayRef<const Expr *> Privates, 2836 QualType ReductionArrayTy, SourceLocation Loc, 2837 const RecordDecl *TeamReductionRec, 2838 const llvm::SmallDenseMap<const ValueDecl *, const FieldDecl *> 2839 &VarFieldMap, 2840 llvm::Function *ReduceFn) { 2841 ASTContext &C = CGM.getContext(); 2842 2843 // Buffer: global reduction buffer. 2844 ImplicitParamDecl BufferArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, 2845 C.VoidPtrTy, ImplicitParamDecl::Other); 2846 // Idx: index of the buffer. 2847 ImplicitParamDecl IdxArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.IntTy, 2848 ImplicitParamDecl::Other); 2849 // ReduceList: thread local Reduce list. 2850 ImplicitParamDecl ReduceListArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, 2851 C.VoidPtrTy, ImplicitParamDecl::Other); 2852 FunctionArgList Args; 2853 Args.push_back(&BufferArg); 2854 Args.push_back(&IdxArg); 2855 Args.push_back(&ReduceListArg); 2856 2857 const CGFunctionInfo &CGFI = 2858 CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args); 2859 auto *Fn = llvm::Function::Create( 2860 CGM.getTypes().GetFunctionType(CGFI), llvm::GlobalValue::InternalLinkage, 2861 "_omp_reduction_global_to_list_reduce_func", &CGM.getModule()); 2862 CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, CGFI); 2863 Fn->setDoesNotRecurse(); 2864 CodeGenFunction CGF(CGM); 2865 CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, CGFI, Args, Loc, Loc); 2866 2867 CGBuilderTy &Bld = CGF.Builder; 2868 2869 Address AddrBufferArg = CGF.GetAddrOfLocalVar(&BufferArg); 2870 QualType StaticTy = C.getRecordType(TeamReductionRec); 2871 llvm::Type *LLVMReductionsBufferTy = 2872 CGM.getTypes().ConvertTypeForMem(StaticTy); 2873 llvm::Value *BufferArrPtr = Bld.CreatePointerBitCastOrAddrSpaceCast( 2874 CGF.EmitLoadOfScalar(AddrBufferArg, /*Volatile=*/false, C.VoidPtrTy, Loc), 2875 LLVMReductionsBufferTy->getPointerTo()); 2876 2877 // 1. Build a list of reduction variables. 
2878 // void *RedList[<n>] = {<ReductionVars>[0], ..., <ReductionVars>[<n>-1]}; 2879 Address ReductionList = 2880 CGF.CreateMemTemp(ReductionArrayTy, ".omp.reduction.red_list"); 2881 auto IPriv = Privates.begin(); 2882 llvm::Value *Idxs[] = {llvm::ConstantInt::getNullValue(CGF.Int32Ty), 2883 CGF.EmitLoadOfScalar(CGF.GetAddrOfLocalVar(&IdxArg), 2884 /*Volatile=*/false, C.IntTy, 2885 Loc)}; 2886 unsigned Idx = 0; 2887 for (unsigned I = 0, E = Privates.size(); I < E; ++I, ++IPriv, ++Idx) { 2888 Address Elem = CGF.Builder.CreateConstArrayGEP(ReductionList, Idx); 2889 // Global = Buffer.VD[Idx]; 2890 const ValueDecl *VD = cast<DeclRefExpr>(*IPriv)->getDecl(); 2891 const FieldDecl *FD = VarFieldMap.lookup(VD); 2892 LValue GlobLVal = CGF.EmitLValueForField( 2893 CGF.MakeNaturalAlignAddrLValue(BufferArrPtr, StaticTy), FD); 2894 Address GlobAddr = GlobLVal.getAddress(CGF); 2895 llvm::Value *BufferPtr = Bld.CreateInBoundsGEP( 2896 GlobAddr.getElementType(), GlobAddr.getPointer(), Idxs); 2897 llvm::Value *Ptr = CGF.EmitCastToVoidPtr(BufferPtr); 2898 CGF.EmitStoreOfScalar(Ptr, Elem, /*Volatile=*/false, C.VoidPtrTy); 2899 if ((*IPriv)->getType()->isVariablyModifiedType()) { 2900 // Store array size. 2901 ++Idx; 2902 Elem = CGF.Builder.CreateConstArrayGEP(ReductionList, Idx); 2903 llvm::Value *Size = CGF.Builder.CreateIntCast( 2904 CGF.getVLASize( 2905 CGF.getContext().getAsVariableArrayType((*IPriv)->getType())) 2906 .NumElts, 2907 CGF.SizeTy, /*isSigned=*/false); 2908 CGF.Builder.CreateStore(CGF.Builder.CreateIntToPtr(Size, CGF.VoidPtrTy), 2909 Elem); 2910 } 2911 } 2912 2913 // Call reduce_function(ReduceList, GlobalReduceList) 2914 llvm::Value *GlobalReduceList = 2915 CGF.EmitCastToVoidPtr(ReductionList.getPointer()); 2916 Address AddrReduceListArg = CGF.GetAddrOfLocalVar(&ReduceListArg); 2917 llvm::Value *ReducedPtr = CGF.EmitLoadOfScalar( 2918 AddrReduceListArg, /*Volatile=*/false, C.VoidPtrTy, Loc); 2919 CGM.getOpenMPRuntime().emitOutlinedFunctionCall( 2920 CGF, Loc, ReduceFn, {ReducedPtr, GlobalReduceList}); 2921 CGF.FinishFunction(); 2922 return Fn; 2923 } 2924 2925 /// 2926 /// Design of OpenMP reductions on the GPU 2927 /// 2928 /// Consider a typical OpenMP program with one or more reduction 2929 /// clauses: 2930 /// 2931 /// float foo; 2932 /// double bar; 2933 /// #pragma omp target teams distribute parallel for \ 2934 /// reduction(+:foo) reduction(*:bar) 2935 /// for (int i = 0; i < N; i++) { 2936 /// foo += A[i]; bar *= B[i]; 2937 /// } 2938 /// 2939 /// where 'foo' and 'bar' are reduced across all OpenMP threads in 2940 /// all teams. In our OpenMP implementation on the NVPTX device an 2941 /// OpenMP team is mapped to a CUDA threadblock and OpenMP threads 2942 /// within a team are mapped to CUDA threads within a threadblock. 2943 /// Our goal is to efficiently aggregate values across all OpenMP 2944 /// threads such that: 2945 /// 2946 /// - the compiler and runtime are logically concise, and 2947 /// - the reduction is performed efficiently in a hierarchical 2948 /// manner as follows: within OpenMP threads in the same warp, 2949 /// across warps in a threadblock, and finally across teams on 2950 /// the NVPTX device. 2951 /// 2952 /// Introduction to Decoupling 2953 /// 2954 /// We would like to decouple the compiler and the runtime so that the 2955 /// latter is ignorant of the reduction variables (number, data types) 2956 /// and the reduction operators. This allows a simpler interface 2957 /// and implementation while still attaining good performance. 
2958 ///
2959 /// Pseudocode for the aforementioned OpenMP program generated by the
2960 /// compiler is as follows:
2961 ///
2962 /// 1. Create private copies of reduction variables on each OpenMP
2963 /// thread: 'foo_private', 'bar_private'
2964 /// 2. Each OpenMP thread reduces the chunk of 'A' and 'B' assigned
2965 /// to it and writes the result in 'foo_private' and 'bar_private'
2966 /// respectively.
2967 /// 3. Call the OpenMP runtime on the GPU to reduce within a team
2968 /// and store the result on the team master:
2969 ///
2970 /// __kmpc_nvptx_parallel_reduce_nowait_v2(...,
2971 /// reduceData, shuffleReduceFn, interWarpCpyFn)
2972 ///
2973 /// where:
2974 /// struct ReduceData {
2975 /// float *foo;
2976 /// double *bar;
2977 /// } reduceData
2978 /// reduceData.foo = &foo_private
2979 /// reduceData.bar = &bar_private
2980 ///
2981 /// 'shuffleReduceFn' and 'interWarpCpyFn' are pointers to two
2982 /// auxiliary functions generated by the compiler that operate on
2983 /// variables of type 'ReduceData'. They aid the runtime in performing
2984 /// algorithmic steps in a data-agnostic manner.
2985 ///
2986 /// 'shuffleReduceFn' is a pointer to a function that reduces data
2987 /// of type 'ReduceData' across two OpenMP threads (lanes) in the
2988 /// same warp. It takes the following arguments as input:
2989 ///
2990 /// a. variable of type 'ReduceData' on the calling lane,
2991 /// b. its lane_id,
2992 /// c. an offset relative to the current lane_id to generate a
2993 /// remote_lane_id. The remote lane contains the second
2994 /// variable of type 'ReduceData' that is to be reduced.
2995 /// d. an algorithm version parameter determining which reduction
2996 /// algorithm to use.
2997 ///
2998 /// 'shuffleReduceFn' retrieves data from the remote lane using
2999 /// efficient GPU shuffle intrinsics and reduces, using the
3000 /// algorithm specified by the 4th parameter, the two operands
3001 /// element-wise. The result is written to the first operand.
3002 ///
3003 /// Different reduction algorithms are implemented in different
3004 /// runtime functions, all calling 'shuffleReduceFn' to perform
3005 /// the essential reduction step. Therefore, based on the 4th
3006 /// parameter, this function behaves slightly differently to
3007 /// cooperate with the runtime to ensure correctness under
3008 /// different circumstances.
3009 ///
3010 /// 'interWarpCpyFn' is a pointer to a function that transfers
3011 /// reduced variables across warps. It tunnels, through CUDA
3012 /// shared memory, the thread-private data of type 'ReduceData'
3013 /// from lane 0 of each warp to a lane in the first warp.
3014 /// 4. Call the OpenMP runtime on the GPU to reduce across teams.
3015 /// The last team writes the global reduced value to memory.
3016 ///
3017 /// ret = __kmpc_nvptx_teams_reduce_nowait(...,
3018 /// reduceData, shuffleReduceFn, interWarpCpyFn,
3019 /// scratchpadCopyFn, loadAndReduceFn)
3020 ///
3021 /// 'scratchpadCopyFn' is a helper that stores reduced
3022 /// data from the team master to a scratchpad array in
3023 /// global memory.
3024 ///
3025 /// 'loadAndReduceFn' is a helper that loads data from
3026 /// the scratchpad array and reduces it with the input
3027 /// operand.
3028 ///
3029 /// These compiler-generated functions hide address
3030 /// calculation and alignment information from the runtime.
3031 /// 5. if ret == 1:
3032 /// The team master of the last team stores the reduced
3033 /// result to the globals in memory.
3034 /// foo += reduceData.foo; bar *= reduceData.bar
3035 ///
3036 ///
3037 /// Warp Reduction Algorithms
3038 ///
3039 /// On the warp level, we have three algorithms implemented in the
3040 /// OpenMP runtime depending on the number of active lanes:
3041 ///
3042 /// Full Warp Reduction
3043 ///
3044 /// The reduce algorithm within a warp where all lanes are active
3045 /// is implemented in the runtime as follows:
3046 ///
3047 /// full_warp_reduce(void *reduce_data,
3048 /// kmp_ShuffleReductFctPtr ShuffleReduceFn) {
3049 /// for (int offset = WARPSIZE/2; offset > 0; offset /= 2)
3050 /// ShuffleReduceFn(reduce_data, 0, offset, 0);
3051 /// }
3052 ///
3053 /// The algorithm completes in log2(WARPSIZE) steps.
3054 ///
3055 /// 'ShuffleReduceFn' is used here with lane_id set to 0 because it is
3056 /// not used; this saves instructions by not retrieving lane_id
3057 /// from the corresponding special registers. The 4th parameter, which
3058 /// represents the version of the algorithm being used, is set to 0 to
3059 /// signify full warp reduction.
3060 ///
3061 /// In this version, 'ShuffleReduceFn' behaves, per element, as follows:
3062 ///
3063 /// #reduce_elem refers to an element in the local lane's data structure
3064 /// #remote_elem is retrieved from a remote lane
3065 /// remote_elem = shuffle_down(reduce_elem, offset, WARPSIZE);
3066 /// reduce_elem = reduce_elem REDUCE_OP remote_elem;
3067 ///
3068 /// Contiguous Partial Warp Reduction
3069 ///
3070 /// This reduce algorithm is used within a warp where only the first
3071 /// 'n' (n <= WARPSIZE) lanes are active. It is typically used when the
3072 /// number of OpenMP threads in a parallel region is not a multiple of
3073 /// WARPSIZE. The algorithm is implemented in the runtime as follows:
3074 ///
3075 /// void
3076 /// contiguous_partial_reduce(void *reduce_data,
3077 /// kmp_ShuffleReductFctPtr ShuffleReduceFn,
3078 /// int size, int lane_id) {
3079 /// int curr_size;
3080 /// int offset;
3081 /// curr_size = size;
3082 /// offset = curr_size/2;
3083 /// while (offset>0) {
3084 /// ShuffleReduceFn(reduce_data, lane_id, offset, 1);
3085 /// curr_size = (curr_size+1)/2;
3086 /// offset = curr_size/2;
3087 /// }
3088 /// }
3089 ///
3090 /// In this version, 'ShuffleReduceFn' behaves, per element, as follows:
3091 ///
3092 /// remote_elem = shuffle_down(reduce_elem, offset, WARPSIZE);
3093 /// if (lane_id < offset)
3094 /// reduce_elem = reduce_elem REDUCE_OP remote_elem
3095 /// else
3096 /// reduce_elem = remote_elem
3097 ///
3098 /// This algorithm assumes that the data to be reduced are located in a
3099 /// contiguous subset of lanes starting from the first. When there is
3100 /// an odd number of active lanes, the data in the last lane is not
3101 /// aggregated with any other lane's data but is instead copied over.
3102 ///
3103 /// Dispersed Partial Warp Reduction
3104 ///
3105 /// This algorithm is used within a warp when any discontiguous subset of
3106 /// lanes is active. It is used to implement the reduction operation
3107 /// across lanes in an OpenMP simd region or in a nested parallel region.
3108 ///
3109 /// void
3110 /// dispersed_partial_reduce(void *reduce_data,
3111 /// kmp_ShuffleReductFctPtr ShuffleReduceFn) {
3112 /// int size, remote_id;
3113 /// int logical_lane_id = number_of_active_lanes_before_me() * 2;
3114 /// do {
3115 /// remote_id = next_active_lane_id_right_after_me();
3116 /// # the above function returns 0 if no active lane
3117 /// # is present right after the current lane.
3118 /// size = number_of_active_lanes_in_this_warp();
3119 /// logical_lane_id /= 2;
3120 /// ShuffleReduceFn(reduce_data, logical_lane_id,
3121 /// remote_id-1-threadIdx.x, 2);
3122 /// } while (logical_lane_id % 2 == 0 && size > 1);
3123 /// }
3124 ///
3125 /// There is no assumption made about the initial state of the reduction.
3126 /// Any number of lanes (>=1) could be active at any position. The reduction
3127 /// result is returned in the first active lane.
3128 ///
3129 /// In this version, 'ShuffleReduceFn' behaves, per element, as follows:
3130 ///
3131 /// remote_elem = shuffle_down(reduce_elem, offset, WARPSIZE);
3132 /// if (lane_id % 2 == 0 && offset > 0)
3133 /// reduce_elem = reduce_elem REDUCE_OP remote_elem
3134 /// else
3135 /// reduce_elem = remote_elem
3136 ///
3137 ///
3138 /// Intra-Team Reduction
3139 ///
3140 /// This step, implemented in the runtime call
3141 /// '__kmpc_nvptx_parallel_reduce_nowait_v2', aggregates data across OpenMP
3142 /// threads in a team. It first reduces within a warp using the
3143 /// aforementioned algorithms. We then proceed to gather all such
3144 /// reduced values at the first warp.
3145 ///
3146 /// The runtime makes use of the function 'interWarpCpyFn', which copies
3147 /// data from each warp master (the zeroth lane of each warp, where
3148 /// warp-reduced data is held) to the zeroth warp. This step reduces (in
3149 /// a mathematical sense) the problem of reduction across warp masters in
3150 /// a block to the problem of warp reduction.
3151 ///
3152 ///
3153 /// Inter-Team Reduction
3154 ///
3155 /// Once a team has reduced its data to a single value, it is stored in
3156 /// a global scratchpad array. Since each team has a distinct slot, this
3157 /// can be done without locking.
3158 ///
3159 /// The last team to write to the scratchpad array proceeds to reduce the
3160 /// scratchpad array. One or more workers in the last team use the helper
3161 /// 'loadAndReduceFn' to load and reduce values from the array, i.e.,
3162 /// the k'th worker reduces every k'th element.
3163 ///
3164 /// Finally, a call is made to '__kmpc_nvptx_parallel_reduce_nowait_v2' to
3165 /// reduce across workers and compute a globally reduced value.
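///
/// Putting it together, a rough sketch of the sequence emitted by
/// 'emitReduction' below for the teams case (the local names here are
/// illustrative only; the precise argument order follows the code):
///
///    void *RedList[] = {&foo_private, &bar_private};
///    int ret = __kmpc_nvptx_teams_reduce_nowait_v2(
///        loc, gtid, global_buffer, num_buffer_slots, RedList,
///        shuffleReduceFn, interWarpCpyFn,
///        listToGlobalCpyFn, listToGlobalRedFn,
///        globalToListCpyFn, globalToListRedFn);
///    if (ret == 1) {
///      // The master thread of the last team folds the team-reduced
///      // values back into the original variables.
///      foo += foo_private; bar *= bar_private;
///    }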
3166 ///
3167 void CGOpenMPRuntimeGPU::emitReduction(
3168 CodeGenFunction &CGF, SourceLocation Loc, ArrayRef<const Expr *> Privates,
3169 ArrayRef<const Expr *> LHSExprs, ArrayRef<const Expr *> RHSExprs,
3170 ArrayRef<const Expr *> ReductionOps, ReductionOptionsTy Options) {
3171 if (!CGF.HaveInsertPoint())
3172 return;
3173
3174 bool ParallelReduction = isOpenMPParallelDirective(Options.ReductionKind);
3175 #ifndef NDEBUG
3176 bool TeamsReduction = isOpenMPTeamsDirective(Options.ReductionKind);
3177 #endif
3178
3179 if (Options.SimpleReduction) {
3180 assert(!TeamsReduction && !ParallelReduction &&
3181 "Invalid reduction selection in emitReduction.");
3182 CGOpenMPRuntime::emitReduction(CGF, Loc, Privates, LHSExprs, RHSExprs,
3183 ReductionOps, Options);
3184 return;
3185 }
3186
3187 assert((TeamsReduction || ParallelReduction) &&
3188 "Invalid reduction selection in emitReduction.");
3189
3190 // Build res = __kmpc_nvptx_parallel_reduce_nowait_v2(<loc>, <gtid>, <n>,
3191 // sizeof(RedList), RedList, shuffle_reduce_func, interwarp_copy_func);
3192 // or, for teams reductions,
3193 // res = __kmpc_nvptx_teams_reduce_nowait_v2(<loc>, <gtid>, <global_buffer>, ...);
3194 llvm::Value *RTLoc = emitUpdateLocation(CGF, Loc);
3195 llvm::Value *ThreadId = getThreadID(CGF, Loc);
3196
3197 llvm::Value *Res;
3198 ASTContext &C = CGM.getContext();
3199 // 1. Build a list of reduction variables.
3200 // void *RedList[<n>] = {<ReductionVars>[0], ..., <ReductionVars>[<n>-1]};
3201 auto Size = RHSExprs.size();
3202 for (const Expr *E : Privates) {
3203 if (E->getType()->isVariablyModifiedType())
3204 // Reserve place for array size.
3205 ++Size;
3206 }
3207 llvm::APInt ArraySize(/*unsigned int numBits=*/32, Size);
3208 QualType ReductionArrayTy =
3209 C.getConstantArrayType(C.VoidPtrTy, ArraySize, nullptr, ArrayType::Normal,
3210 /*IndexTypeQuals=*/0);
3211 Address ReductionList =
3212 CGF.CreateMemTemp(ReductionArrayTy, ".omp.reduction.red_list");
3213 auto IPriv = Privates.begin();
3214 unsigned Idx = 0;
3215 for (unsigned I = 0, E = RHSExprs.size(); I < E; ++I, ++IPriv, ++Idx) {
3216 Address Elem = CGF.Builder.CreateConstArrayGEP(ReductionList, Idx);
3217 CGF.Builder.CreateStore(
3218 CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
3219 CGF.EmitLValue(RHSExprs[I]).getPointer(CGF), CGF.VoidPtrTy),
3220 Elem);
3221 if ((*IPriv)->getType()->isVariablyModifiedType()) {
3222 // Store array size.
3223 ++Idx; 3224 Elem = CGF.Builder.CreateConstArrayGEP(ReductionList, Idx); 3225 llvm::Value *Size = CGF.Builder.CreateIntCast( 3226 CGF.getVLASize( 3227 CGF.getContext().getAsVariableArrayType((*IPriv)->getType())) 3228 .NumElts, 3229 CGF.SizeTy, /*isSigned=*/false); 3230 CGF.Builder.CreateStore(CGF.Builder.CreateIntToPtr(Size, CGF.VoidPtrTy), 3231 Elem); 3232 } 3233 } 3234 3235 llvm::Value *RL = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast( 3236 ReductionList.getPointer(), CGF.VoidPtrTy); 3237 llvm::Function *ReductionFn = emitReductionFunction( 3238 Loc, CGF.ConvertTypeForMem(ReductionArrayTy)->getPointerTo(), Privates, 3239 LHSExprs, RHSExprs, ReductionOps); 3240 llvm::Value *ReductionArrayTySize = CGF.getTypeSize(ReductionArrayTy); 3241 llvm::Function *ShuffleAndReduceFn = emitShuffleAndReduceFunction( 3242 CGM, Privates, ReductionArrayTy, ReductionFn, Loc); 3243 llvm::Value *InterWarpCopyFn = 3244 emitInterWarpCopyFunction(CGM, Privates, ReductionArrayTy, Loc); 3245 3246 if (ParallelReduction) { 3247 llvm::Value *Args[] = {RTLoc, 3248 ThreadId, 3249 CGF.Builder.getInt32(RHSExprs.size()), 3250 ReductionArrayTySize, 3251 RL, 3252 ShuffleAndReduceFn, 3253 InterWarpCopyFn}; 3254 3255 Res = CGF.EmitRuntimeCall( 3256 OMPBuilder.getOrCreateRuntimeFunction( 3257 CGM.getModule(), OMPRTL___kmpc_nvptx_parallel_reduce_nowait_v2), 3258 Args); 3259 } else { 3260 assert(TeamsReduction && "expected teams reduction."); 3261 llvm::SmallDenseMap<const ValueDecl *, const FieldDecl *> VarFieldMap; 3262 llvm::SmallVector<const ValueDecl *, 4> PrivatesReductions(Privates.size()); 3263 int Cnt = 0; 3264 for (const Expr *DRE : Privates) { 3265 PrivatesReductions[Cnt] = cast<DeclRefExpr>(DRE)->getDecl(); 3266 ++Cnt; 3267 } 3268 const RecordDecl *TeamReductionRec = ::buildRecordForGlobalizedVars( 3269 CGM.getContext(), PrivatesReductions, llvm::None, VarFieldMap, 3270 C.getLangOpts().OpenMPCUDAReductionBufNum); 3271 TeamsReductions.push_back(TeamReductionRec); 3272 if (!KernelTeamsReductionPtr) { 3273 KernelTeamsReductionPtr = new llvm::GlobalVariable( 3274 CGM.getModule(), CGM.VoidPtrTy, /*isConstant=*/true, 3275 llvm::GlobalValue::InternalLinkage, nullptr, 3276 "_openmp_teams_reductions_buffer_$_$ptr"); 3277 } 3278 llvm::Value *GlobalBufferPtr = CGF.EmitLoadOfScalar( 3279 Address(KernelTeamsReductionPtr, CGM.getPointerAlign()), 3280 /*Volatile=*/false, C.getPointerType(C.VoidPtrTy), Loc); 3281 llvm::Value *GlobalToBufferCpyFn = ::emitListToGlobalCopyFunction( 3282 CGM, Privates, ReductionArrayTy, Loc, TeamReductionRec, VarFieldMap); 3283 llvm::Value *GlobalToBufferRedFn = ::emitListToGlobalReduceFunction( 3284 CGM, Privates, ReductionArrayTy, Loc, TeamReductionRec, VarFieldMap, 3285 ReductionFn); 3286 llvm::Value *BufferToGlobalCpyFn = ::emitGlobalToListCopyFunction( 3287 CGM, Privates, ReductionArrayTy, Loc, TeamReductionRec, VarFieldMap); 3288 llvm::Value *BufferToGlobalRedFn = ::emitGlobalToListReduceFunction( 3289 CGM, Privates, ReductionArrayTy, Loc, TeamReductionRec, VarFieldMap, 3290 ReductionFn); 3291 3292 llvm::Value *Args[] = { 3293 RTLoc, 3294 ThreadId, 3295 GlobalBufferPtr, 3296 CGF.Builder.getInt32(C.getLangOpts().OpenMPCUDAReductionBufNum), 3297 RL, 3298 ShuffleAndReduceFn, 3299 InterWarpCopyFn, 3300 GlobalToBufferCpyFn, 3301 GlobalToBufferRedFn, 3302 BufferToGlobalCpyFn, 3303 BufferToGlobalRedFn}; 3304 3305 Res = CGF.EmitRuntimeCall( 3306 OMPBuilder.getOrCreateRuntimeFunction( 3307 CGM.getModule(), OMPRTL___kmpc_nvptx_teams_reduce_nowait_v2), 3308 Args); 3309 } 3310 3311 // 5. 
Build if (res == 1) 3312 llvm::BasicBlock *ExitBB = CGF.createBasicBlock(".omp.reduction.done"); 3313 llvm::BasicBlock *ThenBB = CGF.createBasicBlock(".omp.reduction.then"); 3314 llvm::Value *Cond = CGF.Builder.CreateICmpEQ( 3315 Res, llvm::ConstantInt::get(CGM.Int32Ty, /*V=*/1)); 3316 CGF.Builder.CreateCondBr(Cond, ThenBB, ExitBB); 3317 3318 // 6. Build then branch: where we have reduced values in the master 3319 // thread in each team. 3320 // __kmpc_end_reduce{_nowait}(<gtid>); 3321 // break; 3322 CGF.EmitBlock(ThenBB); 3323 3324 // Add emission of __kmpc_end_reduce{_nowait}(<gtid>); 3325 auto &&CodeGen = [Privates, LHSExprs, RHSExprs, ReductionOps, 3326 this](CodeGenFunction &CGF, PrePostActionTy &Action) { 3327 auto IPriv = Privates.begin(); 3328 auto ILHS = LHSExprs.begin(); 3329 auto IRHS = RHSExprs.begin(); 3330 for (const Expr *E : ReductionOps) { 3331 emitSingleReductionCombiner(CGF, E, *IPriv, cast<DeclRefExpr>(*ILHS), 3332 cast<DeclRefExpr>(*IRHS)); 3333 ++IPriv; 3334 ++ILHS; 3335 ++IRHS; 3336 } 3337 }; 3338 llvm::Value *EndArgs[] = {ThreadId}; 3339 RegionCodeGenTy RCG(CodeGen); 3340 NVPTXActionTy Action( 3341 nullptr, llvm::None, 3342 OMPBuilder.getOrCreateRuntimeFunction( 3343 CGM.getModule(), OMPRTL___kmpc_nvptx_end_reduce_nowait), 3344 EndArgs); 3345 RCG.setAction(Action); 3346 RCG(CGF); 3347 // There is no need to emit line number for unconditional branch. 3348 (void)ApplyDebugLocation::CreateEmpty(CGF); 3349 CGF.EmitBlock(ExitBB, /*IsFinished=*/true); 3350 } 3351 3352 const VarDecl * 3353 CGOpenMPRuntimeGPU::translateParameter(const FieldDecl *FD, 3354 const VarDecl *NativeParam) const { 3355 if (!NativeParam->getType()->isReferenceType()) 3356 return NativeParam; 3357 QualType ArgType = NativeParam->getType(); 3358 QualifierCollector QC; 3359 const Type *NonQualTy = QC.strip(ArgType); 3360 QualType PointeeTy = cast<ReferenceType>(NonQualTy)->getPointeeType(); 3361 if (const auto *Attr = FD->getAttr<OMPCaptureKindAttr>()) { 3362 if (Attr->getCaptureKind() == OMPC_map) { 3363 PointeeTy = CGM.getContext().getAddrSpaceQualType(PointeeTy, 3364 LangAS::opencl_global); 3365 } 3366 } 3367 ArgType = CGM.getContext().getPointerType(PointeeTy); 3368 QC.addRestrict(); 3369 enum { NVPTX_local_addr = 5 }; 3370 QC.addAddressSpace(getLangASFromTargetAS(NVPTX_local_addr)); 3371 ArgType = QC.apply(CGM.getContext(), ArgType); 3372 if (isa<ImplicitParamDecl>(NativeParam)) 3373 return ImplicitParamDecl::Create( 3374 CGM.getContext(), /*DC=*/nullptr, NativeParam->getLocation(), 3375 NativeParam->getIdentifier(), ArgType, ImplicitParamDecl::Other); 3376 return ParmVarDecl::Create( 3377 CGM.getContext(), 3378 const_cast<DeclContext *>(NativeParam->getDeclContext()), 3379 NativeParam->getBeginLoc(), NativeParam->getLocation(), 3380 NativeParam->getIdentifier(), ArgType, 3381 /*TInfo=*/nullptr, SC_None, /*DefArg=*/nullptr); 3382 } 3383 3384 Address 3385 CGOpenMPRuntimeGPU::getParameterAddress(CodeGenFunction &CGF, 3386 const VarDecl *NativeParam, 3387 const VarDecl *TargetParam) const { 3388 assert(NativeParam != TargetParam && 3389 NativeParam->getType()->isReferenceType() && 3390 "Native arg must not be the same as target arg."); 3391 Address LocalAddr = CGF.GetAddrOfLocalVar(TargetParam); 3392 QualType NativeParamType = NativeParam->getType(); 3393 QualifierCollector QC; 3394 const Type *NonQualTy = QC.strip(NativeParamType); 3395 QualType NativePointeeTy = cast<ReferenceType>(NonQualTy)->getPointeeType(); 3396 unsigned NativePointeeAddrSpace = 3397 
CGF.getContext().getTargetAddressSpace(NativePointeeTy); 3398 QualType TargetTy = TargetParam->getType(); 3399 llvm::Value *TargetAddr = CGF.EmitLoadOfScalar( 3400 LocalAddr, /*Volatile=*/false, TargetTy, SourceLocation()); 3401 // First cast to generic. 3402 TargetAddr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast( 3403 TargetAddr, TargetAddr->getType()->getPointerElementType()->getPointerTo( 3404 /*AddrSpace=*/0)); 3405 // Cast from generic to native address space. 3406 TargetAddr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast( 3407 TargetAddr, TargetAddr->getType()->getPointerElementType()->getPointerTo( 3408 NativePointeeAddrSpace)); 3409 Address NativeParamAddr = CGF.CreateMemTemp(NativeParamType); 3410 CGF.EmitStoreOfScalar(TargetAddr, NativeParamAddr, /*Volatile=*/false, 3411 NativeParamType); 3412 return NativeParamAddr; 3413 } 3414 3415 void CGOpenMPRuntimeGPU::emitOutlinedFunctionCall( 3416 CodeGenFunction &CGF, SourceLocation Loc, llvm::FunctionCallee OutlinedFn, 3417 ArrayRef<llvm::Value *> Args) const { 3418 SmallVector<llvm::Value *, 4> TargetArgs; 3419 TargetArgs.reserve(Args.size()); 3420 auto *FnType = OutlinedFn.getFunctionType(); 3421 for (unsigned I = 0, E = Args.size(); I < E; ++I) { 3422 if (FnType->isVarArg() && FnType->getNumParams() <= I) { 3423 TargetArgs.append(std::next(Args.begin(), I), Args.end()); 3424 break; 3425 } 3426 llvm::Type *TargetType = FnType->getParamType(I); 3427 llvm::Value *NativeArg = Args[I]; 3428 if (!TargetType->isPointerTy()) { 3429 TargetArgs.emplace_back(NativeArg); 3430 continue; 3431 } 3432 llvm::Value *TargetArg = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast( 3433 NativeArg, 3434 NativeArg->getType()->getPointerElementType()->getPointerTo()); 3435 TargetArgs.emplace_back( 3436 CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(TargetArg, TargetType)); 3437 } 3438 CGOpenMPRuntime::emitOutlinedFunctionCall(CGF, Loc, OutlinedFn, TargetArgs); 3439 } 3440 3441 /// Emit function which wraps the outline parallel region 3442 /// and controls the arguments which are passed to this function. 3443 /// The wrapper ensures that the outlined function is called 3444 /// with the correct arguments when data is shared. 3445 llvm::Function *CGOpenMPRuntimeGPU::createParallelDataSharingWrapper( 3446 llvm::Function *OutlinedParallelFn, const OMPExecutableDirective &D) { 3447 ASTContext &Ctx = CGM.getContext(); 3448 const auto &CS = *D.getCapturedStmt(OMPD_parallel); 3449 3450 // Create a function that takes as argument the source thread. 3451 FunctionArgList WrapperArgs; 3452 QualType Int16QTy = 3453 Ctx.getIntTypeForBitwidth(/*DestWidth=*/16, /*Signed=*/false); 3454 QualType Int32QTy = 3455 Ctx.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/false); 3456 ImplicitParamDecl ParallelLevelArg(Ctx, /*DC=*/nullptr, D.getBeginLoc(), 3457 /*Id=*/nullptr, Int16QTy, 3458 ImplicitParamDecl::Other); 3459 ImplicitParamDecl WrapperArg(Ctx, /*DC=*/nullptr, D.getBeginLoc(), 3460 /*Id=*/nullptr, Int32QTy, 3461 ImplicitParamDecl::Other); 3462 WrapperArgs.emplace_back(&ParallelLevelArg); 3463 WrapperArgs.emplace_back(&WrapperArg); 3464 3465 const CGFunctionInfo &CGFI = 3466 CGM.getTypes().arrangeBuiltinFunctionDeclaration(Ctx.VoidTy, WrapperArgs); 3467 3468 auto *Fn = llvm::Function::Create( 3469 CGM.getTypes().GetFunctionType(CGFI), llvm::GlobalValue::InternalLinkage, 3470 Twine(OutlinedParallelFn->getName(), "_wrapper"), &CGM.getModule()); 3471 3472 // Ensure we do not inline the function. 
This is trivially true for the ones
3473 // passed to __kmpc_fork_call but the ones called in serialized regions
3474 // could be inlined. This is not perfect, but it is closer to the invariant
3475 // we want, namely, every data environment starts with a new function.
3476 // TODO: We should pass the if condition to the runtime function and do the
3477 // handling there. Much cleaner code.
3478 Fn->addFnAttr(llvm::Attribute::NoInline);
3479
3480 CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, CGFI);
3481 Fn->setLinkage(llvm::GlobalValue::InternalLinkage);
3482 Fn->setDoesNotRecurse();
3483
3484 CodeGenFunction CGF(CGM, /*suppressNewContext=*/true);
3485 CGF.StartFunction(GlobalDecl(), Ctx.VoidTy, Fn, CGFI, WrapperArgs,
3486 D.getBeginLoc(), D.getBeginLoc());
3487
3488 const auto *RD = CS.getCapturedRecordDecl();
3489 auto CurField = RD->field_begin();
3490
3491 Address ZeroAddr = CGF.CreateDefaultAlignTempAlloca(CGF.Int32Ty,
3492 /*Name=*/".zero.addr");
3493 CGF.Builder.CreateStore(CGF.Builder.getInt32(/*C*/ 0), ZeroAddr);
3494 // Get the array of arguments.
3495 SmallVector<llvm::Value *, 8> Args;
3496
3497 Args.emplace_back(CGF.GetAddrOfLocalVar(&WrapperArg).getPointer());
3498 Args.emplace_back(ZeroAddr.getPointer());
3499
3500 CGBuilderTy &Bld = CGF.Builder;
3501 auto CI = CS.capture_begin();
3502
3503 // Use global memory for data sharing.
3504 // Handle passing of global args to workers.
3505 Address GlobalArgs =
3506 CGF.CreateDefaultAlignTempAlloca(CGF.VoidPtrPtrTy, "global_args");
3507 llvm::Value *GlobalArgsPtr = GlobalArgs.getPointer();
3508 llvm::Value *DataSharingArgs[] = {GlobalArgsPtr};
3509 CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
3510 CGM.getModule(), OMPRTL___kmpc_get_shared_variables),
3511 DataSharingArgs);
3512
3513 // Retrieve the shared variables from the list of references returned
3514 // by the runtime. Pass the variables to the outlined function.
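// The list has the following layout (a sketch; the slot names are
// illustrative, and the first two slots exist only for
// loop-bound-sharing directives, as handled below):
//   global_args[0]       = &lower_bound   (loop-bound-sharing only)
//   global_args[1]       = &upper_bound   (loop-bound-sharing only)
//   global_args[Idx + I] = address of the I-th captured variable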
3515 Address SharedArgListAddress = Address::invalid(); 3516 if (CS.capture_size() > 0 || 3517 isOpenMPLoopBoundSharingDirective(D.getDirectiveKind())) { 3518 SharedArgListAddress = CGF.EmitLoadOfPointer( 3519 GlobalArgs, CGF.getContext() 3520 .getPointerType(CGF.getContext().getPointerType( 3521 CGF.getContext().VoidPtrTy)) 3522 .castAs<PointerType>()); 3523 } 3524 unsigned Idx = 0; 3525 if (isOpenMPLoopBoundSharingDirective(D.getDirectiveKind())) { 3526 Address Src = Bld.CreateConstInBoundsGEP(SharedArgListAddress, Idx); 3527 Address TypedAddress = Bld.CreatePointerBitCastOrAddrSpaceCast( 3528 Src, CGF.SizeTy->getPointerTo()); 3529 llvm::Value *LB = CGF.EmitLoadOfScalar( 3530 TypedAddress, 3531 /*Volatile=*/false, 3532 CGF.getContext().getPointerType(CGF.getContext().getSizeType()), 3533 cast<OMPLoopDirective>(D).getLowerBoundVariable()->getExprLoc()); 3534 Args.emplace_back(LB); 3535 ++Idx; 3536 Src = Bld.CreateConstInBoundsGEP(SharedArgListAddress, Idx); 3537 TypedAddress = Bld.CreatePointerBitCastOrAddrSpaceCast( 3538 Src, CGF.SizeTy->getPointerTo()); 3539 llvm::Value *UB = CGF.EmitLoadOfScalar( 3540 TypedAddress, 3541 /*Volatile=*/false, 3542 CGF.getContext().getPointerType(CGF.getContext().getSizeType()), 3543 cast<OMPLoopDirective>(D).getUpperBoundVariable()->getExprLoc()); 3544 Args.emplace_back(UB); 3545 ++Idx; 3546 } 3547 if (CS.capture_size() > 0) { 3548 ASTContext &CGFContext = CGF.getContext(); 3549 for (unsigned I = 0, E = CS.capture_size(); I < E; ++I, ++CI, ++CurField) { 3550 QualType ElemTy = CurField->getType(); 3551 Address Src = Bld.CreateConstInBoundsGEP(SharedArgListAddress, I + Idx); 3552 Address TypedAddress = Bld.CreatePointerBitCastOrAddrSpaceCast( 3553 Src, CGF.ConvertTypeForMem(CGFContext.getPointerType(ElemTy))); 3554 llvm::Value *Arg = CGF.EmitLoadOfScalar(TypedAddress, 3555 /*Volatile=*/false, 3556 CGFContext.getPointerType(ElemTy), 3557 CI->getLocation()); 3558 if (CI->capturesVariableByCopy() && 3559 !CI->getCapturedVar()->getType()->isAnyPointerType()) { 3560 Arg = castValueToType(CGF, Arg, ElemTy, CGFContext.getUIntPtrType(), 3561 CI->getLocation()); 3562 } 3563 Args.emplace_back(Arg); 3564 } 3565 } 3566 3567 emitOutlinedFunctionCall(CGF, D.getBeginLoc(), OutlinedParallelFn, Args); 3568 CGF.FinishFunction(); 3569 return Fn; 3570 } 3571 3572 void CGOpenMPRuntimeGPU::emitFunctionProlog(CodeGenFunction &CGF, 3573 const Decl *D) { 3574 if (getDataSharingMode(CGM) != CGOpenMPRuntimeGPU::Generic) 3575 return; 3576 3577 assert(D && "Expected function or captured|block decl."); 3578 assert(FunctionGlobalizedDecls.count(CGF.CurFn) == 0 && 3579 "Function is registered already."); 3580 assert((!TeamAndReductions.first || TeamAndReductions.first == D) && 3581 "Team is set but not processed."); 3582 const Stmt *Body = nullptr; 3583 bool NeedToDelayGlobalization = false; 3584 if (const auto *FD = dyn_cast<FunctionDecl>(D)) { 3585 Body = FD->getBody(); 3586 } else if (const auto *BD = dyn_cast<BlockDecl>(D)) { 3587 Body = BD->getBody(); 3588 } else if (const auto *CD = dyn_cast<CapturedDecl>(D)) { 3589 Body = CD->getBody(); 3590 NeedToDelayGlobalization = CGF.CapturedStmtInfo->getKind() == CR_OpenMP; 3591 if (NeedToDelayGlobalization && 3592 getExecutionMode() == CGOpenMPRuntimeGPU::EM_SPMD) 3593 return; 3594 } 3595 if (!Body) 3596 return; 3597 CheckVarsEscapingDeclContext VarChecker(CGF, TeamAndReductions.second); 3598 VarChecker.Visit(Body); 3599 const RecordDecl *GlobalizedVarsRecord = 3600 VarChecker.getGlobalizedRecord(IsInTTDRegion); 3601 TeamAndReductions.first 
= nullptr;
3602 TeamAndReductions.second.clear();
3603 ArrayRef<const ValueDecl *> EscapedVariableLengthDecls =
3604 VarChecker.getEscapedVariableLengthDecls();
3605 if (!GlobalizedVarsRecord && EscapedVariableLengthDecls.empty())
3606 return;
3607 auto I = FunctionGlobalizedDecls.try_emplace(CGF.CurFn).first;
3608 I->getSecond().MappedParams =
3609 std::make_unique<CodeGenFunction::OMPMapVars>();
3610 I->getSecond().EscapedParameters.insert(
3611 VarChecker.getEscapedParameters().begin(),
3612 VarChecker.getEscapedParameters().end());
3613 I->getSecond().EscapedVariableLengthDecls.append(
3614 EscapedVariableLengthDecls.begin(), EscapedVariableLengthDecls.end());
3615 DeclToAddrMapTy &Data = I->getSecond().LocalVarData;
3616 for (const ValueDecl *VD : VarChecker.getEscapedDecls()) {
3617 assert(VD->isCanonicalDecl() && "Expected canonical declaration");
3618 Data.insert(std::make_pair(VD, MappedVarData()));
3619 }
3620 if (!IsInTTDRegion && !NeedToDelayGlobalization && !IsInParallelRegion) {
3621 CheckVarsEscapingDeclContext VarChecker(CGF, llvm::None);
3622 VarChecker.Visit(Body);
3623 I->getSecond().SecondaryLocalVarData.emplace();
3624 DeclToAddrMapTy &Data = I->getSecond().SecondaryLocalVarData.getValue();
3625 for (const ValueDecl *VD : VarChecker.getEscapedDecls()) {
3626 assert(VD->isCanonicalDecl() && "Expected canonical declaration");
3627 Data.insert(std::make_pair(VD, MappedVarData()));
3628 }
3629 }
3630 if (!NeedToDelayGlobalization) {
3631 emitGenericVarsProlog(CGF, D->getBeginLoc(), /*WithSPMDCheck=*/true);
3632 struct GlobalizationScope final : EHScopeStack::Cleanup {
3633 GlobalizationScope() = default;
3634
3635 void Emit(CodeGenFunction &CGF, Flags flags) override {
3636 static_cast<CGOpenMPRuntimeGPU &>(CGF.CGM.getOpenMPRuntime())
3637 .emitGenericVarsEpilog(CGF, /*WithSPMDCheck=*/true);
3638 }
3639 };
3640 CGF.EHStack.pushCleanup<GlobalizationScope>(NormalAndEHCleanup);
3641 }
3642 }
3643
3644 Address CGOpenMPRuntimeGPU::getAddressOfLocalVariable(CodeGenFunction &CGF,
3645 const VarDecl *VD) {
3646 if (VD && VD->hasAttr<OMPAllocateDeclAttr>()) {
3647 const auto *A = VD->getAttr<OMPAllocateDeclAttr>();
3648 auto AS = LangAS::Default;
3649 switch (A->getAllocatorType()) {
3650 // Use the default allocator here as by default local vars are
3651 // thread-local.
3652 case OMPAllocateDeclAttr::OMPNullMemAlloc:
3653 case OMPAllocateDeclAttr::OMPDefaultMemAlloc:
3654 case OMPAllocateDeclAttr::OMPThreadMemAlloc:
3655 case OMPAllocateDeclAttr::OMPHighBWMemAlloc:
3656 case OMPAllocateDeclAttr::OMPLowLatMemAlloc:
3657 // Follow the user decision - use default allocation.
3658 return Address::invalid();
3659 case OMPAllocateDeclAttr::OMPUserDefinedMemAlloc:
3660 // TODO: implement support for user-defined allocators.
3661 return Address::invalid(); 3662 case OMPAllocateDeclAttr::OMPConstMemAlloc: 3663 AS = LangAS::cuda_constant; 3664 break; 3665 case OMPAllocateDeclAttr::OMPPTeamMemAlloc: 3666 AS = LangAS::cuda_shared; 3667 break; 3668 case OMPAllocateDeclAttr::OMPLargeCapMemAlloc: 3669 case OMPAllocateDeclAttr::OMPCGroupMemAlloc: 3670 break; 3671 } 3672 llvm::Type *VarTy = CGF.ConvertTypeForMem(VD->getType()); 3673 auto *GV = new llvm::GlobalVariable( 3674 CGM.getModule(), VarTy, /*isConstant=*/false, 3675 llvm::GlobalValue::InternalLinkage, llvm::Constant::getNullValue(VarTy), 3676 VD->getName(), 3677 /*InsertBefore=*/nullptr, llvm::GlobalValue::NotThreadLocal, 3678 CGM.getContext().getTargetAddressSpace(AS)); 3679 CharUnits Align = CGM.getContext().getDeclAlign(VD); 3680 GV->setAlignment(Align.getAsAlign()); 3681 return Address( 3682 CGF.Builder.CreatePointerBitCastOrAddrSpaceCast( 3683 GV, VarTy->getPointerTo(CGM.getContext().getTargetAddressSpace( 3684 VD->getType().getAddressSpace()))), 3685 Align); 3686 } 3687 3688 if (getDataSharingMode(CGM) != CGOpenMPRuntimeGPU::Generic) 3689 return Address::invalid(); 3690 3691 VD = VD->getCanonicalDecl(); 3692 auto I = FunctionGlobalizedDecls.find(CGF.CurFn); 3693 if (I == FunctionGlobalizedDecls.end()) 3694 return Address::invalid(); 3695 auto VDI = I->getSecond().LocalVarData.find(VD); 3696 if (VDI != I->getSecond().LocalVarData.end()) 3697 return VDI->second.PrivateAddr; 3698 if (VD->hasAttrs()) { 3699 for (specific_attr_iterator<OMPReferencedVarAttr> IT(VD->attr_begin()), 3700 E(VD->attr_end()); 3701 IT != E; ++IT) { 3702 auto VDI = I->getSecond().LocalVarData.find( 3703 cast<VarDecl>(cast<DeclRefExpr>(IT->getRef())->getDecl()) 3704 ->getCanonicalDecl()); 3705 if (VDI != I->getSecond().LocalVarData.end()) 3706 return VDI->second.PrivateAddr; 3707 } 3708 } 3709 3710 return Address::invalid(); 3711 } 3712 3713 void CGOpenMPRuntimeGPU::functionFinished(CodeGenFunction &CGF) { 3714 FunctionGlobalizedDecls.erase(CGF.CurFn); 3715 CGOpenMPRuntime::functionFinished(CGF); 3716 } 3717 3718 void CGOpenMPRuntimeGPU::getDefaultDistScheduleAndChunk( 3719 CodeGenFunction &CGF, const OMPLoopDirective &S, 3720 OpenMPDistScheduleClauseKind &ScheduleKind, 3721 llvm::Value *&Chunk) const { 3722 auto &RT = static_cast<CGOpenMPRuntimeGPU &>(CGF.CGM.getOpenMPRuntime()); 3723 if (getExecutionMode() == CGOpenMPRuntimeGPU::EM_SPMD) { 3724 ScheduleKind = OMPC_DIST_SCHEDULE_static; 3725 Chunk = CGF.EmitScalarConversion( 3726 RT.getGPUNumThreads(CGF), 3727 CGF.getContext().getIntTypeForBitwidth(32, /*Signed=*/0), 3728 S.getIterationVariable()->getType(), S.getBeginLoc()); 3729 return; 3730 } 3731 CGOpenMPRuntime::getDefaultDistScheduleAndChunk( 3732 CGF, S, ScheduleKind, Chunk); 3733 } 3734 3735 void CGOpenMPRuntimeGPU::getDefaultScheduleAndChunk( 3736 CodeGenFunction &CGF, const OMPLoopDirective &S, 3737 OpenMPScheduleClauseKind &ScheduleKind, 3738 const Expr *&ChunkExpr) const { 3739 ScheduleKind = OMPC_SCHEDULE_static; 3740 // Chunk size is 1 in this case. 
3741 llvm::APInt ChunkSize(32, 1); 3742 ChunkExpr = IntegerLiteral::Create(CGF.getContext(), ChunkSize, 3743 CGF.getContext().getIntTypeForBitwidth(32, /*Signed=*/0), 3744 SourceLocation()); 3745 } 3746 3747 void CGOpenMPRuntimeGPU::adjustTargetSpecificDataForLambdas( 3748 CodeGenFunction &CGF, const OMPExecutableDirective &D) const { 3749 assert(isOpenMPTargetExecutionDirective(D.getDirectiveKind()) && 3750 " Expected target-based directive."); 3751 const CapturedStmt *CS = D.getCapturedStmt(OMPD_target); 3752 for (const CapturedStmt::Capture &C : CS->captures()) { 3753 // Capture variables captured by reference in lambdas for target-based 3754 // directives. 3755 if (!C.capturesVariable()) 3756 continue; 3757 const VarDecl *VD = C.getCapturedVar(); 3758 const auto *RD = VD->getType() 3759 .getCanonicalType() 3760 .getNonReferenceType() 3761 ->getAsCXXRecordDecl(); 3762 if (!RD || !RD->isLambda()) 3763 continue; 3764 Address VDAddr = CGF.GetAddrOfLocalVar(VD); 3765 LValue VDLVal; 3766 if (VD->getType().getCanonicalType()->isReferenceType()) 3767 VDLVal = CGF.EmitLoadOfReferenceLValue(VDAddr, VD->getType()); 3768 else 3769 VDLVal = CGF.MakeAddrLValue( 3770 VDAddr, VD->getType().getCanonicalType().getNonReferenceType()); 3771 llvm::DenseMap<const VarDecl *, FieldDecl *> Captures; 3772 FieldDecl *ThisCapture = nullptr; 3773 RD->getCaptureFields(Captures, ThisCapture); 3774 if (ThisCapture && CGF.CapturedStmtInfo->isCXXThisExprCaptured()) { 3775 LValue ThisLVal = 3776 CGF.EmitLValueForFieldInitialization(VDLVal, ThisCapture); 3777 llvm::Value *CXXThis = CGF.LoadCXXThis(); 3778 CGF.EmitStoreOfScalar(CXXThis, ThisLVal); 3779 } 3780 for (const LambdaCapture &LC : RD->captures()) { 3781 if (LC.getCaptureKind() != LCK_ByRef) 3782 continue; 3783 const VarDecl *VD = LC.getCapturedVar(); 3784 if (!CS->capturesVariable(VD)) 3785 continue; 3786 auto It = Captures.find(VD); 3787 assert(It != Captures.end() && "Found lambda capture without field."); 3788 LValue VarLVal = CGF.EmitLValueForFieldInitialization(VDLVal, It->second); 3789 Address VDAddr = CGF.GetAddrOfLocalVar(VD); 3790 if (VD->getType().getCanonicalType()->isReferenceType()) 3791 VDAddr = CGF.EmitLoadOfReferenceLValue(VDAddr, 3792 VD->getType().getCanonicalType()) 3793 .getAddress(CGF); 3794 CGF.EmitStoreOfScalar(VDAddr.getPointer(), VarLVal); 3795 } 3796 } 3797 } 3798 3799 bool CGOpenMPRuntimeGPU::hasAllocateAttributeForGlobalVar(const VarDecl *VD, 3800 LangAS &AS) { 3801 if (!VD || !VD->hasAttr<OMPAllocateDeclAttr>()) 3802 return false; 3803 const auto *A = VD->getAttr<OMPAllocateDeclAttr>(); 3804 switch(A->getAllocatorType()) { 3805 case OMPAllocateDeclAttr::OMPNullMemAlloc: 3806 case OMPAllocateDeclAttr::OMPDefaultMemAlloc: 3807 // Not supported, fallback to the default mem space. 
3808 case OMPAllocateDeclAttr::OMPThreadMemAlloc: 3809 case OMPAllocateDeclAttr::OMPLargeCapMemAlloc: 3810 case OMPAllocateDeclAttr::OMPCGroupMemAlloc: 3811 case OMPAllocateDeclAttr::OMPHighBWMemAlloc: 3812 case OMPAllocateDeclAttr::OMPLowLatMemAlloc: 3813 AS = LangAS::Default; 3814 return true; 3815 case OMPAllocateDeclAttr::OMPConstMemAlloc: 3816 AS = LangAS::cuda_constant; 3817 return true; 3818 case OMPAllocateDeclAttr::OMPPTeamMemAlloc: 3819 AS = LangAS::cuda_shared; 3820 return true; 3821 case OMPAllocateDeclAttr::OMPUserDefinedMemAlloc: 3822 llvm_unreachable("Expected predefined allocator for the variables with the " 3823 "static storage."); 3824 } 3825 return false; 3826 } 3827 3828 // Get current CudaArch and ignore any unknown values 3829 static CudaArch getCudaArch(CodeGenModule &CGM) { 3830 if (!CGM.getTarget().hasFeature("ptx")) 3831 return CudaArch::UNKNOWN; 3832 for (const auto &Feature : CGM.getTarget().getTargetOpts().FeatureMap) { 3833 if (Feature.getValue()) { 3834 CudaArch Arch = StringToCudaArch(Feature.getKey()); 3835 if (Arch != CudaArch::UNKNOWN) 3836 return Arch; 3837 } 3838 } 3839 return CudaArch::UNKNOWN; 3840 } 3841 3842 /// Check to see if target architecture supports unified addressing which is 3843 /// a restriction for OpenMP requires clause "unified_shared_memory". 3844 void CGOpenMPRuntimeGPU::processRequiresDirective( 3845 const OMPRequiresDecl *D) { 3846 for (const OMPClause *Clause : D->clauselists()) { 3847 if (Clause->getClauseKind() == OMPC_unified_shared_memory) { 3848 CudaArch Arch = getCudaArch(CGM); 3849 switch (Arch) { 3850 case CudaArch::SM_20: 3851 case CudaArch::SM_21: 3852 case CudaArch::SM_30: 3853 case CudaArch::SM_32: 3854 case CudaArch::SM_35: 3855 case CudaArch::SM_37: 3856 case CudaArch::SM_50: 3857 case CudaArch::SM_52: 3858 case CudaArch::SM_53: { 3859 SmallString<256> Buffer; 3860 llvm::raw_svector_ostream Out(Buffer); 3861 Out << "Target architecture " << CudaArchToString(Arch) 3862 << " does not support unified addressing"; 3863 CGM.Error(Clause->getBeginLoc(), Out.str()); 3864 return; 3865 } 3866 case CudaArch::SM_60: 3867 case CudaArch::SM_61: 3868 case CudaArch::SM_62: 3869 case CudaArch::SM_70: 3870 case CudaArch::SM_72: 3871 case CudaArch::SM_75: 3872 case CudaArch::SM_80: 3873 case CudaArch::SM_86: 3874 case CudaArch::GFX600: 3875 case CudaArch::GFX601: 3876 case CudaArch::GFX602: 3877 case CudaArch::GFX700: 3878 case CudaArch::GFX701: 3879 case CudaArch::GFX702: 3880 case CudaArch::GFX703: 3881 case CudaArch::GFX704: 3882 case CudaArch::GFX705: 3883 case CudaArch::GFX801: 3884 case CudaArch::GFX802: 3885 case CudaArch::GFX803: 3886 case CudaArch::GFX805: 3887 case CudaArch::GFX810: 3888 case CudaArch::GFX900: 3889 case CudaArch::GFX902: 3890 case CudaArch::GFX904: 3891 case CudaArch::GFX906: 3892 case CudaArch::GFX908: 3893 case CudaArch::GFX909: 3894 case CudaArch::GFX90a: 3895 case CudaArch::GFX90c: 3896 case CudaArch::GFX1010: 3897 case CudaArch::GFX1011: 3898 case CudaArch::GFX1012: 3899 case CudaArch::GFX1013: 3900 case CudaArch::GFX1030: 3901 case CudaArch::GFX1031: 3902 case CudaArch::GFX1032: 3903 case CudaArch::GFX1033: 3904 case CudaArch::GFX1034: 3905 case CudaArch::GFX1035: 3906 case CudaArch::Generic: 3907 case CudaArch::UNUSED: 3908 case CudaArch::UNKNOWN: 3909 break; 3910 case CudaArch::LAST: 3911 llvm_unreachable("Unexpected Cuda arch."); 3912 } 3913 } 3914 } 3915 CGOpenMPRuntime::processRequiresDirective(D); 3916 } 3917 3918 void CGOpenMPRuntimeGPU::clear() { 3919 3920 if (!TeamsReductions.empty()) { 3921 
ASTContext &C = CGM.getContext(); 3922 RecordDecl *StaticRD = C.buildImplicitRecord( 3923 "_openmp_teams_reduction_type_$_", RecordDecl::TagKind::TTK_Union); 3924 StaticRD->startDefinition(); 3925 for (const RecordDecl *TeamReductionRec : TeamsReductions) { 3926 QualType RecTy = C.getRecordType(TeamReductionRec); 3927 auto *Field = FieldDecl::Create( 3928 C, StaticRD, SourceLocation(), SourceLocation(), nullptr, RecTy, 3929 C.getTrivialTypeSourceInfo(RecTy, SourceLocation()), 3930 /*BW=*/nullptr, /*Mutable=*/false, 3931 /*InitStyle=*/ICIS_NoInit); 3932 Field->setAccess(AS_public); 3933 StaticRD->addDecl(Field); 3934 } 3935 StaticRD->completeDefinition(); 3936 QualType StaticTy = C.getRecordType(StaticRD); 3937 llvm::Type *LLVMReductionsBufferTy = 3938 CGM.getTypes().ConvertTypeForMem(StaticTy); 3939 // FIXME: nvlink does not handle weak linkage correctly (object with the 3940 // different size are reported as erroneous). 3941 // Restore CommonLinkage as soon as nvlink is fixed. 3942 auto *GV = new llvm::GlobalVariable( 3943 CGM.getModule(), LLVMReductionsBufferTy, 3944 /*isConstant=*/false, llvm::GlobalValue::InternalLinkage, 3945 llvm::Constant::getNullValue(LLVMReductionsBufferTy), 3946 "_openmp_teams_reductions_buffer_$_"); 3947 KernelTeamsReductionPtr->setInitializer( 3948 llvm::ConstantExpr::getPointerBitCastOrAddrSpaceCast(GV, 3949 CGM.VoidPtrTy)); 3950 } 3951 CGOpenMPRuntime::clear(); 3952 } 3953 3954 llvm::Value *CGOpenMPRuntimeGPU::getGPUNumThreads(CodeGenFunction &CGF) { 3955 CGBuilderTy &Bld = CGF.Builder; 3956 llvm::Module *M = &CGF.CGM.getModule(); 3957 const char *LocSize = "__kmpc_get_hardware_num_threads_in_block"; 3958 llvm::Function *F = M->getFunction(LocSize); 3959 if (!F) { 3960 F = llvm::Function::Create( 3961 llvm::FunctionType::get(CGF.Int32Ty, llvm::None, false), 3962 llvm::GlobalVariable::ExternalLinkage, LocSize, &CGF.CGM.getModule()); 3963 } 3964 return Bld.CreateCall(F, llvm::None, "nvptx_num_threads"); 3965 } 3966 3967 llvm::Value *CGOpenMPRuntimeGPU::getGPUThreadID(CodeGenFunction &CGF) { 3968 ArrayRef<llvm::Value *> Args{}; 3969 return CGF.EmitRuntimeCall( 3970 OMPBuilder.getOrCreateRuntimeFunction( 3971 CGM.getModule(), OMPRTL___kmpc_get_hardware_thread_id_in_block), 3972 Args); 3973 } 3974 3975 llvm::Value *CGOpenMPRuntimeGPU::getGPUWarpSize(CodeGenFunction &CGF) { 3976 ArrayRef<llvm::Value *> Args{}; 3977 return CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction( 3978 CGM.getModule(), OMPRTL___kmpc_get_warp_size), 3979 Args); 3980 } 3981