//===---- CGOpenMPRuntimeGPU.cpp - Interface to OpenMP GPU Runtimes ----===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This provides a generalized class for OpenMP runtime code generation
// specialized by GPU targets NVPTX and AMDGCN.
//
//===----------------------------------------------------------------------===//

#include "CGOpenMPRuntimeGPU.h"
#include "CGOpenMPRuntimeNVPTX.h"
#include "CodeGenFunction.h"
#include "clang/AST/Attr.h"
#include "clang/AST/DeclOpenMP.h"
#include "clang/AST/StmtOpenMP.h"
#include "clang/AST/StmtVisitor.h"
#include "clang/Basic/Cuda.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/Frontend/OpenMP/OMPGridValues.h"
#include "llvm/IR/IntrinsicsNVPTX.h"

using namespace clang;
using namespace CodeGen;
using namespace llvm::omp;

namespace {
enum OpenMPRTLFunctionNVPTX {
  /// Call to void __kmpc_kernel_init(kmp_int32 thread_limit,
  /// int16_t RequiresOMPRuntime);
  OMPRTL_NVPTX__kmpc_kernel_init,
  /// Call to void __kmpc_kernel_deinit(int16_t IsOMPRuntimeInitialized);
  OMPRTL_NVPTX__kmpc_kernel_deinit,
  /// Call to void __kmpc_spmd_kernel_init(kmp_int32 thread_limit,
  /// int16_t RequiresOMPRuntime);
  OMPRTL_NVPTX__kmpc_spmd_kernel_init,
  /// Call to void __kmpc_spmd_kernel_deinit_v2(int16_t RequiresOMPRuntime);
  OMPRTL_NVPTX__kmpc_spmd_kernel_deinit_v2,
  /// Call to void __kmpc_kernel_prepare_parallel(void
  /// *outlined_function);
  OMPRTL_NVPTX__kmpc_kernel_prepare_parallel,
  /// Call to bool __kmpc_kernel_parallel(void **outlined_function);
  OMPRTL_NVPTX__kmpc_kernel_parallel,
  /// Call to void __kmpc_kernel_end_parallel();
  OMPRTL_NVPTX__kmpc_kernel_end_parallel,
  /// Call to void __kmpc_serialized_parallel(ident_t *loc, kmp_int32
  /// global_tid);
  OMPRTL_NVPTX__kmpc_serialized_parallel,
  /// Call to void __kmpc_end_serialized_parallel(ident_t *loc, kmp_int32
  /// global_tid);
  OMPRTL_NVPTX__kmpc_end_serialized_parallel,
  /// Call to int32_t __kmpc_shuffle_int32(int32_t element,
  /// int16_t lane_offset, int16_t warp_size);
  OMPRTL_NVPTX__kmpc_shuffle_int32,
  /// Call to int64_t __kmpc_shuffle_int64(int64_t element,
  /// int16_t lane_offset, int16_t warp_size);
  OMPRTL_NVPTX__kmpc_shuffle_int64,
  /// Call to __kmpc_nvptx_parallel_reduce_nowait_v2(ident_t *loc, kmp_int32
  /// global_tid, kmp_int32 num_vars, size_t reduce_size, void* reduce_data,
  /// void (*kmp_ShuffleReductFctPtr)(void *rhsData, int16_t lane_id, int16_t
  /// lane_offset, int16_t shortCircuit),
  /// void (*kmp_InterWarpCopyFctPtr)(void* src, int32_t warp_num));
  OMPRTL_NVPTX__kmpc_nvptx_parallel_reduce_nowait_v2,
  /// Call to __kmpc_nvptx_teams_reduce_nowait_v2(ident_t *loc, kmp_int32
  /// global_tid, void *global_buffer, int32_t num_of_records, void*
  /// reduce_data,
  /// void (*kmp_ShuffleReductFctPtr)(void *rhsData, int16_t lane_id, int16_t
  /// lane_offset, int16_t shortCircuit),
  /// void (*kmp_InterWarpCopyFctPtr)(void* src, int32_t warp_num), void
  /// (*kmp_ListToGlobalCpyFctPtr)(void *buffer, int idx, void *reduce_data),
  /// void (*kmp_GlobalToListCpyFctPtr)(void *buffer, int idx,
  /// void *reduce_data), void (*kmp_GlobalToListCpyPtrsFctPtr)(void *buffer,
  /// int idx, void *reduce_data), void (*kmp_GlobalToListRedFctPtr)(void
  /// *buffer, int idx, void *reduce_data));
  OMPRTL_NVPTX__kmpc_nvptx_teams_reduce_nowait_v2,
  /// Call to __kmpc_nvptx_end_reduce_nowait(int32_t global_tid);
  OMPRTL_NVPTX__kmpc_end_reduce_nowait,
  /// Call to void __kmpc_data_sharing_init_stack();
  OMPRTL_NVPTX__kmpc_data_sharing_init_stack,
  /// Call to void __kmpc_data_sharing_init_stack_spmd();
  OMPRTL_NVPTX__kmpc_data_sharing_init_stack_spmd,
  /// Call to void* __kmpc_data_sharing_coalesced_push_stack(size_t size,
  /// int16_t UseSharedMemory);
  OMPRTL_NVPTX__kmpc_data_sharing_coalesced_push_stack,
  /// Call to void* __kmpc_data_sharing_push_stack(size_t size, int16_t
  /// UseSharedMemory);
  OMPRTL_NVPTX__kmpc_data_sharing_push_stack,
  /// Call to void __kmpc_data_sharing_pop_stack(void *a);
  OMPRTL_NVPTX__kmpc_data_sharing_pop_stack,
  /// Call to void __kmpc_begin_sharing_variables(void ***args,
  /// size_t n_args);
  OMPRTL_NVPTX__kmpc_begin_sharing_variables,
  /// Call to void __kmpc_end_sharing_variables();
  OMPRTL_NVPTX__kmpc_end_sharing_variables,
  /// Call to void __kmpc_get_shared_variables(void ***GlobalArgs);
  OMPRTL_NVPTX__kmpc_get_shared_variables,
  /// Call to uint16_t __kmpc_parallel_level(ident_t *loc, kmp_int32
  /// global_tid);
  OMPRTL_NVPTX__kmpc_parallel_level,
  /// Call to int8_t __kmpc_is_spmd_exec_mode();
  OMPRTL_NVPTX__kmpc_is_spmd_exec_mode,
  /// Call to void __kmpc_get_team_static_memory(int16_t isSPMDExecutionMode,
  /// const void *buf, size_t size, int16_t is_shared, const void **res);
  OMPRTL_NVPTX__kmpc_get_team_static_memory,
  /// Call to void __kmpc_restore_team_static_memory(int16_t
  /// isSPMDExecutionMode, int16_t is_shared);
  OMPRTL_NVPTX__kmpc_restore_team_static_memory,
  /// Call to void __kmpc_barrier(ident_t *loc, kmp_int32 global_tid);
  OMPRTL__kmpc_barrier,
  /// Call to void __kmpc_barrier_simple_spmd(ident_t *loc, kmp_int32
  /// global_tid);
  OMPRTL__kmpc_barrier_simple_spmd,
  /// Call to int32_t __kmpc_warp_active_thread_mask(void);
  OMPRTL_NVPTX__kmpc_warp_active_thread_mask,
  /// Call to void __kmpc_syncwarp(int32_t Mask);
  OMPRTL_NVPTX__kmpc_syncwarp,
};

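// Each enumerator above is lowered on demand by createNVPTXRuntimeFunction()
// further down in this file: it builds the matching llvm::FunctionType and
// returns a FunctionCallee for the named runtime entry point. For example,
// OMPRTL_NVPTX__kmpc_shuffle_int32 becomes a declaration of
//   int32_t __kmpc_shuffle_int32(int32_t, int16_t, int16_t);
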
/// Pre(post)-action for different OpenMP constructs specialized for NVPTX.
class NVPTXActionTy final : public PrePostActionTy {
  llvm::FunctionCallee EnterCallee = nullptr;
  ArrayRef<llvm::Value *> EnterArgs;
  llvm::FunctionCallee ExitCallee = nullptr;
  ArrayRef<llvm::Value *> ExitArgs;
  bool Conditional = false;
  llvm::BasicBlock *ContBlock = nullptr;

public:
  NVPTXActionTy(llvm::FunctionCallee EnterCallee,
                ArrayRef<llvm::Value *> EnterArgs,
                llvm::FunctionCallee ExitCallee,
                ArrayRef<llvm::Value *> ExitArgs, bool Conditional = false)
      : EnterCallee(EnterCallee), EnterArgs(EnterArgs), ExitCallee(ExitCallee),
        ExitArgs(ExitArgs), Conditional(Conditional) {}
  void Enter(CodeGenFunction &CGF) override {
    llvm::Value *EnterRes = CGF.EmitRuntimeCall(EnterCallee, EnterArgs);
    if (Conditional) {
      llvm::Value *CallBool = CGF.Builder.CreateIsNotNull(EnterRes);
      auto *ThenBlock = CGF.createBasicBlock("omp_if.then");
      ContBlock = CGF.createBasicBlock("omp_if.end");
      // Generate the branch (If-stmt)
      CGF.Builder.CreateCondBr(CallBool, ThenBlock, ContBlock);
      CGF.EmitBlock(ThenBlock);
    }
  }
  void Done(CodeGenFunction &CGF) {
    // Emit the rest of blocks/branches
    CGF.EmitBranch(ContBlock);
    CGF.EmitBlock(ContBlock, true);
  }
  void Exit(CodeGenFunction &CGF) override {
    CGF.EmitRuntimeCall(ExitCallee, ExitArgs);
  }
};

/// A class to track the execution mode when codegening directives within
/// a target region. The appropriate mode (SPMD|NON-SPMD) is set on entry
/// to the target region and used by containing directives such as 'parallel'
/// to emit optimized code.
class ExecutionRuntimeModesRAII {
private:
  CGOpenMPRuntimeGPU::ExecutionMode SavedExecMode =
      CGOpenMPRuntimeGPU::EM_Unknown;
  CGOpenMPRuntimeGPU::ExecutionMode &ExecMode;
  bool SavedRuntimeMode = false;
  bool *RuntimeMode = nullptr;

public:
  /// Constructor for Non-SPMD mode.
  ExecutionRuntimeModesRAII(CGOpenMPRuntimeGPU::ExecutionMode &ExecMode)
      : ExecMode(ExecMode) {
    SavedExecMode = ExecMode;
    ExecMode = CGOpenMPRuntimeGPU::EM_NonSPMD;
  }
  /// Constructor for SPMD mode.
  ExecutionRuntimeModesRAII(CGOpenMPRuntimeGPU::ExecutionMode &ExecMode,
                            bool &RuntimeMode, bool FullRuntimeMode)
      : ExecMode(ExecMode), RuntimeMode(&RuntimeMode) {
    SavedExecMode = ExecMode;
    SavedRuntimeMode = RuntimeMode;
    ExecMode = CGOpenMPRuntimeGPU::EM_SPMD;
    RuntimeMode = FullRuntimeMode;
  }
  ~ExecutionRuntimeModesRAII() {
    ExecMode = SavedExecMode;
    if (RuntimeMode)
      *RuntimeMode = SavedRuntimeMode;
  }
};

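// A minimal usage sketch for the RAII helper above (it mirrors what
// emitSPMDKernel below actually does; the FullRuntimeMode value here is made
// up for illustration):
//   {
//     ExecutionRuntimeModesRAII ModeRAII(CurrentExecutionMode,
//                                        RequiresFullRuntime,
//                                        /*FullRuntimeMode=*/true);
//     // ... emit the target region in SPMD mode ...
//   } // Both modes are restored when ModeRAII goes out of scope.
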
/// GPU Configuration: This information can be derived from cuda registers,
/// however, providing compile time constants helps generate more efficient
/// code. For all practical purposes this is fine because the configuration
/// is the same for all known NVPTX architectures.
enum MachineConfiguration : unsigned {
  /// See "llvm/Frontend/OpenMP/OMPGridValues.h" for various related target
  /// specific Grid Values like GV_Warp_Size, GV_Warp_Size_Log2,
  /// and GV_Warp_Size_Log2_Mask.

  /// Global memory alignment for performance.
  GlobalMemoryAlignment = 128,

  /// Maximal size of the shared memory buffer.
  SharedMemorySize = 128,
};

static const ValueDecl *getPrivateItem(const Expr *RefExpr) {
  RefExpr = RefExpr->IgnoreParens();
  if (const auto *ASE = dyn_cast<ArraySubscriptExpr>(RefExpr)) {
    const Expr *Base = ASE->getBase()->IgnoreParenImpCasts();
    while (const auto *TempASE = dyn_cast<ArraySubscriptExpr>(Base))
      Base = TempASE->getBase()->IgnoreParenImpCasts();
    RefExpr = Base;
  } else if (auto *OASE = dyn_cast<OMPArraySectionExpr>(RefExpr)) {
    const Expr *Base = OASE->getBase()->IgnoreParenImpCasts();
    while (const auto *TempOASE = dyn_cast<OMPArraySectionExpr>(Base))
      Base = TempOASE->getBase()->IgnoreParenImpCasts();
    while (const auto *TempASE = dyn_cast<ArraySubscriptExpr>(Base))
      Base = TempASE->getBase()->IgnoreParenImpCasts();
    RefExpr = Base;
  }
  RefExpr = RefExpr->IgnoreParenImpCasts();
  if (const auto *DE = dyn_cast<DeclRefExpr>(RefExpr))
    return cast<ValueDecl>(DE->getDecl()->getCanonicalDecl());
  const auto *ME = cast<MemberExpr>(RefExpr);
  return cast<ValueDecl>(ME->getMemberDecl()->getCanonicalDecl());
}

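// For example, getPrivateItem() maps a private/reduction item expression back
// to the underlying declaration:
//   a[i][j]  -> decl of 'a'  (nested array subscripts stripped)
//   b[1:n]   -> decl of 'b'  (OpenMP array sections stripped)
//   s.x      -> member decl of 'x'
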
static RecordDecl *buildRecordForGlobalizedVars(
    ASTContext &C, ArrayRef<const ValueDecl *> EscapedDecls,
    ArrayRef<const ValueDecl *> EscapedDeclsForTeams,
    llvm::SmallDenseMap<const ValueDecl *, const FieldDecl *>
        &MappedDeclsFields, int BufSize) {
  using VarsDataTy = std::pair<CharUnits /*Align*/, const ValueDecl *>;
  if (EscapedDecls.empty() && EscapedDeclsForTeams.empty())
    return nullptr;
  SmallVector<VarsDataTy, 4> GlobalizedVars;
  for (const ValueDecl *D : EscapedDecls)
    GlobalizedVars.emplace_back(
        CharUnits::fromQuantity(std::max(
            C.getDeclAlign(D).getQuantity(),
            static_cast<CharUnits::QuantityType>(GlobalMemoryAlignment))),
        D);
  for (const ValueDecl *D : EscapedDeclsForTeams)
    GlobalizedVars.emplace_back(C.getDeclAlign(D), D);
  llvm::stable_sort(GlobalizedVars, [](VarsDataTy L, VarsDataTy R) {
    return L.first > R.first;
  });

  // Build struct _globalized_locals_ty {
  //         /* globalized vars */[WarpSize] align (max(decl_align,
  //         GlobalMemoryAlignment))
  //         /* globalized vars */ for EscapedDeclsForTeams
  //       };
  RecordDecl *GlobalizedRD = C.buildImplicitRecord("_globalized_locals_ty");
  GlobalizedRD->startDefinition();
  llvm::SmallPtrSet<const ValueDecl *, 16> SingleEscaped(
      EscapedDeclsForTeams.begin(), EscapedDeclsForTeams.end());
  for (const auto &Pair : GlobalizedVars) {
    const ValueDecl *VD = Pair.second;
    QualType Type = VD->getType();
    if (Type->isLValueReferenceType())
      Type = C.getPointerType(Type.getNonReferenceType());
    else
      Type = Type.getNonReferenceType();
    SourceLocation Loc = VD->getLocation();
    FieldDecl *Field;
    if (SingleEscaped.count(VD)) {
      Field = FieldDecl::Create(
          C, GlobalizedRD, Loc, Loc, VD->getIdentifier(), Type,
          C.getTrivialTypeSourceInfo(Type, SourceLocation()),
          /*BW=*/nullptr, /*Mutable=*/false,
          /*InitStyle=*/ICIS_NoInit);
      Field->setAccess(AS_public);
      if (VD->hasAttrs()) {
        for (specific_attr_iterator<AlignedAttr> I(VD->getAttrs().begin()),
             E(VD->getAttrs().end());
             I != E; ++I)
          Field->addAttr(*I);
      }
    } else {
      llvm::APInt ArraySize(32, BufSize);
      Type = C.getConstantArrayType(Type, ArraySize, nullptr,
                                    ArrayType::Normal, 0);
      Field = FieldDecl::Create(
          C, GlobalizedRD, Loc, Loc, VD->getIdentifier(), Type,
          C.getTrivialTypeSourceInfo(Type, SourceLocation()),
          /*BW=*/nullptr, /*Mutable=*/false,
          /*InitStyle=*/ICIS_NoInit);
      Field->setAccess(AS_public);
      llvm::APInt Align(32, std::max(C.getDeclAlign(VD).getQuantity(),
                                     static_cast<CharUnits::QuantityType>(
                                         GlobalMemoryAlignment)));
      Field->addAttr(AlignedAttr::CreateImplicit(
          C, /*IsAlignmentExpr=*/true,
          IntegerLiteral::Create(C, Align,
                                 C.getIntTypeForBitwidth(32, /*Signed=*/0),
                                 SourceLocation()),
          {}, AttributeCommonInfo::AS_GNU, AlignedAttr::GNU_aligned));
    }
    GlobalizedRD->addDecl(Field);
    MappedDeclsFields.try_emplace(VD, Field);
  }
  GlobalizedRD->completeDefinition();
  return GlobalizedRD;
}

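// As a rough sketch (assuming warp size 32 as BufSize and the default
// 128-byte GlobalMemoryAlignment), globalizing 'int x' escaping from a
// parallel region together with 'double d' listed in EscapedDeclsForTeams
// produces a record equivalent to:
//   struct _globalized_locals_ty {
//     int x[32] __attribute__((aligned(128))); // one slot per lane
//     double d;                                // teams variable: single slot
//   };
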
/// Get the list of variables that can escape their declaration context.
class CheckVarsEscapingDeclContext final
    : public ConstStmtVisitor<CheckVarsEscapingDeclContext> {
  CodeGenFunction &CGF;
  llvm::SetVector<const ValueDecl *> EscapedDecls;
  llvm::SetVector<const ValueDecl *> EscapedVariableLengthDecls;
  llvm::SmallPtrSet<const Decl *, 4> EscapedParameters;
  RecordDecl *GlobalizedRD = nullptr;
  llvm::SmallDenseMap<const ValueDecl *, const FieldDecl *> MappedDeclsFields;
  bool AllEscaped = false;
  bool IsForCombinedParallelRegion = false;

  void markAsEscaped(const ValueDecl *VD) {
    // Do not globalize declare target variables.
    if (!isa<VarDecl>(VD) ||
        OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD))
      return;
    VD = cast<ValueDecl>(VD->getCanonicalDecl());
    // Use user-specified allocation.
    if (VD->hasAttrs() && VD->hasAttr<OMPAllocateDeclAttr>())
      return;
    // Variables captured by value must be globalized.
    if (auto *CSI = CGF.CapturedStmtInfo) {
      if (const FieldDecl *FD = CSI->lookup(cast<VarDecl>(VD))) {
        // Check if we need to capture the variable that was already captured
        // by value in the outer region.
        if (!IsForCombinedParallelRegion) {
          if (!FD->hasAttrs())
            return;
          const auto *Attr = FD->getAttr<OMPCaptureKindAttr>();
          if (!Attr)
            return;
          if (((Attr->getCaptureKind() != OMPC_map) &&
               !isOpenMPPrivate(Attr->getCaptureKind())) ||
              ((Attr->getCaptureKind() == OMPC_map) &&
               !FD->getType()->isAnyPointerType()))
            return;
        }
        if (!FD->getType()->isReferenceType()) {
          assert(!VD->getType()->isVariablyModifiedType() &&
                 "Parameter captured by value with variably modified type");
          EscapedParameters.insert(VD);
        } else if (!IsForCombinedParallelRegion) {
          return;
        }
      }
    }
    if ((!CGF.CapturedStmtInfo ||
         (IsForCombinedParallelRegion && CGF.CapturedStmtInfo)) &&
        VD->getType()->isReferenceType())
      // Do not globalize variables with reference type.
      return;
    if (VD->getType()->isVariablyModifiedType())
      EscapedVariableLengthDecls.insert(VD);
    else
      EscapedDecls.insert(VD);
  }

  void VisitValueDecl(const ValueDecl *VD) {
    if (VD->getType()->isLValueReferenceType())
      markAsEscaped(VD);
    if (const auto *VarD = dyn_cast<VarDecl>(VD)) {
      if (!isa<ParmVarDecl>(VarD) && VarD->hasInit()) {
        const bool SavedAllEscaped = AllEscaped;
        AllEscaped = VD->getType()->isLValueReferenceType();
        Visit(VarD->getInit());
        AllEscaped = SavedAllEscaped;
      }
    }
  }
  void VisitOpenMPCapturedStmt(const CapturedStmt *S,
                               ArrayRef<OMPClause *> Clauses,
                               bool IsCombinedParallelRegion) {
    if (!S)
      return;
    for (const CapturedStmt::Capture &C : S->captures()) {
      if (C.capturesVariable() && !C.capturesVariableByCopy()) {
        const ValueDecl *VD = C.getCapturedVar();
        bool SavedIsForCombinedParallelRegion = IsForCombinedParallelRegion;
        if (IsCombinedParallelRegion) {
          // Check if the variable is privatized in the combined construct;
          // such private copies must be shared in the inner parallel
          // directive.
          IsForCombinedParallelRegion = false;
          for (const OMPClause *C : Clauses) {
            if (!isOpenMPPrivate(C->getClauseKind()) ||
                C->getClauseKind() == OMPC_reduction ||
                C->getClauseKind() == OMPC_linear ||
                C->getClauseKind() == OMPC_private)
              continue;
            ArrayRef<const Expr *> Vars;
            if (const auto *PC = dyn_cast<OMPFirstprivateClause>(C))
              Vars = PC->getVarRefs();
            else if (const auto *PC = dyn_cast<OMPLastprivateClause>(C))
              Vars = PC->getVarRefs();
            else
              llvm_unreachable("Unexpected clause.");
            for (const auto *E : Vars) {
              const Decl *D =
                  cast<DeclRefExpr>(E)->getDecl()->getCanonicalDecl();
              if (D == VD->getCanonicalDecl()) {
                IsForCombinedParallelRegion = true;
                break;
              }
            }
            if (IsForCombinedParallelRegion)
              break;
          }
        }
        markAsEscaped(VD);
        if (isa<OMPCapturedExprDecl>(VD))
          VisitValueDecl(VD);
        IsForCombinedParallelRegion = SavedIsForCombinedParallelRegion;
      }
    }
  }

  void buildRecordForGlobalizedVars(bool IsInTTDRegion) {
    assert(!GlobalizedRD &&
           "Record for globalized variables is built already.");
    ArrayRef<const ValueDecl *> EscapedDeclsForParallel, EscapedDeclsForTeams;
    unsigned WarpSize = CGF.getTarget().getGridValue(llvm::omp::GV_Warp_Size);
    if (IsInTTDRegion)
      EscapedDeclsForTeams = EscapedDecls.getArrayRef();
    else
      EscapedDeclsForParallel = EscapedDecls.getArrayRef();
    GlobalizedRD = ::buildRecordForGlobalizedVars(
        CGF.getContext(), EscapedDeclsForParallel, EscapedDeclsForTeams,
        MappedDeclsFields, WarpSize);
  }

public:
  CheckVarsEscapingDeclContext(CodeGenFunction &CGF,
                               ArrayRef<const ValueDecl *> TeamsReductions)
      : CGF(CGF), EscapedDecls(TeamsReductions.begin(), TeamsReductions.end()) {
  }
  virtual ~CheckVarsEscapingDeclContext() = default;
  void VisitDeclStmt(const DeclStmt *S) {
    if (!S)
      return;
    for (const Decl *D : S->decls())
      if (const auto *VD = dyn_cast_or_null<ValueDecl>(D))
        VisitValueDecl(VD);
  }
  void VisitOMPExecutableDirective(const OMPExecutableDirective *D) {
    if (!D)
      return;
    if (!D->hasAssociatedStmt())
      return;
    if (const auto *S =
            dyn_cast_or_null<CapturedStmt>(D->getAssociatedStmt())) {
      // Do not analyze directives that do not actually require capturing,
      // like `omp for` or `omp simd` directives.
      llvm::SmallVector<OpenMPDirectiveKind, 4> CaptureRegions;
      getOpenMPCaptureRegions(CaptureRegions, D->getDirectiveKind());
      if (CaptureRegions.size() == 1 && CaptureRegions.back() == OMPD_unknown) {
        VisitStmt(S->getCapturedStmt());
        return;
      }
      VisitOpenMPCapturedStmt(
          S, D->clauses(),
          CaptureRegions.back() == OMPD_parallel &&
              isOpenMPDistributeDirective(D->getDirectiveKind()));
    }
  }
  void VisitCapturedStmt(const CapturedStmt *S) {
    if (!S)
      return;
    for (const CapturedStmt::Capture &C : S->captures()) {
      if (C.capturesVariable() && !C.capturesVariableByCopy()) {
        const ValueDecl *VD = C.getCapturedVar();
        markAsEscaped(VD);
        if (isa<OMPCapturedExprDecl>(VD))
          VisitValueDecl(VD);
      }
    }
  }
  void VisitLambdaExpr(const LambdaExpr *E) {
    if (!E)
      return;
    for (const LambdaCapture &C : E->captures()) {
      if (C.capturesVariable()) {
        if (C.getCaptureKind() == LCK_ByRef) {
          const ValueDecl *VD = C.getCapturedVar();
          markAsEscaped(VD);
          if (E->isInitCapture(&C) || isa<OMPCapturedExprDecl>(VD))
            VisitValueDecl(VD);
        }
      }
    }
  }
  void VisitBlockExpr(const BlockExpr *E) {
    if (!E)
      return;
    for (const BlockDecl::Capture &C : E->getBlockDecl()->captures()) {
      if (C.isByRef()) {
        const VarDecl *VD = C.getVariable();
        markAsEscaped(VD);
        if (isa<OMPCapturedExprDecl>(VD) || VD->isInitCapture())
          VisitValueDecl(VD);
      }
    }
  }
  void VisitCallExpr(const CallExpr *E) {
    if (!E)
      return;
    for (const Expr *Arg : E->arguments()) {
      if (!Arg)
        continue;
      if (Arg->isLValue()) {
        const bool SavedAllEscaped = AllEscaped;
        AllEscaped = true;
        Visit(Arg);
        AllEscaped = SavedAllEscaped;
      } else {
        Visit(Arg);
      }
    }
    Visit(E->getCallee());
  }
  void VisitDeclRefExpr(const DeclRefExpr *E) {
    if (!E)
      return;
    const ValueDecl *VD = E->getDecl();
    if (AllEscaped)
      markAsEscaped(VD);
    if (isa<OMPCapturedExprDecl>(VD))
      VisitValueDecl(VD);
    else if (const auto *VarD = dyn_cast<VarDecl>(VD))
      if (VarD->isInitCapture())
        VisitValueDecl(VD);
  }
  void VisitUnaryOperator(const UnaryOperator *E) {
    if (!E)
      return;
    if (E->getOpcode() == UO_AddrOf) {
      const bool SavedAllEscaped = AllEscaped;
      AllEscaped = true;
      Visit(E->getSubExpr());
      AllEscaped = SavedAllEscaped;
    } else {
      Visit(E->getSubExpr());
    }
  }
  void VisitImplicitCastExpr(const ImplicitCastExpr *E) {
    if (!E)
      return;
    if (E->getCastKind() == CK_ArrayToPointerDecay) {
      const bool SavedAllEscaped = AllEscaped;
      AllEscaped = true;
      Visit(E->getSubExpr());
      AllEscaped = SavedAllEscaped;
    } else {
      Visit(E->getSubExpr());
    }
  }
  void VisitExpr(const Expr *E) {
    if (!E)
      return;
    bool SavedAllEscaped = AllEscaped;
    if (!E->isLValue())
      AllEscaped = false;
    for (const Stmt *Child : E->children())
      if (Child)
        Visit(Child);
    AllEscaped = SavedAllEscaped;
  }
  void VisitStmt(const Stmt *S) {
    if (!S)
      return;
    for (const Stmt *Child : S->children())
      if (Child)
        Visit(Child);
  }

  /// Returns the record that handles all the escaped local variables and is
  /// used instead of their original storage.
  const RecordDecl *getGlobalizedRecord(bool IsInTTDRegion) {
    if (!GlobalizedRD)
      buildRecordForGlobalizedVars(IsInTTDRegion);
    return GlobalizedRD;
  }

  /// Returns the field in the globalized record for the escaped variable.
  const FieldDecl *getFieldForGlobalizedVar(const ValueDecl *VD) const {
    assert(GlobalizedRD &&
           "Record for globalized variables must be generated already.");
    auto I = MappedDeclsFields.find(VD);
    if (I == MappedDeclsFields.end())
      return nullptr;
    return I->getSecond();
  }

  /// Returns the list of the escaped local variables/parameters.
  ArrayRef<const ValueDecl *> getEscapedDecls() const {
    return EscapedDecls.getArrayRef();
  }

  /// Returns the set of escaped local variables that are actually parameters
  /// passed by value.
  const llvm::SmallPtrSetImpl<const Decl *> &getEscapedParameters() const {
    return EscapedParameters;
  }

  /// Returns the list of the escaped variables with the variably modified
  /// types.
  ArrayRef<const ValueDecl *> getEscapedVariableLengthDecls() const {
    return EscapedVariableLengthDecls.getArrayRef();
  }
};
} // anonymous namespace

/// Get the id of the warp in the block.
/// We assume that the warp size is 32, which is always the case
/// on the NVPTX device, to generate more efficient code.
static llvm::Value *getNVPTXWarpID(CodeGenFunction &CGF) {
  CGBuilderTy &Bld = CGF.Builder;
  unsigned LaneIDBits =
      CGF.getTarget().getGridValue(llvm::omp::GV_Warp_Size_Log2);
  auto &RT = static_cast<CGOpenMPRuntimeGPU &>(CGF.CGM.getOpenMPRuntime());
  return Bld.CreateAShr(RT.getGPUThreadID(CGF), LaneIDBits, "nvptx_warp_id");
}

/// Get the id of the current lane in the Warp.
/// We assume that the warp size is 32, which is always the case
/// on the NVPTX device, to generate more efficient code.
static llvm::Value *getNVPTXLaneID(CodeGenFunction &CGF) {
  CGBuilderTy &Bld = CGF.Builder;
  unsigned LaneIDMask = CGF.getContext().getTargetInfo().getGridValue(
      llvm::omp::GV_Warp_Size_Log2_Mask);
  auto &RT = static_cast<CGOpenMPRuntimeGPU &>(CGF.CGM.getOpenMPRuntime());
  return Bld.CreateAnd(RT.getGPUThreadID(CGF), Bld.getInt32(LaneIDMask),
                       "nvptx_lane_id");
}

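// Quick sanity check for the two helpers above, with warp size 32 (so
// GV_Warp_Size_Log2 == 5 and GV_Warp_Size_Log2_Mask == 31): thread id 70
// yields warp id 70 >> 5 == 2 and lane id 70 & 31 == 6.
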
/// Get the value of the thread_limit clause in the teams directive.
/// For the 'generic' execution mode, the runtime encodes thread_limit in
/// the launch parameters, always starting thread_limit+warpSize threads per
/// CTA. The threads in the last warp are reserved for master execution.
/// For the 'spmd' execution mode, all threads in a CTA are part of the team.
static llvm::Value *getThreadLimit(CodeGenFunction &CGF,
                                   bool IsInSPMDExecutionMode = false) {
  CGBuilderTy &Bld = CGF.Builder;
  auto &RT = static_cast<CGOpenMPRuntimeGPU &>(CGF.CGM.getOpenMPRuntime());
  return IsInSPMDExecutionMode
             ? RT.getGPUNumThreads(CGF)
             : Bld.CreateNUWSub(RT.getGPUNumThreads(CGF),
                                RT.getGPUWarpSize(CGF), "thread_limit");
}

/// Get the thread id of the OMP master thread.
/// The master thread id is the first thread (lane) of the last warp in the
/// GPU block. Warp size is assumed to be some power of 2.
/// Thread id is 0 indexed.
/// E.g.: If NumThreads is 33, master id is 32.
///       If NumThreads is 64, master id is 32.
///       If NumThreads is 1024, master id is 992.
static llvm::Value *getMasterThreadID(CodeGenFunction &CGF) {
  CGBuilderTy &Bld = CGF.Builder;
  auto &RT = static_cast<CGOpenMPRuntimeGPU &>(CGF.CGM.getOpenMPRuntime());
  llvm::Value *NumThreads = RT.getGPUNumThreads(CGF);
  // We assume that the warp size is a power of 2.
  llvm::Value *Mask = Bld.CreateNUWSub(RT.getGPUWarpSize(CGF), Bld.getInt32(1));

  return Bld.CreateAnd(Bld.CreateNUWSub(NumThreads, Bld.getInt32(1)),
                       Bld.CreateNot(Mask), "master_tid");
}

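// The returned value is (NumThreads - 1) & ~(WarpSize - 1), i.e. the last
// thread id rounded down to a multiple of the warp size; with WarpSize 32 and
// NumThreads 1024 this gives 1023 & ~31 == 992, matching the examples above.
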
CGOpenMPRuntimeGPU::WorkerFunctionState::WorkerFunctionState(
    CodeGenModule &CGM, SourceLocation Loc)
    : WorkerFn(nullptr), CGFI(CGM.getTypes().arrangeNullaryFunction()),
      Loc(Loc) {
  createWorkerFunction(CGM);
}

void CGOpenMPRuntimeGPU::WorkerFunctionState::createWorkerFunction(
    CodeGenModule &CGM) {
  // Create a worker function with no arguments.

  WorkerFn = llvm::Function::Create(
      CGM.getTypes().GetFunctionType(CGFI), llvm::GlobalValue::InternalLinkage,
      /*placeholder=*/"_worker", &CGM.getModule());
  CGM.SetInternalFunctionAttributes(GlobalDecl(), WorkerFn, CGFI);
  WorkerFn->setDoesNotRecurse();
}

CGOpenMPRuntimeGPU::ExecutionMode
CGOpenMPRuntimeGPU::getExecutionMode() const {
  return CurrentExecutionMode;
}

static CGOpenMPRuntimeGPU::DataSharingMode
getDataSharingMode(CodeGenModule &CGM) {
  return CGM.getLangOpts().OpenMPCUDAMode ? CGOpenMPRuntimeGPU::CUDA
                                          : CGOpenMPRuntimeGPU::Generic;
}

/// Check for inner (nested) SPMD construct, if any.
static bool hasNestedSPMDDirective(ASTContext &Ctx,
                                   const OMPExecutableDirective &D) {
  const auto *CS = D.getInnermostCapturedStmt();
  const auto *Body =
      CS->getCapturedStmt()->IgnoreContainers(/*IgnoreCaptured=*/true);
  const Stmt *ChildStmt = CGOpenMPRuntime::getSingleCompoundChild(Ctx, Body);

  if (const auto *NestedDir =
          dyn_cast_or_null<OMPExecutableDirective>(ChildStmt)) {
    OpenMPDirectiveKind DKind = NestedDir->getDirectiveKind();
    switch (D.getDirectiveKind()) {
    case OMPD_target:
      if (isOpenMPParallelDirective(DKind))
        return true;
      if (DKind == OMPD_teams) {
        Body = NestedDir->getInnermostCapturedStmt()->IgnoreContainers(
            /*IgnoreCaptured=*/true);
        if (!Body)
          return false;
        ChildStmt = CGOpenMPRuntime::getSingleCompoundChild(Ctx, Body);
        if (const auto *NND =
                dyn_cast_or_null<OMPExecutableDirective>(ChildStmt)) {
          DKind = NND->getDirectiveKind();
          if (isOpenMPParallelDirective(DKind))
            return true;
        }
      }
      return false;
    case OMPD_target_teams:
      return isOpenMPParallelDirective(DKind);
    case OMPD_target_simd:
    case OMPD_target_parallel:
    case OMPD_target_parallel_for:
    case OMPD_target_parallel_for_simd:
    case OMPD_target_teams_distribute:
    case OMPD_target_teams_distribute_simd:
    case OMPD_target_teams_distribute_parallel_for:
    case OMPD_target_teams_distribute_parallel_for_simd:
    case OMPD_parallel:
    case OMPD_for:
    case OMPD_parallel_for:
    case OMPD_parallel_master:
    case OMPD_parallel_sections:
    case OMPD_for_simd:
    case OMPD_parallel_for_simd:
    case OMPD_cancel:
    case OMPD_cancellation_point:
    case OMPD_ordered:
    case OMPD_threadprivate:
    case OMPD_allocate:
    case OMPD_task:
    case OMPD_simd:
    case OMPD_sections:
    case OMPD_section:
    case OMPD_single:
    case OMPD_master:
    case OMPD_critical:
    case OMPD_taskyield:
    case OMPD_barrier:
    case OMPD_taskwait:
    case OMPD_taskgroup:
    case OMPD_atomic:
    case OMPD_flush:
    case OMPD_depobj:
    case OMPD_scan:
    case OMPD_teams:
    case OMPD_target_data:
    case OMPD_target_exit_data:
    case OMPD_target_enter_data:
    case OMPD_distribute:
    case OMPD_distribute_simd:
    case OMPD_distribute_parallel_for:
    case OMPD_distribute_parallel_for_simd:
    case OMPD_teams_distribute:
    case OMPD_teams_distribute_simd:
    case OMPD_teams_distribute_parallel_for:
    case OMPD_teams_distribute_parallel_for_simd:
    case OMPD_target_update:
    case OMPD_declare_simd:
    case OMPD_declare_variant:
    case OMPD_begin_declare_variant:
    case OMPD_end_declare_variant:
    case OMPD_declare_target:
    case OMPD_end_declare_target:
    case OMPD_declare_reduction:
    case OMPD_declare_mapper:
    case OMPD_taskloop:
    case OMPD_taskloop_simd:
    case OMPD_master_taskloop:
    case OMPD_master_taskloop_simd:
    case OMPD_parallel_master_taskloop:
    case OMPD_parallel_master_taskloop_simd:
    case OMPD_requires:
    case OMPD_unknown:
    default:
      llvm_unreachable("Unexpected directive.");
    }
  }

  return false;
}

static bool supportsSPMDExecutionMode(ASTContext &Ctx,
                                      const OMPExecutableDirective &D) {
  OpenMPDirectiveKind DirectiveKind = D.getDirectiveKind();
  switch (DirectiveKind) {
  case OMPD_target:
  case OMPD_target_teams:
    return hasNestedSPMDDirective(Ctx, D);
  case OMPD_target_parallel:
  case OMPD_target_parallel_for:
  case OMPD_target_parallel_for_simd:
  case OMPD_target_teams_distribute_parallel_for:
  case OMPD_target_teams_distribute_parallel_for_simd:
  case OMPD_target_simd:
  case OMPD_target_teams_distribute_simd:
    return true;
  case OMPD_target_teams_distribute:
    return false;
  case OMPD_parallel:
  case OMPD_for:
  case OMPD_parallel_for:
  case OMPD_parallel_master:
  case OMPD_parallel_sections:
  case OMPD_for_simd:
  case OMPD_parallel_for_simd:
  case OMPD_cancel:
  case OMPD_cancellation_point:
  case OMPD_ordered:
  case OMPD_threadprivate:
  case OMPD_allocate:
  case OMPD_task:
  case OMPD_simd:
  case OMPD_sections:
  case OMPD_section:
  case OMPD_single:
  case OMPD_master:
  case OMPD_critical:
  case OMPD_taskyield:
  case OMPD_barrier:
  case OMPD_taskwait:
  case OMPD_taskgroup:
  case OMPD_atomic:
  case OMPD_flush:
  case OMPD_depobj:
  case OMPD_scan:
  case OMPD_teams:
  case OMPD_target_data:
  case OMPD_target_exit_data:
  case OMPD_target_enter_data:
  case OMPD_distribute:
  case OMPD_distribute_simd:
  case OMPD_distribute_parallel_for:
  case OMPD_distribute_parallel_for_simd:
  case OMPD_teams_distribute:
  case OMPD_teams_distribute_simd:
  case OMPD_teams_distribute_parallel_for:
  case OMPD_teams_distribute_parallel_for_simd:
  case OMPD_target_update:
  case OMPD_declare_simd:
  case OMPD_declare_variant:
  case OMPD_begin_declare_variant:
  case OMPD_end_declare_variant:
  case OMPD_declare_target:
  case OMPD_end_declare_target:
  case OMPD_declare_reduction:
  case OMPD_declare_mapper:
  case OMPD_taskloop:
  case OMPD_taskloop_simd:
  case OMPD_master_taskloop:
  case OMPD_master_taskloop_simd:
  case OMPD_parallel_master_taskloop:
  case OMPD_parallel_master_taskloop_simd:
  case OMPD_requires:
  case OMPD_unknown:
  default:
    break;
  }
  llvm_unreachable(
      "Unknown programming model for OpenMP directive on NVPTX target.");
}

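// Under the logic above, for example:
//   #pragma omp target parallel for                   -> SPMD
//   #pragma omp target teams distribute parallel for  -> SPMD
//   #pragma omp target teams distribute               -> non-SPMD (generic)
//   #pragma omp target / #pragma omp target teams     -> SPMD only when a
//     parallel directive is the single construct nested directly inside.
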
/// Check if the loop-based directive has static scheduling: it must have no
/// 'ordered' clause and either no 'schedule' clause or a static one.
static bool hasStaticScheduling(const OMPExecutableDirective &D) {
  assert(isOpenMPWorksharingDirective(D.getDirectiveKind()) &&
         isOpenMPLoopDirective(D.getDirectiveKind()) &&
         "Expected loop-based directive.");
  return !D.hasClausesOfKind<OMPOrderedClause>() &&
         (!D.hasClausesOfKind<OMPScheduleClause>() ||
          llvm::any_of(D.getClausesOfKind<OMPScheduleClause>(),
                       [](const OMPScheduleClause *C) {
                         return C->getScheduleKind() == OMPC_SCHEDULE_static;
                       }));
}

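// For instance, a loop directive with no schedule clause or with
// 'schedule(static)' passes this check, while 'schedule(dynamic)' or an
// 'ordered' clause makes it fail.
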
/// Check for inner (nested) lightweight runtime construct, if any.
static bool hasNestedLightweightDirective(ASTContext &Ctx,
                                          const OMPExecutableDirective &D) {
  assert(supportsSPMDExecutionMode(Ctx, D) && "Expected SPMD mode directive.");
  const auto *CS = D.getInnermostCapturedStmt();
  const auto *Body =
      CS->getCapturedStmt()->IgnoreContainers(/*IgnoreCaptured=*/true);
  const Stmt *ChildStmt = CGOpenMPRuntime::getSingleCompoundChild(Ctx, Body);

  if (const auto *NestedDir =
          dyn_cast_or_null<OMPExecutableDirective>(ChildStmt)) {
    OpenMPDirectiveKind DKind = NestedDir->getDirectiveKind();
    switch (D.getDirectiveKind()) {
    case OMPD_target:
      if (isOpenMPParallelDirective(DKind) &&
          isOpenMPWorksharingDirective(DKind) && isOpenMPLoopDirective(DKind) &&
          hasStaticScheduling(*NestedDir))
        return true;
      if (DKind == OMPD_teams_distribute_simd || DKind == OMPD_simd)
        return true;
      if (DKind == OMPD_parallel) {
        Body = NestedDir->getInnermostCapturedStmt()->IgnoreContainers(
            /*IgnoreCaptured=*/true);
        if (!Body)
          return false;
        ChildStmt = CGOpenMPRuntime::getSingleCompoundChild(Ctx, Body);
        if (const auto *NND =
                dyn_cast_or_null<OMPExecutableDirective>(ChildStmt)) {
          DKind = NND->getDirectiveKind();
          if (isOpenMPWorksharingDirective(DKind) &&
              isOpenMPLoopDirective(DKind) && hasStaticScheduling(*NND))
            return true;
        }
      } else if (DKind == OMPD_teams) {
        Body = NestedDir->getInnermostCapturedStmt()->IgnoreContainers(
            /*IgnoreCaptured=*/true);
        if (!Body)
          return false;
        ChildStmt = CGOpenMPRuntime::getSingleCompoundChild(Ctx, Body);
        if (const auto *NND =
                dyn_cast_or_null<OMPExecutableDirective>(ChildStmt)) {
          DKind = NND->getDirectiveKind();
          if (isOpenMPParallelDirective(DKind) &&
              isOpenMPWorksharingDirective(DKind) &&
              isOpenMPLoopDirective(DKind) && hasStaticScheduling(*NND))
            return true;
          if (DKind == OMPD_parallel) {
            Body = NND->getInnermostCapturedStmt()->IgnoreContainers(
                /*IgnoreCaptured=*/true);
            if (!Body)
              return false;
            ChildStmt = CGOpenMPRuntime::getSingleCompoundChild(Ctx, Body);
            if (const auto *NND =
                    dyn_cast_or_null<OMPExecutableDirective>(ChildStmt)) {
              DKind = NND->getDirectiveKind();
              if (isOpenMPWorksharingDirective(DKind) &&
                  isOpenMPLoopDirective(DKind) && hasStaticScheduling(*NND))
                return true;
            }
          }
        }
      }
      return false;
    case OMPD_target_teams:
      if (isOpenMPParallelDirective(DKind) &&
          isOpenMPWorksharingDirective(DKind) && isOpenMPLoopDirective(DKind) &&
          hasStaticScheduling(*NestedDir))
        return true;
      if (DKind == OMPD_distribute_simd || DKind == OMPD_simd)
        return true;
      if (DKind == OMPD_parallel) {
        Body = NestedDir->getInnermostCapturedStmt()->IgnoreContainers(
            /*IgnoreCaptured=*/true);
        if (!Body)
          return false;
        ChildStmt = CGOpenMPRuntime::getSingleCompoundChild(Ctx, Body);
        if (const auto *NND =
                dyn_cast_or_null<OMPExecutableDirective>(ChildStmt)) {
          DKind = NND->getDirectiveKind();
          if (isOpenMPWorksharingDirective(DKind) &&
              isOpenMPLoopDirective(DKind) && hasStaticScheduling(*NND))
            return true;
        }
      }
      return false;
    case OMPD_target_parallel:
      if (DKind == OMPD_simd)
        return true;
      return isOpenMPWorksharingDirective(DKind) &&
             isOpenMPLoopDirective(DKind) && hasStaticScheduling(*NestedDir);
    case OMPD_target_teams_distribute:
    case OMPD_target_simd:
    case OMPD_target_parallel_for:
    case OMPD_target_parallel_for_simd:
    case OMPD_target_teams_distribute_simd:
    case OMPD_target_teams_distribute_parallel_for:
    case OMPD_target_teams_distribute_parallel_for_simd:
    case OMPD_parallel:
    case OMPD_for:
    case OMPD_parallel_for:
    case OMPD_parallel_master:
    case OMPD_parallel_sections:
    case OMPD_for_simd:
    case OMPD_parallel_for_simd:
    case OMPD_cancel:
    case OMPD_cancellation_point:
    case OMPD_ordered:
    case OMPD_threadprivate:
    case OMPD_allocate:
    case OMPD_task:
    case OMPD_simd:
    case OMPD_sections:
    case OMPD_section:
    case OMPD_single:
    case OMPD_master:
    case OMPD_critical:
    case OMPD_taskyield:
    case OMPD_barrier:
    case OMPD_taskwait:
    case OMPD_taskgroup:
    case OMPD_atomic:
    case OMPD_flush:
    case OMPD_depobj:
    case OMPD_scan:
    case OMPD_teams:
    case OMPD_target_data:
    case OMPD_target_exit_data:
    case OMPD_target_enter_data:
    case OMPD_distribute:
    case OMPD_distribute_simd:
    case OMPD_distribute_parallel_for:
    case OMPD_distribute_parallel_for_simd:
    case OMPD_teams_distribute:
    case OMPD_teams_distribute_simd:
    case OMPD_teams_distribute_parallel_for:
    case OMPD_teams_distribute_parallel_for_simd:
    case OMPD_target_update:
    case OMPD_declare_simd:
    case OMPD_declare_variant:
    case OMPD_begin_declare_variant:
    case OMPD_end_declare_variant:
    case OMPD_declare_target:
    case OMPD_end_declare_target:
    case OMPD_declare_reduction:
    case OMPD_declare_mapper:
    case OMPD_taskloop:
    case OMPD_taskloop_simd:
    case OMPD_master_taskloop:
    case OMPD_master_taskloop_simd:
    case OMPD_parallel_master_taskloop:
    case OMPD_parallel_master_taskloop_simd:
    case OMPD_requires:
    case OMPD_unknown:
    default:
      llvm_unreachable("Unexpected directive.");
    }
  }

  return false;
}

/// Checks if the construct supports lightweight runtime. It must be SPMD
/// construct + inner loop-based construct with static scheduling.
static bool supportsLightweightRuntime(ASTContext &Ctx,
                                       const OMPExecutableDirective &D) {
  if (!supportsSPMDExecutionMode(Ctx, D))
    return false;
  OpenMPDirectiveKind DirectiveKind = D.getDirectiveKind();
  switch (DirectiveKind) {
  case OMPD_target:
  case OMPD_target_teams:
  case OMPD_target_parallel:
    return hasNestedLightweightDirective(Ctx, D);
  case OMPD_target_parallel_for:
  case OMPD_target_parallel_for_simd:
  case OMPD_target_teams_distribute_parallel_for:
  case OMPD_target_teams_distribute_parallel_for_simd:
    // (Last|First)-privates must be shared in parallel region.
    return hasStaticScheduling(D);
  case OMPD_target_simd:
  case OMPD_target_teams_distribute_simd:
    return true;
  case OMPD_target_teams_distribute:
    return false;
  case OMPD_parallel:
  case OMPD_for:
  case OMPD_parallel_for:
  case OMPD_parallel_master:
  case OMPD_parallel_sections:
  case OMPD_for_simd:
  case OMPD_parallel_for_simd:
  case OMPD_cancel:
  case OMPD_cancellation_point:
  case OMPD_ordered:
  case OMPD_threadprivate:
  case OMPD_allocate:
  case OMPD_task:
  case OMPD_simd:
  case OMPD_sections:
  case OMPD_section:
  case OMPD_single:
  case OMPD_master:
  case OMPD_critical:
  case OMPD_taskyield:
  case OMPD_barrier:
  case OMPD_taskwait:
  case OMPD_taskgroup:
  case OMPD_atomic:
  case OMPD_flush:
  case OMPD_depobj:
  case OMPD_scan:
  case OMPD_teams:
  case OMPD_target_data:
  case OMPD_target_exit_data:
  case OMPD_target_enter_data:
  case OMPD_distribute:
  case OMPD_distribute_simd:
  case OMPD_distribute_parallel_for:
  case OMPD_distribute_parallel_for_simd:
  case OMPD_teams_distribute:
  case OMPD_teams_distribute_simd:
  case OMPD_teams_distribute_parallel_for:
  case OMPD_teams_distribute_parallel_for_simd:
  case OMPD_target_update:
  case OMPD_declare_simd:
  case OMPD_declare_variant:
  case OMPD_begin_declare_variant:
  case OMPD_end_declare_variant:
  case OMPD_declare_target:
  case OMPD_end_declare_target:
  case OMPD_declare_reduction:
  case OMPD_declare_mapper:
  case OMPD_taskloop:
  case OMPD_taskloop_simd:
  case OMPD_master_taskloop:
  case OMPD_master_taskloop_simd:
  case OMPD_parallel_master_taskloop:
  case OMPD_parallel_master_taskloop_simd:
  case OMPD_requires:
  case OMPD_unknown:
  default:
    break;
  }
  llvm_unreachable(
      "Unknown programming model for OpenMP directive on NVPTX target.");
}

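// The result feeds RequiresFullRuntime in emitSPMDKernel below: when the
// lightweight runtime suffices, the SPMD entry skips the data-sharing stack
// initialization and tells __kmpc_spmd_kernel_init/_deinit_v2 that no full
// runtime is required.
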
void CGOpenMPRuntimeGPU::emitNonSPMDKernel(const OMPExecutableDirective &D,
                                           StringRef ParentName,
                                           llvm::Function *&OutlinedFn,
                                           llvm::Constant *&OutlinedFnID,
                                           bool IsOffloadEntry,
                                           const RegionCodeGenTy &CodeGen) {
  ExecutionRuntimeModesRAII ModeRAII(CurrentExecutionMode);
  EntryFunctionState EST;
  WorkerFunctionState WST(CGM, D.getBeginLoc());
  Work.clear();
  WrapperFunctionsMap.clear();

  // Emit target region as a standalone region.
  class NVPTXPrePostActionTy : public PrePostActionTy {
    CGOpenMPRuntimeGPU::EntryFunctionState &EST;
    CGOpenMPRuntimeGPU::WorkerFunctionState &WST;

  public:
    NVPTXPrePostActionTy(CGOpenMPRuntimeGPU::EntryFunctionState &EST,
                         CGOpenMPRuntimeGPU::WorkerFunctionState &WST)
        : EST(EST), WST(WST) {}
    void Enter(CodeGenFunction &CGF) override {
      auto &RT =
          static_cast<CGOpenMPRuntimeGPU &>(CGF.CGM.getOpenMPRuntime());
      RT.emitNonSPMDEntryHeader(CGF, EST, WST);
      // Skip target region initialization.
      RT.setLocThreadIdInsertPt(CGF, /*AtCurrentPoint=*/true);
    }
    void Exit(CodeGenFunction &CGF) override {
      auto &RT =
          static_cast<CGOpenMPRuntimeGPU &>(CGF.CGM.getOpenMPRuntime());
      RT.clearLocThreadIdInsertPt(CGF);
      RT.emitNonSPMDEntryFooter(CGF, EST);
    }
  } Action(EST, WST);
  CodeGen.setAction(Action);
  IsInTTDRegion = true;
  // Reserve place for the globalized memory.
  GlobalizedRecords.emplace_back();
  if (!KernelStaticGlobalized) {
    KernelStaticGlobalized = new llvm::GlobalVariable(
        CGM.getModule(), CGM.VoidPtrTy, /*isConstant=*/false,
        llvm::GlobalValue::InternalLinkage,
        llvm::ConstantPointerNull::get(CGM.VoidPtrTy),
        "_openmp_kernel_static_glob_rd$ptr", /*InsertBefore=*/nullptr,
        llvm::GlobalValue::NotThreadLocal,
        CGM.getContext().getTargetAddressSpace(LangAS::cuda_shared));
  }
  emitTargetOutlinedFunctionHelper(D, ParentName, OutlinedFn, OutlinedFnID,
                                   IsOffloadEntry, CodeGen);
  IsInTTDRegion = false;

  // Now change the name of the worker function to correspond to this target
  // region's entry function.
  WST.WorkerFn->setName(Twine(OutlinedFn->getName(), "_worker"));

  // Create the worker function.
  emitWorkerFunction(WST);
}

// Set up NVPTX threads for the master-worker OpenMP scheme.
void CGOpenMPRuntimeGPU::emitNonSPMDEntryHeader(CodeGenFunction &CGF,
                                                EntryFunctionState &EST,
                                                WorkerFunctionState &WST) {
  CGBuilderTy &Bld = CGF.Builder;

  llvm::BasicBlock *WorkerBB = CGF.createBasicBlock(".worker");
  llvm::BasicBlock *MasterCheckBB = CGF.createBasicBlock(".mastercheck");
  llvm::BasicBlock *MasterBB = CGF.createBasicBlock(".master");
  EST.ExitBB = CGF.createBasicBlock(".exit");

  auto &RT = static_cast<CGOpenMPRuntimeGPU &>(CGF.CGM.getOpenMPRuntime());
  llvm::Value *IsWorker =
      Bld.CreateICmpULT(RT.getGPUThreadID(CGF), getThreadLimit(CGF));
  Bld.CreateCondBr(IsWorker, WorkerBB, MasterCheckBB);

  CGF.EmitBlock(WorkerBB);
  emitCall(CGF, WST.Loc, WST.WorkerFn);
  CGF.EmitBranch(EST.ExitBB);

  CGF.EmitBlock(MasterCheckBB);
  llvm::Value *IsMaster =
      Bld.CreateICmpEQ(RT.getGPUThreadID(CGF), getMasterThreadID(CGF));
  Bld.CreateCondBr(IsMaster, MasterBB, EST.ExitBB);

  CGF.EmitBlock(MasterBB);
  IsInTargetMasterThreadRegion = true;
  // SEQUENTIAL (MASTER) REGION START
  // First action in sequential region:
  // Initialize the state of the OpenMP runtime library on the GPU.
  // TODO: Optimize runtime initialization and pass in correct value.
  llvm::Value *Args[] = {getThreadLimit(CGF),
                         Bld.getInt16(/*RequiresOMPRuntime=*/1)};
  CGF.EmitRuntimeCall(
      createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_kernel_init), Args);

  // For data sharing, we need to initialize the stack.
  CGF.EmitRuntimeCall(
      createNVPTXRuntimeFunction(
          OMPRTL_NVPTX__kmpc_data_sharing_init_stack));

  emitGenericVarsProlog(CGF, WST.Loc);
}

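// Schematically, the entry code emitted above branches as follows:
//   entry:        tid < thread_limit ? .worker : .mastercheck
//   .worker:      call the worker loop, then br .exit
//   .mastercheck: tid == master_tid ? .master : .exit
//   .master:      __kmpc_kernel_init(...), data-sharing stack init,
//                 generic vars prolog (the sequential master region follows)
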
void CGOpenMPRuntimeGPU::emitNonSPMDEntryFooter(CodeGenFunction &CGF,
                                                EntryFunctionState &EST) {
  IsInTargetMasterThreadRegion = false;
  if (!CGF.HaveInsertPoint())
    return;

  emitGenericVarsEpilog(CGF);

  if (!EST.ExitBB)
    EST.ExitBB = CGF.createBasicBlock(".exit");

  llvm::BasicBlock *TerminateBB = CGF.createBasicBlock(".termination.notifier");
  CGF.EmitBranch(TerminateBB);

  CGF.EmitBlock(TerminateBB);
  // Signal termination condition.
  // TODO: Optimize runtime initialization and pass in correct value.
  llvm::Value *Args[] = {CGF.Builder.getInt16(/*IsOMPRuntimeInitialized=*/1)};
  CGF.EmitRuntimeCall(
      createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_kernel_deinit), Args);
  // Barrier to terminate worker threads.
  syncCTAThreads(CGF);
  // Master thread jumps to exit point.
  CGF.EmitBranch(EST.ExitBB);

  CGF.EmitBlock(EST.ExitBB);
  EST.ExitBB = nullptr;
}

void CGOpenMPRuntimeGPU::emitSPMDKernel(const OMPExecutableDirective &D,
                                        StringRef ParentName,
                                        llvm::Function *&OutlinedFn,
                                        llvm::Constant *&OutlinedFnID,
                                        bool IsOffloadEntry,
                                        const RegionCodeGenTy &CodeGen) {
  ExecutionRuntimeModesRAII ModeRAII(
      CurrentExecutionMode, RequiresFullRuntime,
      CGM.getLangOpts().OpenMPCUDAForceFullRuntime ||
          !supportsLightweightRuntime(CGM.getContext(), D));
  EntryFunctionState EST;

  // Emit target region as a standalone region.
  class NVPTXPrePostActionTy : public PrePostActionTy {
    CGOpenMPRuntimeGPU &RT;
    CGOpenMPRuntimeGPU::EntryFunctionState &EST;
    const OMPExecutableDirective &D;

  public:
    NVPTXPrePostActionTy(CGOpenMPRuntimeGPU &RT,
                         CGOpenMPRuntimeGPU::EntryFunctionState &EST,
                         const OMPExecutableDirective &D)
        : RT(RT), EST(EST), D(D) {}
    void Enter(CodeGenFunction &CGF) override {
      RT.emitSPMDEntryHeader(CGF, EST, D);
      // Skip target region initialization.
      RT.setLocThreadIdInsertPt(CGF, /*AtCurrentPoint=*/true);
    }
    void Exit(CodeGenFunction &CGF) override {
      RT.clearLocThreadIdInsertPt(CGF);
      RT.emitSPMDEntryFooter(CGF, EST);
    }
  } Action(*this, EST, D);
  CodeGen.setAction(Action);
  IsInTTDRegion = true;
  // Reserve place for the globalized memory.
  GlobalizedRecords.emplace_back();
  if (!KernelStaticGlobalized) {
    KernelStaticGlobalized = new llvm::GlobalVariable(
        CGM.getModule(), CGM.VoidPtrTy, /*isConstant=*/false,
        llvm::GlobalValue::InternalLinkage,
        llvm::ConstantPointerNull::get(CGM.VoidPtrTy),
        "_openmp_kernel_static_glob_rd$ptr", /*InsertBefore=*/nullptr,
        llvm::GlobalValue::NotThreadLocal,
        CGM.getContext().getTargetAddressSpace(LangAS::cuda_shared));
  }
  emitTargetOutlinedFunctionHelper(D, ParentName, OutlinedFn, OutlinedFnID,
                                   IsOffloadEntry, CodeGen);
  IsInTTDRegion = false;
}

void CGOpenMPRuntimeGPU::emitSPMDEntryHeader(
    CodeGenFunction &CGF, EntryFunctionState &EST,
    const OMPExecutableDirective &D) {
  CGBuilderTy &Bld = CGF.Builder;

  // Setup BBs in entry function.
  llvm::BasicBlock *ExecuteBB = CGF.createBasicBlock(".execute");
  EST.ExitBB = CGF.createBasicBlock(".exit");

  llvm::Value *Args[] = {getThreadLimit(CGF, /*IsInSPMDExecutionMode=*/true),
                         /*RequiresOMPRuntime=*/
                         Bld.getInt16(RequiresFullRuntime ? 1 : 0)};
  CGF.EmitRuntimeCall(
      createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_spmd_kernel_init), Args);

  if (RequiresFullRuntime) {
    // For data sharing, we need to initialize the stack.
    CGF.EmitRuntimeCall(createNVPTXRuntimeFunction(
        OMPRTL_NVPTX__kmpc_data_sharing_init_stack_spmd));
  }

  CGF.EmitBranch(ExecuteBB);

  CGF.EmitBlock(ExecuteBB);

  IsInTargetMasterThreadRegion = true;
}

void CGOpenMPRuntimeGPU::emitSPMDEntryFooter(CodeGenFunction &CGF,
                                             EntryFunctionState &EST) {
  IsInTargetMasterThreadRegion = false;
  if (!CGF.HaveInsertPoint())
    return;

  if (!EST.ExitBB)
    EST.ExitBB = CGF.createBasicBlock(".exit");

  llvm::BasicBlock *OMPDeInitBB = CGF.createBasicBlock(".omp.deinit");
  CGF.EmitBranch(OMPDeInitBB);

  CGF.EmitBlock(OMPDeInitBB);
  // DeInitialize the OMP state in the runtime; called by all active threads.
  llvm::Value *Args[] = {/*RequiresOMPRuntime=*/
                         CGF.Builder.getInt16(RequiresFullRuntime ? 1 : 0)};
  CGF.EmitRuntimeCall(
      createNVPTXRuntimeFunction(
          OMPRTL_NVPTX__kmpc_spmd_kernel_deinit_v2), Args);
  CGF.EmitBranch(EST.ExitBB);

  CGF.EmitBlock(EST.ExitBB);
  EST.ExitBB = nullptr;
}

// Create a unique global variable to indicate the execution mode of this
// target region. The execution mode is either 'generic', or 'spmd' depending
// on the target directive. This variable is picked up by the offload library
// to set up the device appropriately before kernel launch. If the execution
// mode is 'generic', the runtime reserves one warp for the master, otherwise,
// all warps participate in parallel work.
static void setPropertyExecutionMode(CodeGenModule &CGM, StringRef Name,
                                     bool Mode) {
  auto *GVMode =
      new llvm::GlobalVariable(CGM.getModule(), CGM.Int8Ty, /*isConstant=*/true,
                               llvm::GlobalValue::WeakAnyLinkage,
                               llvm::ConstantInt::get(CGM.Int8Ty, Mode ? 0 : 1),
                               Twine(Name, "_exec_mode"));
  CGM.addCompilerUsedGlobal(GVMode);
}

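// For a target region outlined as a function 'foo' (an illustrative name),
// this emits a companion i8 global named "foo_exec_mode" holding 0 for SPMD
// and 1 for generic mode; addCompilerUsedGlobal() keeps it alive so the
// offload library can read it before launching the kernel.
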
void CGOpenMPRuntimeGPU::emitWorkerFunction(WorkerFunctionState &WST) {
  ASTContext &Ctx = CGM.getContext();

  CodeGenFunction CGF(CGM, /*suppressNewContext=*/true);
  CGF.StartFunction(GlobalDecl(), Ctx.VoidTy, WST.WorkerFn, WST.CGFI, {},
                    WST.Loc, WST.Loc);
  emitWorkerLoop(CGF, WST);
  CGF.FinishFunction();
}

void CGOpenMPRuntimeGPU::emitWorkerLoop(CodeGenFunction &CGF,
                                        WorkerFunctionState &WST) {
  //
  // The workers enter this loop and wait for parallel work from the master.
  // When the master encounters a parallel region it sets up the work + variable
  // arguments, and wakes up the workers. The workers first check to see if
  // they are required for the parallel region, i.e., within the # of requested
  // parallel threads. The activated workers load the variable arguments and
  // execute the parallel work.
  //

  CGBuilderTy &Bld = CGF.Builder;

  llvm::BasicBlock *AwaitBB = CGF.createBasicBlock(".await.work");
  llvm::BasicBlock *SelectWorkersBB = CGF.createBasicBlock(".select.workers");
  llvm::BasicBlock *ExecuteBB = CGF.createBasicBlock(".execute.parallel");
  llvm::BasicBlock *TerminateBB = CGF.createBasicBlock(".terminate.parallel");
  llvm::BasicBlock *BarrierBB = CGF.createBasicBlock(".barrier.parallel");
  llvm::BasicBlock *ExitBB = CGF.createBasicBlock(".exit");

  CGF.EmitBranch(AwaitBB);

  // Workers wait for work from master.
  CGF.EmitBlock(AwaitBB);
  // Wait for parallel work.
  syncCTAThreads(CGF);

  Address WorkFn =
      CGF.CreateDefaultAlignTempAlloca(CGF.Int8PtrTy, /*Name=*/"work_fn");
  Address ExecStatus =
      CGF.CreateDefaultAlignTempAlloca(CGF.Int8Ty, /*Name=*/"exec_status");
  CGF.InitTempAlloca(ExecStatus, Bld.getInt8(/*C=*/0));
  CGF.InitTempAlloca(WorkFn, llvm::Constant::getNullValue(CGF.Int8PtrTy));

  // TODO: Optimize runtime initialization and pass in correct value.
  llvm::Value *Args[] = {WorkFn.getPointer()};
  llvm::Value *Ret = CGF.EmitRuntimeCall(
      createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_kernel_parallel), Args);
  Bld.CreateStore(Bld.CreateZExt(Ret, CGF.Int8Ty), ExecStatus);

  // On termination condition (workid == 0), exit loop.
  llvm::Value *WorkID = Bld.CreateLoad(WorkFn);
  llvm::Value *ShouldTerminate = Bld.CreateIsNull(WorkID, "should_terminate");
  Bld.CreateCondBr(ShouldTerminate, ExitBB, SelectWorkersBB);

  // Activate requested workers.
  CGF.EmitBlock(SelectWorkersBB);
  llvm::Value *IsActive =
      Bld.CreateIsNotNull(Bld.CreateLoad(ExecStatus), "is_active");
  Bld.CreateCondBr(IsActive, ExecuteBB, BarrierBB);

  // Signal start of parallel region.
  CGF.EmitBlock(ExecuteBB);
  // Skip initialization.
  setLocThreadIdInsertPt(CGF, /*AtCurrentPoint=*/true);

  // Process work items: outlined parallel functions.
  for (llvm::Function *W : Work) {
    // Try to match this outlined function.
    llvm::Value *ID = Bld.CreatePointerBitCastOrAddrSpaceCast(W, CGM.Int8PtrTy);

    llvm::Value *WorkFnMatch =
        Bld.CreateICmpEQ(Bld.CreateLoad(WorkFn), ID, "work_match");

    llvm::BasicBlock *ExecuteFNBB = CGF.createBasicBlock(".execute.fn");
    llvm::BasicBlock *CheckNextBB = CGF.createBasicBlock(".check.next");
    Bld.CreateCondBr(WorkFnMatch, ExecuteFNBB, CheckNextBB);

    // Execute this outlined function.
    CGF.EmitBlock(ExecuteFNBB);

    // Insert call to work function via shared wrapper. The shared
    // wrapper takes two arguments:
    //   - the parallelism level;
    //   - the thread ID.
    emitCall(CGF, WST.Loc, W,
             {Bld.getInt16(/*ParallelLevel=*/0), getThreadID(CGF, WST.Loc)});

    // Go to end of parallel region.
    CGF.EmitBranch(TerminateBB);

    CGF.EmitBlock(CheckNextBB);
  }
  // Default case: call to outlined function through pointer if the target
  // region makes a declare target call that may contain an orphaned parallel
  // directive.
  auto *ParallelFnTy =
      llvm::FunctionType::get(CGM.VoidTy, {CGM.Int16Ty, CGM.Int32Ty},
                              /*isVarArg=*/false);
  llvm::Value *WorkFnCast =
      Bld.CreateBitCast(WorkID, ParallelFnTy->getPointerTo());
  // Insert call to work function via shared wrapper. The shared
  // wrapper takes two arguments:
  //   - the parallelism level;
  //   - the thread ID.
  emitCall(CGF, WST.Loc, {ParallelFnTy, WorkFnCast},
           {Bld.getInt16(/*ParallelLevel=*/0), getThreadID(CGF, WST.Loc)});
  // Go to end of parallel region.
  CGF.EmitBranch(TerminateBB);

  // Signal end of parallel region.
  CGF.EmitBlock(TerminateBB);
  CGF.EmitRuntimeCall(
      createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_kernel_end_parallel),
      llvm::None);
  CGF.EmitBranch(BarrierBB);

  // All active and inactive workers wait at a barrier after parallel region.
  CGF.EmitBlock(BarrierBB);
  // Barrier after parallel region.
  syncCTAThreads(CGF);
  CGF.EmitBranch(AwaitBB);

  // Exit target region.
  CGF.EmitBlock(ExitBB);
  // Skip initialization.
  clearLocThreadIdInsertPt(CGF);
}

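// Worker state machine emitted above, as a sketch:
//   .await.work:         sync; __kmpc_kernel_parallel(&work_fn);
//                        work_fn == null ? br .exit : br .select.workers
//   .select.workers:     exec_status != 0 ? br .execute.parallel
//                                         : br .barrier.parallel
//   .execute.parallel:   run the matching outlined function (or the indirect
//                        default call), then br .terminate.parallel
//   .terminate.parallel: __kmpc_kernel_end_parallel(); br .barrier.parallel
//   .barrier.parallel:   sync; br .await.work
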
llvm::FunctionCallee
CGOpenMPRuntimeGPU::createNVPTXRuntimeFunction(unsigned Function) {
  llvm::FunctionCallee RTLFn = nullptr;
  switch (static_cast<OpenMPRTLFunctionNVPTX>(Function)) {
  case OMPRTL_NVPTX__kmpc_kernel_init: {
    // Build void __kmpc_kernel_init(kmp_int32 thread_limit, int16_t
    // RequiresOMPRuntime);
    llvm::Type *TypeParams[] = {CGM.Int32Ty, CGM.Int16Ty};
    auto *FnTy =
        llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
    RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_kernel_init");
    break;
  }
  case OMPRTL_NVPTX__kmpc_kernel_deinit: {
    // Build void __kmpc_kernel_deinit(int16_t IsOMPRuntimeInitialized);
    llvm::Type *TypeParams[] = {CGM.Int16Ty};
    auto *FnTy =
        llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
    RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_kernel_deinit");
    break;
  }
  case OMPRTL_NVPTX__kmpc_spmd_kernel_init: {
    // Build void __kmpc_spmd_kernel_init(kmp_int32 thread_limit,
    // int16_t RequiresOMPRuntime);
    llvm::Type *TypeParams[] = {CGM.Int32Ty, CGM.Int16Ty};
    auto *FnTy =
        llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
    RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_spmd_kernel_init");
    break;
  }
  case OMPRTL_NVPTX__kmpc_spmd_kernel_deinit_v2: {
    // Build void __kmpc_spmd_kernel_deinit_v2(int16_t RequiresOMPRuntime);
    llvm::Type *TypeParams[] = {CGM.Int16Ty};
    auto *FnTy =
        llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
    RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_spmd_kernel_deinit_v2");
    break;
  }
  case OMPRTL_NVPTX__kmpc_kernel_prepare_parallel: {
    // Build void __kmpc_kernel_prepare_parallel(void *outlined_function);
    llvm::Type *TypeParams[] = {CGM.Int8PtrTy};
    auto *FnTy =
        llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
    RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_kernel_prepare_parallel");
    break;
  }
  case OMPRTL_NVPTX__kmpc_kernel_parallel: {
    // Build bool __kmpc_kernel_parallel(void **outlined_function);
    llvm::Type *TypeParams[] = {CGM.Int8PtrPtrTy};
    llvm::Type *RetTy = CGM.getTypes().ConvertType(CGM.getContext().BoolTy);
    auto *FnTy =
        llvm::FunctionType::get(RetTy, TypeParams, /*isVarArg*/ false);
    RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_kernel_parallel");
    break;
  }
  case OMPRTL_NVPTX__kmpc_kernel_end_parallel: {
    // Build void __kmpc_kernel_end_parallel();
    auto *FnTy =
        llvm::FunctionType::get(CGM.VoidTy, llvm::None, /*isVarArg*/ false);
    RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_kernel_end_parallel");
    break;
  }
  case OMPRTL_NVPTX__kmpc_serialized_parallel: {
    // Build void __kmpc_serialized_parallel(ident_t *loc, kmp_int32
    // global_tid);
    llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
    auto *FnTy =
        llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
    RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_serialized_parallel");
    break;
  }
  case OMPRTL_NVPTX__kmpc_end_serialized_parallel: {
    // Build void __kmpc_end_serialized_parallel(ident_t *loc, kmp_int32
    // global_tid);
    llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
    auto *FnTy =
        llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
    RTLFn =
        CGM.CreateRuntimeFunction(FnTy, "__kmpc_end_serialized_parallel");
    break;
  }
  case OMPRTL_NVPTX__kmpc_shuffle_int32: {
    // Build int32_t __kmpc_shuffle_int32(int32_t element,
    // int16_t lane_offset, int16_t warp_size);
    llvm::Type *TypeParams[] = {CGM.Int32Ty, CGM.Int16Ty, CGM.Int16Ty};
    auto *FnTy =
        llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg*/ false);
    RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_shuffle_int32");
    break;
  }
  case OMPRTL_NVPTX__kmpc_shuffle_int64: {
    // Build int64_t __kmpc_shuffle_int64(int64_t element,
    // int16_t lane_offset, int16_t warp_size);
    llvm::Type *TypeParams[] = {CGM.Int64Ty, CGM.Int16Ty, CGM.Int16Ty};
    auto *FnTy =
        llvm::FunctionType::get(CGM.Int64Ty, TypeParams, /*isVarArg*/ false);
    RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_shuffle_int64");
    break;
  }
  case OMPRTL_NVPTX__kmpc_nvptx_parallel_reduce_nowait_v2: {
    // Build int32_t __kmpc_nvptx_parallel_reduce_nowait_v2(ident_t *loc,
    // kmp_int32 global_tid, kmp_int32 num_vars, size_t reduce_size, void*
    // reduce_data, void (*kmp_ShuffleReductFctPtr)(void *rhsData, int16_t
    // lane_id, int16_t lane_offset, int16_t shortCircuit), void
    // (*kmp_InterWarpCopyFctPtr)(void* src, int32_t warp_num));
    llvm::Type *ShuffleReduceTypeParams[] = {CGM.VoidPtrTy, CGM.Int16Ty,
                                             CGM.Int16Ty, CGM.Int16Ty};
    auto *ShuffleReduceFnTy =
        llvm::FunctionType::get(CGM.VoidTy, ShuffleReduceTypeParams,
                                /*isVarArg=*/false);
    llvm::Type *InterWarpCopyTypeParams[] = {CGM.VoidPtrTy, CGM.Int32Ty};
    auto *InterWarpCopyFnTy =
        llvm::FunctionType::get(CGM.VoidTy, InterWarpCopyTypeParams,
                                /*isVarArg=*/false);
    llvm::Type *TypeParams[] = {getIdentTyPointerTy(),
                                CGM.Int32Ty,
                                CGM.Int32Ty,
                                CGM.SizeTy,
                                CGM.VoidPtrTy,
                                ShuffleReduceFnTy->getPointerTo(),
                                InterWarpCopyFnTy->getPointerTo()};
    auto *FnTy =
        llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg=*/false);
    RTLFn = CGM.CreateRuntimeFunction(
        FnTy, /*Name=*/"__kmpc_nvptx_parallel_reduce_nowait_v2");
    break;
  }
  case OMPRTL_NVPTX__kmpc_end_reduce_nowait: {
    // Build void __kmpc_nvptx_end_reduce_nowait(kmp_int32 global_tid);
    llvm::Type *TypeParams[] = {CGM.Int32Ty};
    auto *FnTy =
        llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
    RTLFn = CGM.CreateRuntimeFunction(
        FnTy, /*Name=*/"__kmpc_nvptx_end_reduce_nowait");
    break;
  }
  case OMPRTL_NVPTX__kmpc_nvptx_teams_reduce_nowait_v2: {
    // Build int32_t __kmpc_nvptx_teams_reduce_nowait_v2(ident_t *loc,
    // kmp_int32 global_tid, void *global_buffer, int32_t num_of_records,
    // void *reduce_data,
    // void (*kmp_ShuffleReductFctPtr)(void *rhsData, int16_t lane_id, int16_t
    // lane_offset, int16_t shortCircuit),
    // void (*kmp_InterWarpCopyFctPtr)(void* src, int32_t warp_num), void
    // (*kmp_ListToGlobalCpyFctPtr)(void *buffer, int idx, void *reduce_data),
    // void (*kmp_GlobalToListCpyFctPtr)(void *buffer, int idx,
    // void *reduce_data), void (*kmp_GlobalToListCpyPtrsFctPtr)(void *buffer,
    // int idx, void *reduce_data), void (*kmp_GlobalToListRedFctPtr)(void
    // *buffer, int idx, void *reduce_data));
    llvm::Type *ShuffleReduceTypeParams[] = {CGM.VoidPtrTy, CGM.Int16Ty,
                                             CGM.Int16Ty, CGM.Int16Ty};
    auto *ShuffleReduceFnTy =
        llvm::FunctionType::get(CGM.VoidTy, ShuffleReduceTypeParams,
                                /*isVarArg=*/false);
    llvm::Type *InterWarpCopyTypeParams[] = {CGM.VoidPtrTy, CGM.Int32Ty};
    auto *InterWarpCopyFnTy =
        llvm::FunctionType::get(CGM.VoidTy, InterWarpCopyTypeParams,
                                /*isVarArg=*/false);
    llvm::Type *GlobalListTypeParams[] = {CGM.VoidPtrTy, CGM.IntTy,
                                          CGM.VoidPtrTy};
    auto *GlobalListFnTy =
        llvm::FunctionType::get(CGM.VoidTy, GlobalListTypeParams,
                                /*isVarArg=*/false);
    llvm::Type *TypeParams[] = {getIdentTyPointerTy(),
                                CGM.Int32Ty,
                                CGM.VoidPtrTy,
                                CGM.Int32Ty,
                                CGM.VoidPtrTy,
                                ShuffleReduceFnTy->getPointerTo(),
                                InterWarpCopyFnTy->getPointerTo(),
                                GlobalListFnTy->getPointerTo(),
                                GlobalListFnTy->getPointerTo(),
                                GlobalListFnTy->getPointerTo(),
                                GlobalListFnTy->getPointerTo()};
    auto *FnTy =
        llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg=*/false);
    RTLFn = CGM.CreateRuntimeFunction(
        FnTy, /*Name=*/"__kmpc_nvptx_teams_reduce_nowait_v2");
    break;
  }
  case OMPRTL_NVPTX__kmpc_data_sharing_init_stack: {
    // Build void __kmpc_data_sharing_init_stack();
    auto *FnTy =
        llvm::FunctionType::get(CGM.VoidTy, llvm::None, /*isVarArg*/ false);
    RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_data_sharing_init_stack");
    break;
  }
  case OMPRTL_NVPTX__kmpc_data_sharing_init_stack_spmd: {
    // Build void __kmpc_data_sharing_init_stack_spmd();
    auto *FnTy =
        llvm::FunctionType::get(CGM.VoidTy, llvm::None, /*isVarArg*/ false);
    RTLFn =
        CGM.CreateRuntimeFunction(FnTy, "__kmpc_data_sharing_init_stack_spmd");
    break;
  }
  case OMPRTL_NVPTX__kmpc_data_sharing_coalesced_push_stack: {
    // Build void *__kmpc_data_sharing_coalesced_push_stack(size_t size,
    // int16_t UseSharedMemory);
    llvm::Type *TypeParams[] = {CGM.SizeTy, CGM.Int16Ty};
    auto *FnTy =
        llvm::FunctionType::get(CGM.VoidPtrTy, TypeParams, /*isVarArg=*/false);
    RTLFn = CGM.CreateRuntimeFunction(
        FnTy, /*Name=*/"__kmpc_data_sharing_coalesced_push_stack");
    break;
  }
  case OMPRTL_NVPTX__kmpc_data_sharing_push_stack: {
    // Build void *__kmpc_data_sharing_push_stack(size_t size, int16_t
    // UseSharedMemory);
    llvm::Type *TypeParams[] = {CGM.SizeTy, CGM.Int16Ty};
    auto *FnTy =
        llvm::FunctionType::get(CGM.VoidPtrTy, TypeParams, /*isVarArg=*/false);
    RTLFn = CGM.CreateRuntimeFunction(
        FnTy, /*Name=*/"__kmpc_data_sharing_push_stack");
    break;
  }
  case OMPRTL_NVPTX__kmpc_data_sharing_pop_stack: {
    // Build void __kmpc_data_sharing_pop_stack(void *a);
    llvm::Type *TypeParams[] = {CGM.VoidPtrTy};
    auto *FnTy =
        llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
    RTLFn = CGM.CreateRuntimeFunction(FnTy,
                                      /*Name=*/"__kmpc_data_sharing_pop_stack");
    break;
  }
  case OMPRTL_NVPTX__kmpc_begin_sharing_variables: {
    // Build void __kmpc_begin_sharing_variables(void ***args, size_t n_args);
    llvm::Type *TypeParams[] = {CGM.Int8PtrPtrTy->getPointerTo(), CGM.SizeTy};
    auto *FnTy =
        llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
    RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_begin_sharing_variables");
    break;
  }
  case OMPRTL_NVPTX__kmpc_end_sharing_variables: {
    // Build void __kmpc_end_sharing_variables();
    auto *FnTy =
        llvm::FunctionType::get(CGM.VoidTy, llvm::None, /*isVarArg*/ false);
    RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_end_sharing_variables");
    break;
  }
  case OMPRTL_NVPTX__kmpc_get_shared_variables: {
    // Build void __kmpc_get_shared_variables(void ***GlobalArgs);
    llvm::Type *TypeParams[] = {CGM.Int8PtrPtrTy->getPointerTo()};
    auto *FnTy =
        llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
    RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_get_shared_variables");
    break;
  }
  case OMPRTL_NVPTX__kmpc_parallel_level: {
    // Build uint16_t __kmpc_parallel_level(ident_t *loc, kmp_int32
    // global_tid);
    llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
    auto *FnTy =
        llvm::FunctionType::get(CGM.Int16Ty, TypeParams, /*isVarArg*/ false);
    RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_parallel_level");
    break;
  }
  case OMPRTL_NVPTX__kmpc_is_spmd_exec_mode: {
    // Build int8_t __kmpc_is_spmd_exec_mode();
    auto *FnTy = llvm::FunctionType::get(CGM.Int8Ty, /*isVarArg=*/false);
    RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_is_spmd_exec_mode");
    break;
  }
  case OMPRTL_NVPTX__kmpc_get_team_static_memory: {
    // Build void __kmpc_get_team_static_memory(int16_t isSPMDExecutionMode,
    // const void *buf, size_t size, int16_t is_shared, const void **res);
    llvm::Type *TypeParams[] = {CGM.Int16Ty, CGM.VoidPtrTy, CGM.SizeTy,
                                CGM.Int16Ty, CGM.VoidPtrPtrTy};
    auto *FnTy =
        llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
    RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_get_team_static_memory");
    break;
  }
  case OMPRTL_NVPTX__kmpc_restore_team_static_memory: {
    // Build void __kmpc_restore_team_static_memory(int16_t
    // isSPMDExecutionMode, int16_t is_shared);
    llvm::Type *TypeParams[] = {CGM.Int16Ty, CGM.Int16Ty};
    auto *FnTy =
        llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
    RTLFn =
        CGM.CreateRuntimeFunction(FnTy, "__kmpc_restore_team_static_memory");
    break;
  }
  case OMPRTL__kmpc_barrier: {
    // Build void __kmpc_barrier(ident_t *loc, kmp_int32 global_tid);
    llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
    auto *FnTy =
        llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
    RTLFn =
        CGM.CreateConvergentRuntimeFunction(FnTy, /*Name*/ "__kmpc_barrier");
    break;
  }
  case OMPRTL__kmpc_barrier_simple_spmd: {
    // Build void __kmpc_barrier_simple_spmd(ident_t *loc, kmp_int32
    // global_tid);
    llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
    auto *FnTy =
        llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
    RTLFn = CGM.CreateConvergentRuntimeFunction(
        FnTy, /*Name*/ "__kmpc_barrier_simple_spmd");
    break;
  }
  case OMPRTL_NVPTX__kmpc_warp_active_thread_mask: {
    // Build int32_t __kmpc_warp_active_thread_mask(void);
    auto *FnTy =
        llvm::FunctionType::get(CGM.Int32Ty, llvm::None, /*isVarArg=*/false);
    RTLFn = CGM.CreateConvergentRuntimeFunction(
        FnTy, /*Name=*/"__kmpc_warp_active_thread_mask");
    break;
  }
  case OMPRTL_NVPTX__kmpc_syncwarp: {
    // Build void __kmpc_syncwarp(kmp_int32 Mask);
    auto *FnTy =
        llvm::FunctionType::get(CGM.VoidTy, CGM.Int32Ty, /*isVarArg=*/false);
    RTLFn = CGM.CreateConvergentRuntimeFunction(FnTy, "__kmpc_syncwarp");
    break;
  }
  }
  return RTLFn;
}

void CGOpenMPRuntimeGPU::createOffloadEntry(llvm::Constant *ID,
                                            llvm::Constant *Addr,
                                            uint64_t Size, int32_t,
                                            llvm::GlobalValue::LinkageTypes) {
  // TODO: Add support for global variables on the device after declare target
  // support.
  if (!isa<llvm::Function>(Addr))
    return;
  llvm::Module &M = CGM.getModule();
  llvm::LLVMContext &Ctx = CGM.getLLVMContext();

  // Get "nvvm.annotations" metadata node.
  llvm::NamedMDNode *MD = M.getOrInsertNamedMetadata("nvvm.annotations");

  llvm::Metadata *MDVals[] = {
      llvm::ConstantAsMetadata::get(Addr), llvm::MDString::get(Ctx, "kernel"),
      llvm::ConstantAsMetadata::get(
          llvm::ConstantInt::get(llvm::Type::getInt32Ty(Ctx), 1))};
  // Append metadata to nvvm.annotations.
  MD->addOperand(llvm::MDNode::get(Ctx, MDVals));
}

void CGOpenMPRuntimeGPU::emitTargetOutlinedFunction(
    const OMPExecutableDirective &D, StringRef ParentName,
    llvm::Function *&OutlinedFn, llvm::Constant *&OutlinedFnID,
    bool IsOffloadEntry, const RegionCodeGenTy &CodeGen) {
  if (!IsOffloadEntry) // Nothing to do.
    return;

  assert(!ParentName.empty() && "Invalid target region parent name!");

  bool Mode = supportsSPMDExecutionMode(CGM.getContext(), D);
  if (Mode)
    emitSPMDKernel(D, ParentName, OutlinedFn, OutlinedFnID, IsOffloadEntry,
                   CodeGen);
  else
    emitNonSPMDKernel(D, ParentName, OutlinedFn, OutlinedFnID, IsOffloadEntry,
                      CodeGen);

  setPropertyExecutionMode(CGM, OutlinedFn->getName(), Mode);
}

namespace {
LLVM_ENABLE_BITMASK_ENUMS_IN_NAMESPACE();
/// Enum for accessing the reserved_2 field of the ident_t struct.
enum ModeFlagsTy : unsigned {
  /// Bit set to 1 when in SPMD mode.
  KMP_IDENT_SPMD_MODE = 0x01,
  /// Bit set to 1 when a simplified runtime is used.
  KMP_IDENT_SIMPLE_RT_MODE = 0x02,
  LLVM_MARK_AS_BITMASK_ENUM(/*LargestValue=*/KMP_IDENT_SIMPLE_RT_MODE)
};

/// Special Undefined mode: the combination of non-SPMD mode and the simple
/// runtime.
static const ModeFlagsTy UndefinedMode =
    (~KMP_IDENT_SPMD_MODE) & KMP_IDENT_SIMPLE_RT_MODE;
} // anonymous namespace

unsigned CGOpenMPRuntimeGPU::getDefaultLocationReserved2Flags() const {
  switch (getExecutionMode()) {
  case EM_SPMD:
    if (requiresFullRuntime())
      return KMP_IDENT_SPMD_MODE & (~KMP_IDENT_SIMPLE_RT_MODE);
    return KMP_IDENT_SPMD_MODE | KMP_IDENT_SIMPLE_RT_MODE;
  case EM_NonSPMD:
    assert(requiresFullRuntime() && "Expected full runtime.");
    return (~KMP_IDENT_SPMD_MODE) & (~KMP_IDENT_SIMPLE_RT_MODE);
  case EM_Unknown:
    return UndefinedMode;
  }
  llvm_unreachable("Unknown flags are requested.");
}

CGOpenMPRuntimeGPU::CGOpenMPRuntimeGPU(CodeGenModule &CGM)
    : CGOpenMPRuntime(CGM, "_", "$") {
  if (!CGM.getLangOpts().OpenMPIsDevice)
    llvm_unreachable("OpenMP NVPTX can only handle device code.");
}

void CGOpenMPRuntimeGPU::emitProcBindClause(CodeGenFunction &CGF,
                                            ProcBindKind ProcBind,
                                            SourceLocation Loc) {
  // Do nothing in case of SPMD mode and L0 parallel.
  if (getExecutionMode() == CGOpenMPRuntimeGPU::EM_SPMD)
    return;

  CGOpenMPRuntime::emitProcBindClause(CGF, ProcBind, Loc);
}

void CGOpenMPRuntimeGPU::emitNumThreadsClause(CodeGenFunction &CGF,
                                              llvm::Value *NumThreads,
                                              SourceLocation Loc) {
  // Do nothing in case of SPMD mode and L0 parallel.
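  // (In SPMD mode the thread count is fixed at kernel launch, so an L0
  // num_threads clause cannot change it.)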
  if (getExecutionMode() == CGOpenMPRuntimeGPU::EM_SPMD)
    return;

  CGOpenMPRuntime::emitNumThreadsClause(CGF, NumThreads, Loc);
}

void CGOpenMPRuntimeGPU::emitNumTeamsClause(CodeGenFunction &CGF,
                                            const Expr *NumTeams,
                                            const Expr *ThreadLimit,
                                            SourceLocation Loc) {}

llvm::Function *CGOpenMPRuntimeGPU::emitParallelOutlinedFunction(
    const OMPExecutableDirective &D, const VarDecl *ThreadIDVar,
    OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen) {
  // Emit target region as a standalone region.
  class NVPTXPrePostActionTy : public PrePostActionTy {
    bool &IsInParallelRegion;
    bool PrevIsInParallelRegion;

  public:
    NVPTXPrePostActionTy(bool &IsInParallelRegion)
        : IsInParallelRegion(IsInParallelRegion) {}
    void Enter(CodeGenFunction &CGF) override {
      PrevIsInParallelRegion = IsInParallelRegion;
      IsInParallelRegion = true;
    }
    void Exit(CodeGenFunction &CGF) override {
      IsInParallelRegion = PrevIsInParallelRegion;
    }
  } Action(IsInParallelRegion);
  CodeGen.setAction(Action);
  bool PrevIsInTTDRegion = IsInTTDRegion;
  IsInTTDRegion = false;
  bool PrevIsInTargetMasterThreadRegion = IsInTargetMasterThreadRegion;
  IsInTargetMasterThreadRegion = false;
  auto *OutlinedFun =
      cast<llvm::Function>(CGOpenMPRuntime::emitParallelOutlinedFunction(
          D, ThreadIDVar, InnermostKind, CodeGen));
  if (CGM.getLangOpts().Optimize) {
    OutlinedFun->removeFnAttr(llvm::Attribute::NoInline);
    OutlinedFun->removeFnAttr(llvm::Attribute::OptimizeNone);
    OutlinedFun->addFnAttr(llvm::Attribute::AlwaysInline);
  }
  IsInTargetMasterThreadRegion = PrevIsInTargetMasterThreadRegion;
  IsInTTDRegion = PrevIsInTTDRegion;
  if (getExecutionMode() != CGOpenMPRuntimeGPU::EM_SPMD &&
      !IsInParallelRegion) {
    llvm::Function *WrapperFun =
        createParallelDataSharingWrapper(OutlinedFun, D);
    WrapperFunctionsMap[OutlinedFun] = WrapperFun;
  }

  return OutlinedFun;
}

/// Get list of lastprivate variables from the teams distribute ... or
/// teams {distribute ...} directives.
static void
getDistributeLastprivateVars(ASTContext &Ctx, const OMPExecutableDirective &D,
                             llvm::SmallVectorImpl<const ValueDecl *> &Vars) {
  assert(isOpenMPTeamsDirective(D.getDirectiveKind()) &&
         "expected teams directive.");
  const OMPExecutableDirective *Dir = &D;
  if (!isOpenMPDistributeDirective(D.getDirectiveKind())) {
    if (const Stmt *S = CGOpenMPRuntime::getSingleCompoundChild(
            Ctx,
            D.getInnermostCapturedStmt()->getCapturedStmt()->IgnoreContainers(
                /*IgnoreCaptured=*/true))) {
      Dir = dyn_cast_or_null<OMPExecutableDirective>(S);
      if (Dir && !isOpenMPDistributeDirective(Dir->getDirectiveKind()))
        Dir = nullptr;
    }
  }
  if (!Dir)
    return;
  for (const auto *C : Dir->getClausesOfKind<OMPLastprivateClause>()) {
    for (const Expr *E : C->getVarRefs())
      Vars.push_back(getPrivateItem(E));
  }
}

/// Get list of reduction variables from the teams ... directives.
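/// E.g., for '#pragma omp teams reduction(+ : sum)' this collects the
/// private copy of 'sum' created for the reduction clause.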
static void
getTeamsReductionVars(ASTContext &Ctx, const OMPExecutableDirective &D,
                      llvm::SmallVectorImpl<const ValueDecl *> &Vars) {
  assert(isOpenMPTeamsDirective(D.getDirectiveKind()) &&
         "expected teams directive.");
  for (const auto *C : D.getClausesOfKind<OMPReductionClause>()) {
    for (const Expr *E : C->privates())
      Vars.push_back(getPrivateItem(E));
  }
}

llvm::Function *CGOpenMPRuntimeGPU::emitTeamsOutlinedFunction(
    const OMPExecutableDirective &D, const VarDecl *ThreadIDVar,
    OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen) {
  SourceLocation Loc = D.getBeginLoc();

  const RecordDecl *GlobalizedRD = nullptr;
  llvm::SmallVector<const ValueDecl *, 4> LastPrivatesReductions;
  llvm::SmallDenseMap<const ValueDecl *, const FieldDecl *> MappedDeclsFields;
  unsigned WarpSize = CGM.getTarget().getGridValue(llvm::omp::GV_Warp_Size);
  // Globalize team reduction variables unconditionally in all modes.
  if (getExecutionMode() != CGOpenMPRuntimeGPU::EM_SPMD)
    getTeamsReductionVars(CGM.getContext(), D, LastPrivatesReductions);
  if (getExecutionMode() == CGOpenMPRuntimeGPU::EM_SPMD) {
    getDistributeLastprivateVars(CGM.getContext(), D, LastPrivatesReductions);
    if (!LastPrivatesReductions.empty()) {
      GlobalizedRD = ::buildRecordForGlobalizedVars(
          CGM.getContext(), llvm::None, LastPrivatesReductions,
          MappedDeclsFields, WarpSize);
    }
  } else if (!LastPrivatesReductions.empty()) {
    assert(!TeamAndReductions.first &&
           "Previous team declaration is not expected.");
    TeamAndReductions.first = D.getCapturedStmt(OMPD_teams)->getCapturedDecl();
    std::swap(TeamAndReductions.second, LastPrivatesReductions);
  }

  // Emit target region as a standalone region.
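  // The pre/post action below registers the globalized declarations with the
  // runtime before the team region body is emitted, then emits the matching
  // epilog on exit (see Enter/Exit).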
  class NVPTXPrePostActionTy : public PrePostActionTy {
    SourceLocation &Loc;
    const RecordDecl *GlobalizedRD;
    llvm::SmallDenseMap<const ValueDecl *, const FieldDecl *>
        &MappedDeclsFields;

  public:
    NVPTXPrePostActionTy(
        SourceLocation &Loc, const RecordDecl *GlobalizedRD,
        llvm::SmallDenseMap<const ValueDecl *, const FieldDecl *>
            &MappedDeclsFields)
        : Loc(Loc), GlobalizedRD(GlobalizedRD),
          MappedDeclsFields(MappedDeclsFields) {}
    void Enter(CodeGenFunction &CGF) override {
      auto &Rt =
          static_cast<CGOpenMPRuntimeGPU &>(CGF.CGM.getOpenMPRuntime());
      if (GlobalizedRD) {
        auto I = Rt.FunctionGlobalizedDecls.try_emplace(CGF.CurFn).first;
        I->getSecond().GlobalRecord = GlobalizedRD;
        I->getSecond().MappedParams =
            std::make_unique<CodeGenFunction::OMPMapVars>();
        DeclToAddrMapTy &Data = I->getSecond().LocalVarData;
        for (const auto &Pair : MappedDeclsFields) {
          assert(Pair.getFirst()->isCanonicalDecl() &&
                 "Expected canonical declaration");
          Data.insert(std::make_pair(Pair.getFirst(),
                                     MappedVarData(Pair.getSecond(),
                                                   /*IsOnePerTeam=*/true)));
        }
      }
      Rt.emitGenericVarsProlog(CGF, Loc);
    }
    void Exit(CodeGenFunction &CGF) override {
      static_cast<CGOpenMPRuntimeGPU &>(CGF.CGM.getOpenMPRuntime())
          .emitGenericVarsEpilog(CGF);
    }
  } Action(Loc, GlobalizedRD, MappedDeclsFields);
  CodeGen.setAction(Action);
  llvm::Function *OutlinedFun = CGOpenMPRuntime::emitTeamsOutlinedFunction(
      D, ThreadIDVar, InnermostKind, CodeGen);
  if (CGM.getLangOpts().Optimize) {
    OutlinedFun->removeFnAttr(llvm::Attribute::NoInline);
    OutlinedFun->removeFnAttr(llvm::Attribute::OptimizeNone);
    OutlinedFun->addFnAttr(llvm::Attribute::AlwaysInline);
  }

  return OutlinedFun;
}

void CGOpenMPRuntimeGPU::emitGenericVarsProlog(CodeGenFunction &CGF,
                                               SourceLocation Loc,
                                               bool WithSPMDCheck) {
  if (getDataSharingMode(CGM) != CGOpenMPRuntimeGPU::Generic &&
      getExecutionMode() != CGOpenMPRuntimeGPU::EM_SPMD)
    return;

  CGBuilderTy &Bld = CGF.Builder;

  const auto I = FunctionGlobalizedDecls.find(CGF.CurFn);
  if (I == FunctionGlobalizedDecls.end())
    return;
  if (const RecordDecl *GlobalizedVarsRecord = I->getSecond().GlobalRecord) {
    QualType GlobalRecTy =
        CGM.getContext().getRecordType(GlobalizedVarsRecord);
    QualType SecGlobalRecTy;

    // Recover pointer to this function's global record. The runtime will
    // handle the specifics of the allocation of the memory.
    // Use actual memory size of the record including the padding
    // for alignment purposes.
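    // E.g., a 12-byte record with 8-byte alignment is padded to
    // llvm::alignTo(12, 8) == 16 bytes before being pushed on the stack.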
    unsigned Alignment =
        CGM.getContext().getTypeAlignInChars(GlobalRecTy).getQuantity();
    unsigned GlobalRecordSize =
        CGM.getContext().getTypeSizeInChars(GlobalRecTy).getQuantity();
    GlobalRecordSize = llvm::alignTo(GlobalRecordSize, Alignment);

    llvm::PointerType *GlobalRecPtrTy =
        CGF.ConvertTypeForMem(GlobalRecTy)->getPointerTo();
    llvm::Value *GlobalRecCastAddr;
    llvm::Value *IsTTD = nullptr;
    if (!IsInTTDRegion &&
        (WithSPMDCheck ||
         getExecutionMode() == CGOpenMPRuntimeGPU::EM_Unknown)) {
      llvm::BasicBlock *ExitBB = CGF.createBasicBlock(".exit");
      llvm::BasicBlock *SPMDBB = CGF.createBasicBlock(".spmd");
      llvm::BasicBlock *NonSPMDBB = CGF.createBasicBlock(".non-spmd");
      if (I->getSecond().SecondaryGlobalRecord.hasValue()) {
        llvm::Value *RTLoc = emitUpdateLocation(CGF, Loc);
        llvm::Value *ThreadID = getThreadID(CGF, Loc);
        llvm::Value *PL = CGF.EmitRuntimeCall(
            createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_parallel_level),
            {RTLoc, ThreadID});
        IsTTD = Bld.CreateIsNull(PL);
      }
      llvm::Value *IsSPMD = Bld.CreateIsNotNull(CGF.EmitNounwindRuntimeCall(
          createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_is_spmd_exec_mode)));
      Bld.CreateCondBr(IsSPMD, SPMDBB, NonSPMDBB);
      // There is no need to emit line number for unconditional branch.
      (void)ApplyDebugLocation::CreateEmpty(CGF);
      CGF.EmitBlock(SPMDBB);
      Address RecPtr = Address(llvm::ConstantPointerNull::get(GlobalRecPtrTy),
                               CharUnits::fromQuantity(Alignment));
      CGF.EmitBranch(ExitBB);
      // There is no need to emit line number for unconditional branch.
      (void)ApplyDebugLocation::CreateEmpty(CGF);
      CGF.EmitBlock(NonSPMDBB);
      llvm::Value *Size = llvm::ConstantInt::get(CGM.SizeTy, GlobalRecordSize);
      if (const RecordDecl *SecGlobalizedVarsRecord =
              I->getSecond().SecondaryGlobalRecord.getValueOr(nullptr)) {
        SecGlobalRecTy =
            CGM.getContext().getRecordType(SecGlobalizedVarsRecord);

        // Recover pointer to this function's global record. The runtime will
        // handle the specifics of the allocation of the memory.
        // Use actual memory size of the record including the padding
        // for alignment purposes.
        unsigned Alignment =
            CGM.getContext().getTypeAlignInChars(SecGlobalRecTy).getQuantity();
        unsigned GlobalRecordSize =
            CGM.getContext().getTypeSizeInChars(SecGlobalRecTy).getQuantity();
        GlobalRecordSize = llvm::alignTo(GlobalRecordSize, Alignment);
        Size = Bld.CreateSelect(
            IsTTD, llvm::ConstantInt::get(CGM.SizeTy, GlobalRecordSize), Size);
      }
      // TODO: allow the usage of shared memory to be controlled by
      // the user, for now, default to global.
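      // The call built below is roughly (sketch):
      //   void *rec = __kmpc_data_sharing_coalesced_push_stack(
      //       Size, /*UseSharedMemory=*/0);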
      llvm::Value *GlobalRecordSizeArg[] = {
          Size, CGF.Builder.getInt16(/*UseSharedMemory=*/0)};
      llvm::Value *GlobalRecValue = CGF.EmitRuntimeCall(
          createNVPTXRuntimeFunction(
              OMPRTL_NVPTX__kmpc_data_sharing_coalesced_push_stack),
          GlobalRecordSizeArg);
      GlobalRecCastAddr = Bld.CreatePointerBitCastOrAddrSpaceCast(
          GlobalRecValue, GlobalRecPtrTy);
      CGF.EmitBlock(ExitBB);
      auto *Phi = Bld.CreatePHI(GlobalRecPtrTy,
                                /*NumReservedValues=*/2, "_select_stack");
      Phi->addIncoming(RecPtr.getPointer(), SPMDBB);
      Phi->addIncoming(GlobalRecCastAddr, NonSPMDBB);
      GlobalRecCastAddr = Phi;
      I->getSecond().GlobalRecordAddr = Phi;
      I->getSecond().IsInSPMDModeFlag = IsSPMD;
    } else if (!CGM.getLangOpts().OpenMPCUDATargetParallel && IsInTTDRegion) {
      assert(GlobalizedRecords.back().Records.size() < 2 &&
             "Expected less than 2 globalized records: one for target and one "
             "for teams.");
      unsigned Offset = 0;
      for (const RecordDecl *RD : GlobalizedRecords.back().Records) {
        QualType RDTy = CGM.getContext().getRecordType(RD);
        unsigned Alignment =
            CGM.getContext().getTypeAlignInChars(RDTy).getQuantity();
        unsigned Size = CGM.getContext().getTypeSizeInChars(RDTy).getQuantity();
        Offset =
            llvm::alignTo(llvm::alignTo(Offset, Alignment) + Size, Alignment);
      }
      unsigned Alignment =
          CGM.getContext().getTypeAlignInChars(GlobalRecTy).getQuantity();
      Offset = llvm::alignTo(Offset, Alignment);
      GlobalizedRecords.back().Records.push_back(GlobalizedVarsRecord);
      ++GlobalizedRecords.back().RegionCounter;
      if (GlobalizedRecords.back().Records.size() == 1) {
        assert(KernelStaticGlobalized &&
               "Kernel static pointer must be initialized already.");
        auto *UseSharedMemory = new llvm::GlobalVariable(
            CGM.getModule(), CGM.Int16Ty, /*isConstant=*/true,
            llvm::GlobalValue::InternalLinkage, nullptr,
            "_openmp_static_kernel$is_shared");
        UseSharedMemory->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
        QualType Int16Ty = CGM.getContext().getIntTypeForBitwidth(
            /*DestWidth=*/16, /*Signed=*/0);
        llvm::Value *IsInSharedMemory = CGF.EmitLoadOfScalar(
            Address(UseSharedMemory,
                    CGM.getContext().getTypeAlignInChars(Int16Ty)),
            /*Volatile=*/false, Int16Ty, Loc);
        auto *StaticGlobalized = new llvm::GlobalVariable(
            CGM.getModule(), CGM.Int8Ty, /*isConstant=*/false,
            llvm::GlobalValue::CommonLinkage, nullptr);
        auto *RecSize = new llvm::GlobalVariable(
            CGM.getModule(), CGM.SizeTy, /*isConstant=*/true,
            llvm::GlobalValue::InternalLinkage, nullptr,
            "_openmp_static_kernel$size");
        RecSize->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
        llvm::Value *Ld = CGF.EmitLoadOfScalar(
            Address(RecSize, CGM.getSizeAlign()), /*Volatile=*/false,
            CGM.getContext().getSizeType(), Loc);
        llvm::Value *ResAddr = Bld.CreatePointerBitCastOrAddrSpaceCast(
            KernelStaticGlobalized, CGM.VoidPtrPtrTy);
        llvm::Value *GlobalRecordSizeArg[] = {
            llvm::ConstantInt::get(
                CGM.Int16Ty,
                getExecutionMode() == CGOpenMPRuntimeGPU::EM_SPMD ? 1 : 0),
            StaticGlobalized, Ld, IsInSharedMemory, ResAddr};
        CGF.EmitRuntimeCall(createNVPTXRuntimeFunction(
                                OMPRTL_NVPTX__kmpc_get_team_static_memory),
                            GlobalRecordSizeArg);
        GlobalizedRecords.back().Buffer = StaticGlobalized;
        GlobalizedRecords.back().RecSize = RecSize;
        GlobalizedRecords.back().UseSharedMemory = UseSharedMemory;
        GlobalizedRecords.back().Loc = Loc;
      }
      assert(KernelStaticGlobalized && "Global address must be set already.");
      Address FrameAddr = CGF.EmitLoadOfPointer(
          Address(KernelStaticGlobalized, CGM.getPointerAlign()),
          CGM.getContext()
              .getPointerType(CGM.getContext().VoidPtrTy)
              .castAs<PointerType>());
      llvm::Value *GlobalRecValue =
          Bld.CreateConstInBoundsGEP(FrameAddr, Offset).getPointer();
      I->getSecond().GlobalRecordAddr = GlobalRecValue;
      I->getSecond().IsInSPMDModeFlag = nullptr;
      GlobalRecCastAddr = Bld.CreatePointerBitCastOrAddrSpaceCast(
          GlobalRecValue, CGF.ConvertTypeForMem(GlobalRecTy)->getPointerTo());
    } else {
      // TODO: allow the usage of shared memory to be controlled by
      // the user, for now, default to global.
      bool UseSharedMemory =
          IsInTTDRegion && GlobalRecordSize <= SharedMemorySize;
      llvm::Value *GlobalRecordSizeArg[] = {
          llvm::ConstantInt::get(CGM.SizeTy, GlobalRecordSize),
          CGF.Builder.getInt16(UseSharedMemory ? 1 : 0)};
      llvm::Value *GlobalRecValue = CGF.EmitRuntimeCall(
          createNVPTXRuntimeFunction(
              IsInTTDRegion
                  ? OMPRTL_NVPTX__kmpc_data_sharing_push_stack
                  : OMPRTL_NVPTX__kmpc_data_sharing_coalesced_push_stack),
          GlobalRecordSizeArg);
      GlobalRecCastAddr = Bld.CreatePointerBitCastOrAddrSpaceCast(
          GlobalRecValue, GlobalRecPtrTy);
      I->getSecond().GlobalRecordAddr = GlobalRecValue;
      I->getSecond().IsInSPMDModeFlag = nullptr;
    }
    LValue Base =
        CGF.MakeNaturalAlignPointeeAddrLValue(GlobalRecCastAddr, GlobalRecTy);

    // Emit the "global alloca" which is a GEP from the global declaration
    // record using the pointer returned by the runtime.
    LValue SecBase;
    decltype(I->getSecond().LocalVarData)::const_iterator SecIt;
    if (IsTTD) {
      SecIt = I->getSecond().SecondaryLocalVarData->begin();
      llvm::PointerType *SecGlobalRecPtrTy =
          CGF.ConvertTypeForMem(SecGlobalRecTy)->getPointerTo();
      SecBase = CGF.MakeNaturalAlignPointeeAddrLValue(
          Bld.CreatePointerBitCastOrAddrSpaceCast(
              I->getSecond().GlobalRecordAddr, SecGlobalRecPtrTy),
          SecGlobalRecTy);
    }
    for (auto &Rec : I->getSecond().LocalVarData) {
      bool EscapedParam = I->getSecond().EscapedParameters.count(Rec.first);
      llvm::Value *ParValue;
      if (EscapedParam) {
        const auto *VD = cast<VarDecl>(Rec.first);
        LValue ParLVal =
            CGF.MakeAddrLValue(CGF.GetAddrOfLocalVar(VD), VD->getType());
        ParValue = CGF.EmitLoadOfScalar(ParLVal, Loc);
      }
      LValue VarAddr = CGF.EmitLValueForField(Base, Rec.second.FD);
      // Emit VarAddr based on lane-id if required.
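      // For one-per-lane fields the record member is an array indexed by the
      // lane id, so the address is effectively &rec->field[lane_id] (sketch);
      // one-per-team fields are addressed directly.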
      QualType VarTy;
      if (Rec.second.IsOnePerTeam) {
        VarTy = Rec.second.FD->getType();
      } else {
        llvm::Value *Ptr = CGF.Builder.CreateInBoundsGEP(
            VarAddr.getAddress(CGF).getPointer(),
            {Bld.getInt32(0), getNVPTXLaneID(CGF)});
        VarTy =
            Rec.second.FD->getType()->castAsArrayTypeUnsafe()->getElementType();
        VarAddr = CGF.MakeAddrLValue(
            Address(Ptr, CGM.getContext().getDeclAlign(Rec.first)), VarTy,
            AlignmentSource::Decl);
      }
      Rec.second.PrivateAddr = VarAddr.getAddress(CGF);
      if (!IsInTTDRegion &&
          (WithSPMDCheck ||
           getExecutionMode() == CGOpenMPRuntimeGPU::EM_Unknown)) {
        assert(I->getSecond().IsInSPMDModeFlag &&
               "Expected unknown execution mode or required SPMD check.");
        if (IsTTD) {
          assert(SecIt->second.IsOnePerTeam &&
                 "Secondary glob data must be one per team.");
          LValue SecVarAddr = CGF.EmitLValueForField(SecBase, SecIt->second.FD);
          VarAddr.setAddress(
              Address(Bld.CreateSelect(IsTTD, SecVarAddr.getPointer(CGF),
                                       VarAddr.getPointer(CGF)),
                      VarAddr.getAlignment()));
          Rec.second.PrivateAddr = VarAddr.getAddress(CGF);
        }
        Address GlobalPtr = Rec.second.PrivateAddr;
        Address LocalAddr = CGF.CreateMemTemp(VarTy, Rec.second.FD->getName());
        Rec.second.PrivateAddr = Address(
            Bld.CreateSelect(I->getSecond().IsInSPMDModeFlag,
                             LocalAddr.getPointer(), GlobalPtr.getPointer()),
            LocalAddr.getAlignment());
      }
      if (EscapedParam) {
        const auto *VD = cast<VarDecl>(Rec.first);
        CGF.EmitStoreOfScalar(ParValue, VarAddr);
        I->getSecond().MappedParams->setVarAddr(CGF, VD,
                                                VarAddr.getAddress(CGF));
      }
      if (IsTTD)
        ++SecIt;
    }
  }
  for (const ValueDecl *VD : I->getSecond().EscapedVariableLengthDecls) {
    // Recover pointer to this function's global record. The runtime will
    // handle the specifics of the allocation of the memory.
    // Use actual memory size of the record including the padding
    // for alignment purposes.
    CGBuilderTy &Bld = CGF.Builder;
    llvm::Value *Size = CGF.getTypeSize(VD->getType());
    CharUnits Align = CGM.getContext().getDeclAlign(VD);
    Size = Bld.CreateNUWAdd(
        Size, llvm::ConstantInt::get(CGF.SizeTy, Align.getQuantity() - 1));
    llvm::Value *AlignVal =
        llvm::ConstantInt::get(CGF.SizeTy, Align.getQuantity());
    Size = Bld.CreateUDiv(Size, AlignVal);
    Size = Bld.CreateNUWMul(Size, AlignVal);
    // TODO: allow the usage of shared memory to be controlled by
    // the user, for now, default to global.
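    // Size was rounded up to the declared alignment above; e.g. a 10-byte
    // variable with 8-byte alignment yields (10 + 7) / 8 * 8 == 16 bytes.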
    llvm::Value *GlobalRecordSizeArg[] = {
        Size, CGF.Builder.getInt16(/*UseSharedMemory=*/0)};
    llvm::Value *GlobalRecValue = CGF.EmitRuntimeCall(
        createNVPTXRuntimeFunction(
            OMPRTL_NVPTX__kmpc_data_sharing_coalesced_push_stack),
        GlobalRecordSizeArg);
    llvm::Value *GlobalRecCastAddr = Bld.CreatePointerBitCastOrAddrSpaceCast(
        GlobalRecValue, CGF.ConvertTypeForMem(VD->getType())->getPointerTo());
    LValue Base = CGF.MakeAddrLValue(GlobalRecCastAddr, VD->getType(),
                                     CGM.getContext().getDeclAlign(VD),
                                     AlignmentSource::Decl);
    I->getSecond().MappedParams->setVarAddr(CGF, cast<VarDecl>(VD),
                                            Base.getAddress(CGF));
    I->getSecond().EscapedVariableLengthDeclsAddrs.emplace_back(GlobalRecValue);
  }
  I->getSecond().MappedParams->apply(CGF);
}

void CGOpenMPRuntimeGPU::emitGenericVarsEpilog(CodeGenFunction &CGF,
                                               bool WithSPMDCheck) {
  if (getDataSharingMode(CGM) != CGOpenMPRuntimeGPU::Generic &&
      getExecutionMode() != CGOpenMPRuntimeGPU::EM_SPMD)
    return;

  const auto I = FunctionGlobalizedDecls.find(CGF.CurFn);
  if (I != FunctionGlobalizedDecls.end()) {
    I->getSecond().MappedParams->restore(CGF);
    if (!CGF.HaveInsertPoint())
      return;
    for (llvm::Value *Addr :
         llvm::reverse(I->getSecond().EscapedVariableLengthDeclsAddrs)) {
      CGF.EmitRuntimeCall(
          createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_data_sharing_pop_stack),
          Addr);
    }
    if (I->getSecond().GlobalRecordAddr) {
      if (!IsInTTDRegion &&
          (WithSPMDCheck ||
           getExecutionMode() == CGOpenMPRuntimeGPU::EM_Unknown)) {
        CGBuilderTy &Bld = CGF.Builder;
        llvm::BasicBlock *ExitBB = CGF.createBasicBlock(".exit");
        llvm::BasicBlock *NonSPMDBB = CGF.createBasicBlock(".non-spmd");
        Bld.CreateCondBr(I->getSecond().IsInSPMDModeFlag, ExitBB, NonSPMDBB);
        // There is no need to emit line number for unconditional branch.
        (void)ApplyDebugLocation::CreateEmpty(CGF);
        CGF.EmitBlock(NonSPMDBB);
        CGF.EmitRuntimeCall(
            createNVPTXRuntimeFunction(
                OMPRTL_NVPTX__kmpc_data_sharing_pop_stack),
            CGF.EmitCastToVoidPtr(I->getSecond().GlobalRecordAddr));
        CGF.EmitBlock(ExitBB);
      } else if (!CGM.getLangOpts().OpenMPCUDATargetParallel && IsInTTDRegion) {
        assert(GlobalizedRecords.back().RegionCounter > 0 &&
               "region counter must be > 0.");
        --GlobalizedRecords.back().RegionCounter;
        // Emit the restore function only in the target region.
        if (GlobalizedRecords.back().RegionCounter == 0) {
          QualType Int16Ty = CGM.getContext().getIntTypeForBitwidth(
              /*DestWidth=*/16, /*Signed=*/0);
          llvm::Value *IsInSharedMemory = CGF.EmitLoadOfScalar(
              Address(GlobalizedRecords.back().UseSharedMemory,
                      CGM.getContext().getTypeAlignInChars(Int16Ty)),
              /*Volatile=*/false, Int16Ty, GlobalizedRecords.back().Loc);
          llvm::Value *Args[] = {
              llvm::ConstantInt::get(
                  CGM.Int16Ty,
                  getExecutionMode() == CGOpenMPRuntimeGPU::EM_SPMD ? 1 : 0),
              IsInSharedMemory};
          CGF.EmitRuntimeCall(
              createNVPTXRuntimeFunction(
                  OMPRTL_NVPTX__kmpc_restore_team_static_memory),
              Args);
        }
      } else {
        CGF.EmitRuntimeCall(createNVPTXRuntimeFunction(
                                OMPRTL_NVPTX__kmpc_data_sharing_pop_stack),
                            I->getSecond().GlobalRecordAddr);
      }
    }
  }
}

void CGOpenMPRuntimeGPU::emitTeamsCall(CodeGenFunction &CGF,
                                       const OMPExecutableDirective &D,
                                       SourceLocation Loc,
                                       llvm::Function *OutlinedFn,
                                       ArrayRef<llvm::Value *> CapturedVars) {
  if (!CGF.HaveInsertPoint())
    return;

  Address ZeroAddr = CGF.CreateDefaultAlignTempAlloca(CGF.Int32Ty,
                                                      /*Name=*/".zero.addr");
  CGF.InitTempAlloca(ZeroAddr, CGF.Builder.getInt32(/*C*/ 0));
  llvm::SmallVector<llvm::Value *, 16> OutlinedFnArgs;
  OutlinedFnArgs.push_back(emitThreadIDAddress(CGF, Loc).getPointer());
  OutlinedFnArgs.push_back(ZeroAddr.getPointer());
  OutlinedFnArgs.append(CapturedVars.begin(), CapturedVars.end());
  emitOutlinedFunctionCall(CGF, Loc, OutlinedFn, OutlinedFnArgs);
}

void CGOpenMPRuntimeGPU::emitParallelCall(
    CodeGenFunction &CGF, SourceLocation Loc, llvm::Function *OutlinedFn,
    ArrayRef<llvm::Value *> CapturedVars, const Expr *IfCond) {
  if (!CGF.HaveInsertPoint())
    return;

  if (getExecutionMode() == CGOpenMPRuntimeGPU::EM_SPMD)
    emitSPMDParallelCall(CGF, Loc, OutlinedFn, CapturedVars, IfCond);
  else
    emitNonSPMDParallelCall(CGF, Loc, OutlinedFn, CapturedVars, IfCond);
}

void CGOpenMPRuntimeGPU::emitNonSPMDParallelCall(
    CodeGenFunction &CGF, SourceLocation Loc, llvm::Value *OutlinedFn,
    ArrayRef<llvm::Value *> CapturedVars, const Expr *IfCond) {
  llvm::Function *Fn = cast<llvm::Function>(OutlinedFn);

  // Force inline this outlined function at its call site.
  Fn->setLinkage(llvm::GlobalValue::InternalLinkage);

  Address ZeroAddr = CGF.CreateDefaultAlignTempAlloca(CGF.Int32Ty,
                                                      /*Name=*/".zero.addr");
  CGF.InitTempAlloca(ZeroAddr, CGF.Builder.getInt32(/*C*/ 0));
  // ThreadId for serialized parallels is 0.
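  // In the serialized path below, the outlined function ends up being called
  // roughly as Fn(&zero_tid, &zero_bound_tid, <captured vars>...) (sketch).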
  Address ThreadIDAddr = ZeroAddr;
  auto &&CodeGen = [this, Fn, CapturedVars, Loc, &ThreadIDAddr](
                       CodeGenFunction &CGF, PrePostActionTy &Action) {
    Action.Enter(CGF);

    Address ZeroAddr =
        CGF.CreateDefaultAlignTempAlloca(CGF.Int32Ty,
                                         /*Name=*/".bound.zero.addr");
    CGF.InitTempAlloca(ZeroAddr, CGF.Builder.getInt32(/*C*/ 0));
    llvm::SmallVector<llvm::Value *, 16> OutlinedFnArgs;
    OutlinedFnArgs.push_back(ThreadIDAddr.getPointer());
    OutlinedFnArgs.push_back(ZeroAddr.getPointer());
    OutlinedFnArgs.append(CapturedVars.begin(), CapturedVars.end());
    emitOutlinedFunctionCall(CGF, Loc, Fn, OutlinedFnArgs);
  };
  auto &&SeqGen = [this, &CodeGen, Loc](CodeGenFunction &CGF,
                                        PrePostActionTy &) {
    RegionCodeGenTy RCG(CodeGen);
    llvm::Value *RTLoc = emitUpdateLocation(CGF, Loc);
    llvm::Value *ThreadID = getThreadID(CGF, Loc);
    llvm::Value *Args[] = {RTLoc, ThreadID};

    NVPTXActionTy Action(
        createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_serialized_parallel),
        Args,
        createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_end_serialized_parallel),
        Args);
    RCG.setAction(Action);
    RCG(CGF);
  };

  auto &&L0ParallelGen = [this, CapturedVars, Fn](CodeGenFunction &CGF,
                                                  PrePostActionTy &Action) {
    CGBuilderTy &Bld = CGF.Builder;
    llvm::Function *WFn = WrapperFunctionsMap[Fn];
    assert(WFn && "Wrapper function does not exist!");
    llvm::Value *ID = Bld.CreateBitOrPointerCast(WFn, CGM.Int8PtrTy);

    // Prepare for parallel region. Indicate the outlined function.
    llvm::Value *Args[] = {ID};
    CGF.EmitRuntimeCall(
        createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_kernel_prepare_parallel),
        Args);

    // Create a private scope that will globalize the arguments
    // passed from the outside of the target region.
    CodeGenFunction::OMPPrivateScope PrivateArgScope(CGF);

    // There's something to share.
    if (!CapturedVars.empty()) {
      // Allocate the list of references used to pass the shared variables to
      // the workers.
      Address SharedArgs =
          CGF.CreateDefaultAlignTempAlloca(CGF.VoidPtrPtrTy, "shared_arg_refs");
      llvm::Value *SharedArgsPtr = SharedArgs.getPointer();

      llvm::Value *DataSharingArgs[] = {
          SharedArgsPtr,
          llvm::ConstantInt::get(CGM.SizeTy, CapturedVars.size())};
      CGF.EmitRuntimeCall(createNVPTXRuntimeFunction(
                              OMPRTL_NVPTX__kmpc_begin_sharing_variables),
                          DataSharingArgs);

      // Store variable address in a list of references to pass to workers.
      unsigned Idx = 0;
      ASTContext &Ctx = CGF.getContext();
      Address SharedArgListAddress = CGF.EmitLoadOfPointer(
          SharedArgs, Ctx.getPointerType(Ctx.getPointerType(Ctx.VoidPtrTy))
                          .castAs<PointerType>());
      for (llvm::Value *V : CapturedVars) {
        Address Dst = Bld.CreateConstInBoundsGEP(SharedArgListAddress, Idx);
        llvm::Value *PtrV;
        if (V->getType()->isIntegerTy())
          PtrV = Bld.CreateIntToPtr(V, CGF.VoidPtrTy);
        else
          PtrV = Bld.CreatePointerBitCastOrAddrSpaceCast(V, CGF.VoidPtrTy);
        CGF.EmitStoreOfScalar(PtrV, Dst, /*Volatile=*/false,
                              Ctx.getPointerType(Ctx.VoidPtrTy));
        ++Idx;
      }
    }

    // Activate workers. This barrier is used by the master to signal
    // work for the workers.
    syncCTAThreads(CGF);

    // OpenMP [2.5, Parallel Construct, p.49]
    // There is an implied barrier at the end of a parallel region. After the
    // end of a parallel region, only the master thread of the team resumes
    // execution of the enclosing task region.
    //
    // The master waits at this barrier until all workers are done.
    syncCTAThreads(CGF);

    if (!CapturedVars.empty())
      CGF.EmitRuntimeCall(
          createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_end_sharing_variables));

    // Remember for post-processing in worker loop.
    Work.emplace_back(WFn);
  };

  auto &&LNParallelGen = [this, Loc, &SeqGen, &L0ParallelGen](
                             CodeGenFunction &CGF, PrePostActionTy &Action) {
    if (IsInParallelRegion) {
      SeqGen(CGF, Action);
    } else if (IsInTargetMasterThreadRegion) {
      L0ParallelGen(CGF, Action);
    } else {
      // Check for master and then parallelism:
      // if (__kmpc_is_spmd_exec_mode() || __kmpc_parallel_level(loc, gtid)) {
      //   Serialized execution.
      // } else {
      //   Worker call.
      // }
      CGBuilderTy &Bld = CGF.Builder;
      llvm::BasicBlock *ExitBB = CGF.createBasicBlock(".exit");
      llvm::BasicBlock *SeqBB = CGF.createBasicBlock(".sequential");
      llvm::BasicBlock *ParallelCheckBB = CGF.createBasicBlock(".parcheck");
      llvm::BasicBlock *MasterBB = CGF.createBasicBlock(".master");
      llvm::Value *IsSPMD = Bld.CreateIsNotNull(CGF.EmitNounwindRuntimeCall(
          createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_is_spmd_exec_mode)));
      Bld.CreateCondBr(IsSPMD, SeqBB, ParallelCheckBB);
      // There is no need to emit line number for unconditional branch.
      (void)ApplyDebugLocation::CreateEmpty(CGF);
      CGF.EmitBlock(ParallelCheckBB);
      llvm::Value *RTLoc = emitUpdateLocation(CGF, Loc);
      llvm::Value *ThreadID = getThreadID(CGF, Loc);
      llvm::Value *PL = CGF.EmitRuntimeCall(
          createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_parallel_level),
          {RTLoc, ThreadID});
      llvm::Value *Res = Bld.CreateIsNotNull(PL);
      Bld.CreateCondBr(Res, SeqBB, MasterBB);
      CGF.EmitBlock(SeqBB);
      SeqGen(CGF, Action);
      CGF.EmitBranch(ExitBB);
      // There is no need to emit line number for unconditional branch.
      (void)ApplyDebugLocation::CreateEmpty(CGF);
      CGF.EmitBlock(MasterBB);
      L0ParallelGen(CGF, Action);
      CGF.EmitBranch(ExitBB);
      // There is no need to emit line number for unconditional branch.
      (void)ApplyDebugLocation::CreateEmpty(CGF);
      // Emit the continuation block for code after the if.
      CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
    }
  };

  if (IfCond) {
    emitIfClause(CGF, IfCond, LNParallelGen, SeqGen);
  } else {
    CodeGenFunction::RunCleanupsScope Scope(CGF);
    RegionCodeGenTy ThenRCG(LNParallelGen);
    ThenRCG(CGF);
  }
}

void CGOpenMPRuntimeGPU::emitSPMDParallelCall(
    CodeGenFunction &CGF, SourceLocation Loc, llvm::Function *OutlinedFn,
    ArrayRef<llvm::Value *> CapturedVars, const Expr *IfCond) {
  // Just call the outlined function to execute the parallel region.
  // OutlinedFn(&gtid, &zero, CapturedStruct);
  //
  llvm::SmallVector<llvm::Value *, 16> OutlinedFnArgs;

  Address ZeroAddr = CGF.CreateDefaultAlignTempAlloca(CGF.Int32Ty,
                                                      /*Name=*/".zero.addr");
  CGF.InitTempAlloca(ZeroAddr, CGF.Builder.getInt32(/*C*/ 0));
  // ThreadId for serialized parallels is 0.
  Address ThreadIDAddr = ZeroAddr;
  auto &&CodeGen = [this, OutlinedFn, CapturedVars, Loc, &ThreadIDAddr](
                       CodeGenFunction &CGF, PrePostActionTy &Action) {
    Action.Enter(CGF);

    Address ZeroAddr =
        CGF.CreateDefaultAlignTempAlloca(CGF.Int32Ty,
                                         /*Name=*/".bound.zero.addr");
    CGF.InitTempAlloca(ZeroAddr, CGF.Builder.getInt32(/*C*/ 0));
    llvm::SmallVector<llvm::Value *, 16> OutlinedFnArgs;
    OutlinedFnArgs.push_back(ThreadIDAddr.getPointer());
    OutlinedFnArgs.push_back(ZeroAddr.getPointer());
    OutlinedFnArgs.append(CapturedVars.begin(), CapturedVars.end());
    emitOutlinedFunctionCall(CGF, Loc, OutlinedFn, OutlinedFnArgs);
  };
  auto &&SeqGen = [this, &CodeGen, Loc](CodeGenFunction &CGF,
                                        PrePostActionTy &) {
    RegionCodeGenTy RCG(CodeGen);
    llvm::Value *RTLoc = emitUpdateLocation(CGF, Loc);
    llvm::Value *ThreadID = getThreadID(CGF, Loc);
    llvm::Value *Args[] = {RTLoc, ThreadID};

    NVPTXActionTy Action(
        createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_serialized_parallel),
        Args,
        createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_end_serialized_parallel),
        Args);
    RCG.setAction(Action);
    RCG(CGF);
  };

  if (IsInTargetMasterThreadRegion) {
    // In the worker we need to use the real thread id.
    ThreadIDAddr = emitThreadIDAddress(CGF, Loc);
    RegionCodeGenTy RCG(CodeGen);
    RCG(CGF);
  } else {
    // If we are not in the target region, it is definitely L2 parallelism or
    // more, because for SPMD mode we always have an L1 parallel level, so we
    // don't need to check for orphaned directives.
    RegionCodeGenTy RCG(SeqGen);
    RCG(CGF);
  }
}

void CGOpenMPRuntimeGPU::syncCTAThreads(CodeGenFunction &CGF) {
  // Always emit simple barriers!
  if (!CGF.HaveInsertPoint())
    return;
  // Build call __kmpc_barrier_simple_spmd(nullptr, 0);
  // This function does not use parameters, so we can emit just default values.
  llvm::Value *Args[] = {
      llvm::ConstantPointerNull::get(
          cast<llvm::PointerType>(getIdentTyPointerTy())),
      llvm::ConstantInt::get(CGF.Int32Ty, /*V=*/0, /*isSigned=*/true)};
  llvm::CallInst *Call = CGF.EmitRuntimeCall(
      createNVPTXRuntimeFunction(OMPRTL__kmpc_barrier_simple_spmd), Args);
  Call->setConvergent();
}

void CGOpenMPRuntimeGPU::emitBarrierCall(CodeGenFunction &CGF,
                                         SourceLocation Loc,
                                         OpenMPDirectiveKind Kind, bool,
                                         bool) {
  // Always emit simple barriers!
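  // The location argument is encoded with the barrier flags for this
  // directive kind (see getDefaultFlagsForBarriers below); the call itself is
  // marked convergent.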
  if (!CGF.HaveInsertPoint())
    return;
  // Build call __kmpc_barrier(loc, thread_id);
  unsigned Flags = getDefaultFlagsForBarriers(Kind);
  llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc, Flags),
                         getThreadID(CGF, Loc)};
  llvm::CallInst *Call = CGF.EmitRuntimeCall(
      createNVPTXRuntimeFunction(OMPRTL__kmpc_barrier), Args);
  Call->setConvergent();
}

void CGOpenMPRuntimeGPU::emitCriticalRegion(
    CodeGenFunction &CGF, StringRef CriticalName,
    const RegionCodeGenTy &CriticalOpGen, SourceLocation Loc,
    const Expr *Hint) {
  llvm::BasicBlock *LoopBB = CGF.createBasicBlock("omp.critical.loop");
  llvm::BasicBlock *TestBB = CGF.createBasicBlock("omp.critical.test");
  llvm::BasicBlock *SyncBB = CGF.createBasicBlock("omp.critical.sync");
  llvm::BasicBlock *BodyBB = CGF.createBasicBlock("omp.critical.body");
  llvm::BasicBlock *ExitBB = CGF.createBasicBlock("omp.critical.exit");

  auto &RT = static_cast<CGOpenMPRuntimeGPU &>(CGF.CGM.getOpenMPRuntime());

  // Get the mask of active threads in the warp.
  llvm::Value *Mask = CGF.EmitRuntimeCall(
      createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_warp_active_thread_mask));
  // Fetch team-local id of the thread.
  llvm::Value *ThreadID = RT.getGPUThreadID(CGF);

  // Get the width of the team.
  llvm::Value *TeamWidth = RT.getGPUNumThreads(CGF);

  // Initialize the counter variable for the loop.
  QualType Int32Ty =
      CGF.getContext().getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/0);
  Address Counter = CGF.CreateMemTemp(Int32Ty, "critical_counter");
  LValue CounterLVal = CGF.MakeAddrLValue(Counter, Int32Ty);
  CGF.EmitStoreOfScalar(llvm::Constant::getNullValue(CGM.Int32Ty), CounterLVal,
                        /*isInit=*/true);

  // Block checks if loop counter exceeds upper bound.
  CGF.EmitBlock(LoopBB);
  llvm::Value *CounterVal = CGF.EmitLoadOfScalar(CounterLVal, Loc);
  llvm::Value *CmpLoopBound = CGF.Builder.CreateICmpSLT(CounterVal, TeamWidth);
  CGF.Builder.CreateCondBr(CmpLoopBound, TestBB, ExitBB);

  // Block tests which single thread should execute the region, and which
  // threads should go straight to the synchronisation point.
  CGF.EmitBlock(TestBB);
  CounterVal = CGF.EmitLoadOfScalar(CounterLVal, Loc);
  llvm::Value *CmpThreadToCounter =
      CGF.Builder.CreateICmpEQ(ThreadID, CounterVal);
  CGF.Builder.CreateCondBr(CmpThreadToCounter, BodyBB, SyncBB);

  // Block emits the body of the critical region.
  CGF.EmitBlock(BodyBB);

  // Output the critical statement.
  CGOpenMPRuntime::emitCriticalRegion(CGF, CriticalName, CriticalOpGen, Loc,
                                      Hint);

  // After the body surrounded by the critical region, the single executing
  // thread will jump to the synchronisation point.
  // Block waits for all threads in the current team to finish, then increments
  // the counter variable and returns to the loop.
  CGF.EmitBlock(SyncBB);
  // Reconverge active threads in the warp.
  (void)CGF.EmitRuntimeCall(
      createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_syncwarp), Mask);

  llvm::Value *IncCounterVal =
      CGF.Builder.CreateNSWAdd(CounterVal, CGF.Builder.getInt32(1));
  CGF.EmitStoreOfScalar(IncCounterVal, CounterLVal);
  CGF.EmitBranch(LoopBB);

  // Block that is reached when all threads in the team complete the region.
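  // (Taken together, the blocks above implement, as a sketch:
  //    for (cnt = 0; cnt < team_width; ++cnt) {
  //      if (tid == cnt) { <critical body> }
  //      __kmpc_syncwarp(mask);
  //    })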
  CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
}

/// Cast value to the specified type.
static llvm::Value *castValueToType(CodeGenFunction &CGF, llvm::Value *Val,
                                    QualType ValTy, QualType CastTy,
                                    SourceLocation Loc) {
  assert(!CGF.getContext().getTypeSizeInChars(CastTy).isZero() &&
         "Cast type must be sized.");
  assert(!CGF.getContext().getTypeSizeInChars(ValTy).isZero() &&
         "Val type must be sized.");
  llvm::Type *LLVMCastTy = CGF.ConvertTypeForMem(CastTy);
  if (ValTy == CastTy)
    return Val;
  if (CGF.getContext().getTypeSizeInChars(ValTy) ==
      CGF.getContext().getTypeSizeInChars(CastTy))
    return CGF.Builder.CreateBitCast(Val, LLVMCastTy);
  if (CastTy->isIntegerType() && ValTy->isIntegerType())
    return CGF.Builder.CreateIntCast(Val, LLVMCastTy,
                                     CastTy->hasSignedIntegerRepresentation());
  Address CastItem = CGF.CreateMemTemp(CastTy);
  Address ValCastItem = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
      CastItem, Val->getType()->getPointerTo(CastItem.getAddressSpace()));
  CGF.EmitStoreOfScalar(Val, ValCastItem, /*Volatile=*/false, ValTy,
                        LValueBaseInfo(AlignmentSource::Type),
                        TBAAAccessInfo());
  return CGF.EmitLoadOfScalar(CastItem, /*Volatile=*/false, CastTy, Loc,
                              LValueBaseInfo(AlignmentSource::Type),
                              TBAAAccessInfo());
}

/// This function creates calls to one of two shuffle functions to copy
/// variables between lanes in a warp.
static llvm::Value *createRuntimeShuffleFunction(CodeGenFunction &CGF,
                                                 llvm::Value *Elem,
                                                 QualType ElemType,
                                                 llvm::Value *Offset,
                                                 SourceLocation Loc) {
  CodeGenModule &CGM = CGF.CGM;
  CGBuilderTy &Bld = CGF.Builder;
  CGOpenMPRuntimeGPU &RT =
      *(static_cast<CGOpenMPRuntimeGPU *>(&CGM.getOpenMPRuntime()));

  CharUnits Size = CGF.getContext().getTypeSizeInChars(ElemType);
  assert(Size.getQuantity() <= 8 &&
         "Unsupported bitwidth in shuffle instruction.");

  OpenMPRTLFunctionNVPTX ShuffleFn = Size.getQuantity() <= 4
                                         ? OMPRTL_NVPTX__kmpc_shuffle_int32
                                         : OMPRTL_NVPTX__kmpc_shuffle_int64;

  // Cast all types to 32- or 64-bit values before calling shuffle routines.
  QualType CastTy = CGF.getContext().getIntTypeForBitwidth(
      Size.getQuantity() <= 4 ? 32 : 64, /*Signed=*/1);
  llvm::Value *ElemCast = castValueToType(CGF, Elem, ElemType, CastTy, Loc);
  llvm::Value *WarpSize =
      Bld.CreateIntCast(RT.getGPUWarpSize(CGF), CGM.Int16Ty, /*isSigned=*/true);

  llvm::Value *ShuffledVal = CGF.EmitRuntimeCall(
      RT.createNVPTXRuntimeFunction(ShuffleFn), {ElemCast, Offset, WarpSize});

  return castValueToType(CGF, ShuffledVal, CastTy, ElemType, Loc);
}

static void shuffleAndStore(CodeGenFunction &CGF, Address SrcAddr,
                            Address DestAddr, QualType ElemType,
                            llvm::Value *Offset, SourceLocation Loc) {
  CGBuilderTy &Bld = CGF.Builder;

  CharUnits Size = CGF.getContext().getTypeSizeInChars(ElemType);
  // Create the loop over the big sized data.
  // ptr = (void*)Elem;
  // ptrEnd = (void*) Elem + 1;
  // Step = 8;
  // while (ptr + Step < ptrEnd)
  //   shuffle((int64_t)*ptr);
  // Step = 4;
  // while (ptr + Step < ptrEnd)
  //   shuffle((int32_t)*ptr);
  // ...
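  // E.g., a 12-byte element is moved as one 8-byte shuffle followed by one
  // 4-byte shuffle; the loop below generalizes this per power-of-two step.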
2904 Address ElemPtr = DestAddr; 2905 Address Ptr = SrcAddr; 2906 Address PtrEnd = Bld.CreatePointerBitCastOrAddrSpaceCast( 2907 Bld.CreateConstGEP(SrcAddr, 1), CGF.VoidPtrTy); 2908 for (int IntSize = 8; IntSize >= 1; IntSize /= 2) { 2909 if (Size < CharUnits::fromQuantity(IntSize)) 2910 continue; 2911 QualType IntType = CGF.getContext().getIntTypeForBitwidth( 2912 CGF.getContext().toBits(CharUnits::fromQuantity(IntSize)), 2913 /*Signed=*/1); 2914 llvm::Type *IntTy = CGF.ConvertTypeForMem(IntType); 2915 Ptr = Bld.CreatePointerBitCastOrAddrSpaceCast(Ptr, IntTy->getPointerTo()); 2916 ElemPtr = 2917 Bld.CreatePointerBitCastOrAddrSpaceCast(ElemPtr, IntTy->getPointerTo()); 2918 if (Size.getQuantity() / IntSize > 1) { 2919 llvm::BasicBlock *PreCondBB = CGF.createBasicBlock(".shuffle.pre_cond"); 2920 llvm::BasicBlock *ThenBB = CGF.createBasicBlock(".shuffle.then"); 2921 llvm::BasicBlock *ExitBB = CGF.createBasicBlock(".shuffle.exit"); 2922 llvm::BasicBlock *CurrentBB = Bld.GetInsertBlock(); 2923 CGF.EmitBlock(PreCondBB); 2924 llvm::PHINode *PhiSrc = 2925 Bld.CreatePHI(Ptr.getType(), /*NumReservedValues=*/2); 2926 PhiSrc->addIncoming(Ptr.getPointer(), CurrentBB); 2927 llvm::PHINode *PhiDest = 2928 Bld.CreatePHI(ElemPtr.getType(), /*NumReservedValues=*/2); 2929 PhiDest->addIncoming(ElemPtr.getPointer(), CurrentBB); 2930 Ptr = Address(PhiSrc, Ptr.getAlignment()); 2931 ElemPtr = Address(PhiDest, ElemPtr.getAlignment()); 2932 llvm::Value *PtrDiff = Bld.CreatePtrDiff( 2933 PtrEnd.getPointer(), Bld.CreatePointerBitCastOrAddrSpaceCast( 2934 Ptr.getPointer(), CGF.VoidPtrTy)); 2935 Bld.CreateCondBr(Bld.CreateICmpSGT(PtrDiff, Bld.getInt64(IntSize - 1)), 2936 ThenBB, ExitBB); 2937 CGF.EmitBlock(ThenBB); 2938 llvm::Value *Res = createRuntimeShuffleFunction( 2939 CGF, 2940 CGF.EmitLoadOfScalar(Ptr, /*Volatile=*/false, IntType, Loc, 2941 LValueBaseInfo(AlignmentSource::Type), 2942 TBAAAccessInfo()), 2943 IntType, Offset, Loc); 2944 CGF.EmitStoreOfScalar(Res, ElemPtr, /*Volatile=*/false, IntType, 2945 LValueBaseInfo(AlignmentSource::Type), 2946 TBAAAccessInfo()); 2947 Address LocalPtr = Bld.CreateConstGEP(Ptr, 1); 2948 Address LocalElemPtr = Bld.CreateConstGEP(ElemPtr, 1); 2949 PhiSrc->addIncoming(LocalPtr.getPointer(), ThenBB); 2950 PhiDest->addIncoming(LocalElemPtr.getPointer(), ThenBB); 2951 CGF.EmitBranch(PreCondBB); 2952 CGF.EmitBlock(ExitBB); 2953 } else { 2954 llvm::Value *Res = createRuntimeShuffleFunction( 2955 CGF, 2956 CGF.EmitLoadOfScalar(Ptr, /*Volatile=*/false, IntType, Loc, 2957 LValueBaseInfo(AlignmentSource::Type), 2958 TBAAAccessInfo()), 2959 IntType, Offset, Loc); 2960 CGF.EmitStoreOfScalar(Res, ElemPtr, /*Volatile=*/false, IntType, 2961 LValueBaseInfo(AlignmentSource::Type), 2962 TBAAAccessInfo()); 2963 Ptr = Bld.CreateConstGEP(Ptr, 1); 2964 ElemPtr = Bld.CreateConstGEP(ElemPtr, 1); 2965 } 2966 Size = Size % IntSize; 2967 } 2968 } 2969 2970 namespace { 2971 enum CopyAction : unsigned { 2972 // RemoteLaneToThread: Copy over a Reduce list from a remote lane in 2973 // the warp using shuffle instructions. 2974 RemoteLaneToThread, 2975 // ThreadCopy: Make a copy of a Reduce list on the thread's stack. 2976 ThreadCopy, 2977 // ThreadToScratchpad: Copy a team-reduced array to the scratchpad. 2978 ThreadToScratchpad, 2979 // ScratchpadToThread: Copy from a scratchpad array in global memory 2980 // containing team-reduced data to a thread's stack. 
2981   ScratchpadToThread,
2982 };
2983 } // namespace
2984
2985 struct CopyOptionsTy {
2986   llvm::Value *RemoteLaneOffset;
2987   llvm::Value *ScratchpadIndex;
2988   llvm::Value *ScratchpadWidth;
2989 };
2990
2991 /// Emit instructions to copy a Reduce list, which contains partially
2992 /// aggregated values, in the specified direction.
2993 static void emitReductionListCopy(
2994     CopyAction Action, CodeGenFunction &CGF, QualType ReductionArrayTy,
2995     ArrayRef<const Expr *> Privates, Address SrcBase, Address DestBase,
2996     CopyOptionsTy CopyOptions = {nullptr, nullptr, nullptr}) {
2997
2998   CodeGenModule &CGM = CGF.CGM;
2999   ASTContext &C = CGM.getContext();
3000   CGBuilderTy &Bld = CGF.Builder;
3001
3002   llvm::Value *RemoteLaneOffset = CopyOptions.RemoteLaneOffset;
3003   llvm::Value *ScratchpadIndex = CopyOptions.ScratchpadIndex;
3004   llvm::Value *ScratchpadWidth = CopyOptions.ScratchpadWidth;
3005
3006   // Iterate, element-by-element, through the source Reduce list and
3007   // make a copy.
3008   unsigned Idx = 0;
3009   unsigned Size = Privates.size();
3010   for (const Expr *Private : Privates) {
3011     Address SrcElementAddr = Address::invalid();
3012     Address DestElementAddr = Address::invalid();
3013     Address DestElementPtrAddr = Address::invalid();
3014     // Should we shuffle in an element from a remote lane?
3015     bool ShuffleInElement = false;
3016     // Set to true to update the pointer in the dest Reduce list to a
3017     // newly created element.
3018     bool UpdateDestListPtr = false;
3019     // Increment the src or dest pointer to the scratchpad, for each
3020     // new element.
3021     bool IncrScratchpadSrc = false;
3022     bool IncrScratchpadDest = false;
3023
3024     switch (Action) {
3025     case RemoteLaneToThread: {
3026       // Step 1.1: Get the address for the src element in the Reduce list.
3027       Address SrcElementPtrAddr = Bld.CreateConstArrayGEP(SrcBase, Idx);
3028       SrcElementAddr = CGF.EmitLoadOfPointer(
3029           SrcElementPtrAddr,
3030           C.getPointerType(Private->getType())->castAs<PointerType>());
3031
3032       // Step 1.2: Create a temporary to store the element in the destination
3033       // Reduce list.
3034       DestElementPtrAddr = Bld.CreateConstArrayGEP(DestBase, Idx);
3035       DestElementAddr =
3036           CGF.CreateMemTemp(Private->getType(), ".omp.reduction.element");
3037       ShuffleInElement = true;
3038       UpdateDestListPtr = true;
3039       break;
3040     }
3041     case ThreadCopy: {
3042       // Step 1.1: Get the address for the src element in the Reduce list.
3043       Address SrcElementPtrAddr = Bld.CreateConstArrayGEP(SrcBase, Idx);
3044       SrcElementAddr = CGF.EmitLoadOfPointer(
3045           SrcElementPtrAddr,
3046           C.getPointerType(Private->getType())->castAs<PointerType>());
3047
3048       // Step 1.2: Get the address for dest element. The destination
3049       // element has already been created on the thread's stack.
3050       DestElementPtrAddr = Bld.CreateConstArrayGEP(DestBase, Idx);
3051       DestElementAddr = CGF.EmitLoadOfPointer(
3052           DestElementPtrAddr,
3053           C.getPointerType(Private->getType())->castAs<PointerType>());
3054       break;
3055     }
3056     case ThreadToScratchpad: {
3057       // Step 1.1: Get the address for the src element in the Reduce list.
3058       Address SrcElementPtrAddr = Bld.CreateConstArrayGEP(SrcBase, Idx);
3059       SrcElementAddr = CGF.EmitLoadOfPointer(
3060           SrcElementPtrAddr,
3061           C.getPointerType(Private->getType())->castAs<PointerType>());
3062
3063       // Step 1.2: Get the address for dest element:
3064       // address = base + index * ElementSizeInChars.
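      // For example (illustrative): with an 8-byte element and
      // ScratchpadIndex == 3, the destination slot starts 24 bytes past
      // the scratchpad base.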
3065       llvm::Value *ElementSizeInChars = CGF.getTypeSize(Private->getType());
3066       llvm::Value *CurrentOffset =
3067           Bld.CreateNUWMul(ElementSizeInChars, ScratchpadIndex);
3068       llvm::Value *ScratchPadElemAbsolutePtrVal =
3069           Bld.CreateNUWAdd(DestBase.getPointer(), CurrentOffset);
3070       ScratchPadElemAbsolutePtrVal =
3071           Bld.CreateIntToPtr(ScratchPadElemAbsolutePtrVal, CGF.VoidPtrTy);
3072       DestElementAddr = Address(ScratchPadElemAbsolutePtrVal,
3073                                 C.getTypeAlignInChars(Private->getType()));
3074       IncrScratchpadDest = true;
3075       break;
3076     }
3077     case ScratchpadToThread: {
3078       // Step 1.1: Get the address for the src element in the scratchpad.
3079       // address = base + index * ElementSizeInChars.
3080       llvm::Value *ElementSizeInChars = CGF.getTypeSize(Private->getType());
3081       llvm::Value *CurrentOffset =
3082           Bld.CreateNUWMul(ElementSizeInChars, ScratchpadIndex);
3083       llvm::Value *ScratchPadElemAbsolutePtrVal =
3084           Bld.CreateNUWAdd(SrcBase.getPointer(), CurrentOffset);
3085       ScratchPadElemAbsolutePtrVal =
3086           Bld.CreateIntToPtr(ScratchPadElemAbsolutePtrVal, CGF.VoidPtrTy);
3087       SrcElementAddr = Address(ScratchPadElemAbsolutePtrVal,
3088                                C.getTypeAlignInChars(Private->getType()));
3089       IncrScratchpadSrc = true;
3090
3091       // Step 1.2: Create a temporary to store the element in the destination
3092       // Reduce list.
3093       DestElementPtrAddr = Bld.CreateConstArrayGEP(DestBase, Idx);
3094       DestElementAddr =
3095           CGF.CreateMemTemp(Private->getType(), ".omp.reduction.element");
3096       UpdateDestListPtr = true;
3097       break;
3098     }
3099     }
3100
3101     // Regardless of the direction of the copy, we emit the load of the src
3102     // element, as it is required in all directions.
3103     SrcElementAddr = Bld.CreateElementBitCast(
3104         SrcElementAddr, CGF.ConvertTypeForMem(Private->getType()));
3105     DestElementAddr = Bld.CreateElementBitCast(DestElementAddr,
3106                                                SrcElementAddr.getElementType());
3107
3108     // Now that all active lanes have read the element in the
3109     // Reduce list, shuffle over the value from the remote lane.
3110     if (ShuffleInElement) {
3111       shuffleAndStore(CGF, SrcElementAddr, DestElementAddr, Private->getType(),
3112                       RemoteLaneOffset, Private->getExprLoc());
3113     } else {
3114       switch (CGF.getEvaluationKind(Private->getType())) {
3115       case TEK_Scalar: {
3116         llvm::Value *Elem = CGF.EmitLoadOfScalar(
3117             SrcElementAddr, /*Volatile=*/false, Private->getType(),
3118             Private->getExprLoc(), LValueBaseInfo(AlignmentSource::Type),
3119             TBAAAccessInfo());
3120         // Store the source element value to the dest element address.
3121         CGF.EmitStoreOfScalar(
3122             Elem, DestElementAddr, /*Volatile=*/false, Private->getType(),
3123             LValueBaseInfo(AlignmentSource::Type), TBAAAccessInfo());
3124         break;
3125       }
3126       case TEK_Complex: {
3127         CodeGenFunction::ComplexPairTy Elem = CGF.EmitLoadOfComplex(
3128             CGF.MakeAddrLValue(SrcElementAddr, Private->getType()),
3129             Private->getExprLoc());
3130         CGF.EmitStoreOfComplex(
3131             Elem, CGF.MakeAddrLValue(DestElementAddr, Private->getType()),
3132             /*isInit=*/false);
3133         break;
3134       }
3135       case TEK_Aggregate:
3136         CGF.EmitAggregateCopy(
3137             CGF.MakeAddrLValue(DestElementAddr, Private->getType()),
3138             CGF.MakeAddrLValue(SrcElementAddr, Private->getType()),
3139             Private->getType(), AggValueSlot::DoesNotOverlap);
3140         break;
3141       }
3142     }
3143
3144     // Step 3.1: Modify the reference in the dest Reduce list as needed,
3145     // i.e., update it to point to the newly
3146     // created element.
The element is live in the current function 3147 // scope and that of functions it invokes (i.e., reduce_function). 3148 // RemoteReduceData[i] = (void*)&RemoteElem 3149 if (UpdateDestListPtr) { 3150 CGF.EmitStoreOfScalar(Bld.CreatePointerBitCastOrAddrSpaceCast( 3151 DestElementAddr.getPointer(), CGF.VoidPtrTy), 3152 DestElementPtrAddr, /*Volatile=*/false, 3153 C.VoidPtrTy); 3154 } 3155 3156 // Step 4.1: Increment SrcBase/DestBase so that it points to the starting 3157 // address of the next element in scratchpad memory, unless we're currently 3158 // processing the last one. Memory alignment is also taken care of here. 3159 if ((IncrScratchpadDest || IncrScratchpadSrc) && (Idx + 1 < Size)) { 3160 llvm::Value *ScratchpadBasePtr = 3161 IncrScratchpadDest ? DestBase.getPointer() : SrcBase.getPointer(); 3162 llvm::Value *ElementSizeInChars = CGF.getTypeSize(Private->getType()); 3163 ScratchpadBasePtr = Bld.CreateNUWAdd( 3164 ScratchpadBasePtr, 3165 Bld.CreateNUWMul(ScratchpadWidth, ElementSizeInChars)); 3166 3167 // Take care of global memory alignment for performance 3168 ScratchpadBasePtr = Bld.CreateNUWSub( 3169 ScratchpadBasePtr, llvm::ConstantInt::get(CGM.SizeTy, 1)); 3170 ScratchpadBasePtr = Bld.CreateUDiv( 3171 ScratchpadBasePtr, 3172 llvm::ConstantInt::get(CGM.SizeTy, GlobalMemoryAlignment)); 3173 ScratchpadBasePtr = Bld.CreateNUWAdd( 3174 ScratchpadBasePtr, llvm::ConstantInt::get(CGM.SizeTy, 1)); 3175 ScratchpadBasePtr = Bld.CreateNUWMul( 3176 ScratchpadBasePtr, 3177 llvm::ConstantInt::get(CGM.SizeTy, GlobalMemoryAlignment)); 3178 3179 if (IncrScratchpadDest) 3180 DestBase = Address(ScratchpadBasePtr, CGF.getPointerAlign()); 3181 else /* IncrScratchpadSrc = true */ 3182 SrcBase = Address(ScratchpadBasePtr, CGF.getPointerAlign()); 3183 } 3184 3185 ++Idx; 3186 } 3187 } 3188 3189 /// This function emits a helper that gathers Reduce lists from the first 3190 /// lane of every active warp to lanes in the first warp. 3191 /// 3192 /// void inter_warp_copy_func(void* reduce_data, num_warps) 3193 /// shared smem[warp_size]; 3194 /// For all data entries D in reduce_data: 3195 /// sync 3196 /// If (I am the first lane in each warp) 3197 /// Copy my local D to smem[warp_id] 3198 /// sync 3199 /// if (I am the first warp) 3200 /// Copy smem[thread_id] to my local D 3201 static llvm::Value *emitInterWarpCopyFunction(CodeGenModule &CGM, 3202 ArrayRef<const Expr *> Privates, 3203 QualType ReductionArrayTy, 3204 SourceLocation Loc) { 3205 ASTContext &C = CGM.getContext(); 3206 llvm::Module &M = CGM.getModule(); 3207 3208 // ReduceList: thread local Reduce list. 3209 // At the stage of the computation when this function is called, partially 3210 // aggregated values reside in the first lane of every active warp. 3211 ImplicitParamDecl ReduceListArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, 3212 C.VoidPtrTy, ImplicitParamDecl::Other); 3213 // NumWarps: number of warps active in the parallel region. This could 3214 // be smaller than 32 (max warps in a CTA) for partial block reduction. 
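  // For example (illustrative): a parallel region with 96 threads and a
  // warp size of 32 passes NumWarps == 3, so only threads 0..2 of warp 0
  // read elements back from the transfer medium.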
3215 ImplicitParamDecl NumWarpsArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, 3216 C.getIntTypeForBitwidth(32, /* Signed */ true), 3217 ImplicitParamDecl::Other); 3218 FunctionArgList Args; 3219 Args.push_back(&ReduceListArg); 3220 Args.push_back(&NumWarpsArg); 3221 3222 const CGFunctionInfo &CGFI = 3223 CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args); 3224 auto *Fn = llvm::Function::Create(CGM.getTypes().GetFunctionType(CGFI), 3225 llvm::GlobalValue::InternalLinkage, 3226 "_omp_reduction_inter_warp_copy_func", &M); 3227 CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, CGFI); 3228 Fn->setDoesNotRecurse(); 3229 CodeGenFunction CGF(CGM); 3230 CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, CGFI, Args, Loc, Loc); 3231 3232 CGBuilderTy &Bld = CGF.Builder; 3233 3234 // This array is used as a medium to transfer, one reduce element at a time, 3235 // the data from the first lane of every warp to lanes in the first warp 3236 // in order to perform the final step of a reduction in a parallel region 3237 // (reduction across warps). The array is placed in NVPTX __shared__ memory 3238 // for reduced latency, as well as to have a distinct copy for concurrently 3239 // executing target regions. The array is declared with common linkage so 3240 // as to be shared across compilation units. 3241 StringRef TransferMediumName = 3242 "__openmp_nvptx_data_transfer_temporary_storage"; 3243 llvm::GlobalVariable *TransferMedium = 3244 M.getGlobalVariable(TransferMediumName); 3245 unsigned WarpSize = CGF.getTarget().getGridValue(llvm::omp::GV_Warp_Size); 3246 if (!TransferMedium) { 3247 auto *Ty = llvm::ArrayType::get(CGM.Int32Ty, WarpSize); 3248 unsigned SharedAddressSpace = C.getTargetAddressSpace(LangAS::cuda_shared); 3249 TransferMedium = new llvm::GlobalVariable( 3250 M, Ty, /*isConstant=*/false, llvm::GlobalVariable::CommonLinkage, 3251 llvm::Constant::getNullValue(Ty), TransferMediumName, 3252 /*InsertBefore=*/nullptr, llvm::GlobalVariable::NotThreadLocal, 3253 SharedAddressSpace); 3254 CGM.addCompilerUsedGlobal(TransferMedium); 3255 } 3256 3257 auto &RT = static_cast<CGOpenMPRuntimeGPU &>(CGF.CGM.getOpenMPRuntime()); 3258 // Get the CUDA thread id of the current OpenMP thread on the GPU. 3259 llvm::Value *ThreadID = RT.getGPUThreadID(CGF); 3260 // nvptx_lane_id = nvptx_id % warpsize 3261 llvm::Value *LaneID = getNVPTXLaneID(CGF); 3262 // nvptx_warp_id = nvptx_id / warpsize 3263 llvm::Value *WarpID = getNVPTXWarpID(CGF); 3264 3265 Address AddrReduceListArg = CGF.GetAddrOfLocalVar(&ReduceListArg); 3266 Address LocalReduceList( 3267 Bld.CreatePointerBitCastOrAddrSpaceCast( 3268 CGF.EmitLoadOfScalar( 3269 AddrReduceListArg, /*Volatile=*/false, C.VoidPtrTy, Loc, 3270 LValueBaseInfo(AlignmentSource::Type), TBAAAccessInfo()), 3271 CGF.ConvertTypeForMem(ReductionArrayTy)->getPointerTo()), 3272 CGF.getPointerAlign()); 3273 3274 unsigned Idx = 0; 3275 for (const Expr *Private : Privates) { 3276 // 3277 // Warp master copies reduce element to transfer medium in __shared__ 3278 // memory. 
3279 // 3280 unsigned RealTySize = 3281 C.getTypeSizeInChars(Private->getType()) 3282 .alignTo(C.getTypeAlignInChars(Private->getType())) 3283 .getQuantity(); 3284 for (unsigned TySize = 4; TySize > 0 && RealTySize > 0; TySize /=2) { 3285 unsigned NumIters = RealTySize / TySize; 3286 if (NumIters == 0) 3287 continue; 3288 QualType CType = C.getIntTypeForBitwidth( 3289 C.toBits(CharUnits::fromQuantity(TySize)), /*Signed=*/1); 3290 llvm::Type *CopyType = CGF.ConvertTypeForMem(CType); 3291 CharUnits Align = CharUnits::fromQuantity(TySize); 3292 llvm::Value *Cnt = nullptr; 3293 Address CntAddr = Address::invalid(); 3294 llvm::BasicBlock *PrecondBB = nullptr; 3295 llvm::BasicBlock *ExitBB = nullptr; 3296 if (NumIters > 1) { 3297 CntAddr = CGF.CreateMemTemp(C.IntTy, ".cnt.addr"); 3298 CGF.EmitStoreOfScalar(llvm::Constant::getNullValue(CGM.IntTy), CntAddr, 3299 /*Volatile=*/false, C.IntTy); 3300 PrecondBB = CGF.createBasicBlock("precond"); 3301 ExitBB = CGF.createBasicBlock("exit"); 3302 llvm::BasicBlock *BodyBB = CGF.createBasicBlock("body"); 3303 // There is no need to emit line number for unconditional branch. 3304 (void)ApplyDebugLocation::CreateEmpty(CGF); 3305 CGF.EmitBlock(PrecondBB); 3306 Cnt = CGF.EmitLoadOfScalar(CntAddr, /*Volatile=*/false, C.IntTy, Loc); 3307 llvm::Value *Cmp = 3308 Bld.CreateICmpULT(Cnt, llvm::ConstantInt::get(CGM.IntTy, NumIters)); 3309 Bld.CreateCondBr(Cmp, BodyBB, ExitBB); 3310 CGF.EmitBlock(BodyBB); 3311 } 3312 // kmpc_barrier. 3313 CGM.getOpenMPRuntime().emitBarrierCall(CGF, Loc, OMPD_unknown, 3314 /*EmitChecks=*/false, 3315 /*ForceSimpleCall=*/true); 3316 llvm::BasicBlock *ThenBB = CGF.createBasicBlock("then"); 3317 llvm::BasicBlock *ElseBB = CGF.createBasicBlock("else"); 3318 llvm::BasicBlock *MergeBB = CGF.createBasicBlock("ifcont"); 3319 3320 // if (lane_id == 0) 3321 llvm::Value *IsWarpMaster = Bld.CreateIsNull(LaneID, "warp_master"); 3322 Bld.CreateCondBr(IsWarpMaster, ThenBB, ElseBB); 3323 CGF.EmitBlock(ThenBB); 3324 3325 // Reduce element = LocalReduceList[i] 3326 Address ElemPtrPtrAddr = Bld.CreateConstArrayGEP(LocalReduceList, Idx); 3327 llvm::Value *ElemPtrPtr = CGF.EmitLoadOfScalar( 3328 ElemPtrPtrAddr, /*Volatile=*/false, C.VoidPtrTy, SourceLocation()); 3329 // elemptr = ((CopyType*)(elemptrptr)) + I 3330 Address ElemPtr = Address(ElemPtrPtr, Align); 3331 ElemPtr = Bld.CreateElementBitCast(ElemPtr, CopyType); 3332 if (NumIters > 1) { 3333 ElemPtr = Address(Bld.CreateGEP(ElemPtr.getPointer(), Cnt), 3334 ElemPtr.getAlignment()); 3335 } 3336 3337 // Get pointer to location in transfer medium. 3338 // MediumPtr = &medium[warp_id] 3339 llvm::Value *MediumPtrVal = Bld.CreateInBoundsGEP( 3340 TransferMedium, {llvm::Constant::getNullValue(CGM.Int64Ty), WarpID}); 3341 Address MediumPtr(MediumPtrVal, Align); 3342 // Casting to actual data type. 3343 // MediumPtr = (CopyType*)MediumPtrAddr; 3344 MediumPtr = Bld.CreateElementBitCast(MediumPtr, CopyType); 3345 3346 // elem = *elemptr 3347 //*MediumPtr = elem 3348 llvm::Value *Elem = CGF.EmitLoadOfScalar( 3349 ElemPtr, /*Volatile=*/false, CType, Loc, 3350 LValueBaseInfo(AlignmentSource::Type), TBAAAccessInfo()); 3351 // Store the source element value to the dest element address. 3352 CGF.EmitStoreOfScalar(Elem, MediumPtr, /*Volatile=*/true, CType, 3353 LValueBaseInfo(AlignmentSource::Type), 3354 TBAAAccessInfo()); 3355 3356 Bld.CreateBr(MergeBB); 3357 3358 CGF.EmitBlock(ElseBB); 3359 Bld.CreateBr(MergeBB); 3360 3361 CGF.EmitBlock(MergeBB); 3362 3363 // kmpc_barrier. 
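      // This second barrier ensures that every warp master has finished
      // writing its element to the transfer medium before the threads of
      // warp 0 read it back below.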
3364 CGM.getOpenMPRuntime().emitBarrierCall(CGF, Loc, OMPD_unknown, 3365 /*EmitChecks=*/false, 3366 /*ForceSimpleCall=*/true); 3367 3368 // 3369 // Warp 0 copies reduce element from transfer medium. 3370 // 3371 llvm::BasicBlock *W0ThenBB = CGF.createBasicBlock("then"); 3372 llvm::BasicBlock *W0ElseBB = CGF.createBasicBlock("else"); 3373 llvm::BasicBlock *W0MergeBB = CGF.createBasicBlock("ifcont"); 3374 3375 Address AddrNumWarpsArg = CGF.GetAddrOfLocalVar(&NumWarpsArg); 3376 llvm::Value *NumWarpsVal = CGF.EmitLoadOfScalar( 3377 AddrNumWarpsArg, /*Volatile=*/false, C.IntTy, Loc); 3378 3379 // Up to 32 threads in warp 0 are active. 3380 llvm::Value *IsActiveThread = 3381 Bld.CreateICmpULT(ThreadID, NumWarpsVal, "is_active_thread"); 3382 Bld.CreateCondBr(IsActiveThread, W0ThenBB, W0ElseBB); 3383 3384 CGF.EmitBlock(W0ThenBB); 3385 3386 // SrcMediumPtr = &medium[tid] 3387 llvm::Value *SrcMediumPtrVal = Bld.CreateInBoundsGEP( 3388 TransferMedium, 3389 {llvm::Constant::getNullValue(CGM.Int64Ty), ThreadID}); 3390 Address SrcMediumPtr(SrcMediumPtrVal, Align); 3391 // SrcMediumVal = *SrcMediumPtr; 3392 SrcMediumPtr = Bld.CreateElementBitCast(SrcMediumPtr, CopyType); 3393 3394 // TargetElemPtr = (CopyType*)(SrcDataAddr[i]) + I 3395 Address TargetElemPtrPtr = Bld.CreateConstArrayGEP(LocalReduceList, Idx); 3396 llvm::Value *TargetElemPtrVal = CGF.EmitLoadOfScalar( 3397 TargetElemPtrPtr, /*Volatile=*/false, C.VoidPtrTy, Loc); 3398 Address TargetElemPtr = Address(TargetElemPtrVal, Align); 3399 TargetElemPtr = Bld.CreateElementBitCast(TargetElemPtr, CopyType); 3400 if (NumIters > 1) { 3401 TargetElemPtr = Address(Bld.CreateGEP(TargetElemPtr.getPointer(), Cnt), 3402 TargetElemPtr.getAlignment()); 3403 } 3404 3405 // *TargetElemPtr = SrcMediumVal; 3406 llvm::Value *SrcMediumValue = 3407 CGF.EmitLoadOfScalar(SrcMediumPtr, /*Volatile=*/true, CType, Loc); 3408 CGF.EmitStoreOfScalar(SrcMediumValue, TargetElemPtr, /*Volatile=*/false, 3409 CType); 3410 Bld.CreateBr(W0MergeBB); 3411 3412 CGF.EmitBlock(W0ElseBB); 3413 Bld.CreateBr(W0MergeBB); 3414 3415 CGF.EmitBlock(W0MergeBB); 3416 3417 if (NumIters > 1) { 3418 Cnt = Bld.CreateNSWAdd(Cnt, llvm::ConstantInt::get(CGM.IntTy, /*V=*/1)); 3419 CGF.EmitStoreOfScalar(Cnt, CntAddr, /*Volatile=*/false, C.IntTy); 3420 CGF.EmitBranch(PrecondBB); 3421 (void)ApplyDebugLocation::CreateEmpty(CGF); 3422 CGF.EmitBlock(ExitBB); 3423 } 3424 RealTySize %= TySize; 3425 } 3426 ++Idx; 3427 } 3428 3429 CGF.FinishFunction(); 3430 return Fn; 3431 } 3432 3433 /// Emit a helper that reduces data across two OpenMP threads (lanes) 3434 /// in the same warp. It uses shuffle instructions to copy over data from 3435 /// a remote lane's stack. The reduction algorithm performed is specified 3436 /// by the fourth parameter. 3437 /// 3438 /// Algorithm Versions. 3439 /// Full Warp Reduce (argument value 0): 3440 /// This algorithm assumes that all 32 lanes are active and gathers 3441 /// data from these 32 lanes, producing a single resultant value. 3442 /// Contiguous Partial Warp Reduce (argument value 1): 3443 /// This algorithm assumes that only a *contiguous* subset of lanes 3444 /// are active. This happens for the last warp in a parallel region 3445 /// when the user specified num_threads is not an integer multiple of 3446 /// 32. This contiguous subset always starts with the zeroth lane. 3447 /// Partial Warp Reduce (argument value 2): 3448 /// This algorithm gathers data from any number of lanes at any position. 3449 /// All reduced values are stored in the lowest possible lane. 
The set
3450 /// of problems every algorithm addresses is a superset of those
3451 /// addressable by algorithms with a lower version number. Overhead
3452 /// increases as algorithm version increases.
3453 ///
3454 /// Terminology
3455 /// Reduce element:
3456 ///   Reduce element refers to an individual data field of primitive
3457 ///   data type to be combined and reduced across threads.
3458 /// Reduce list:
3459 ///   Reduce list refers to a collection of local, thread-private
3460 ///   reduce elements.
3461 /// Remote Reduce list:
3462 ///   Remote Reduce list refers to a collection of remote (relative to
3463 ///   the current thread) reduce elements.
3464 ///
3465 /// We distinguish between three states of threads that are important to
3466 /// the implementation of this function.
3467 /// Alive threads:
3468 ///   Threads in a warp executing the SIMT instruction, as distinguished from
3469 ///   threads that are inactive due to divergent control flow.
3470 /// Active threads:
3471 ///   The minimal set of threads that has to be alive upon entry to this
3472 ///   function. The computation is correct iff active threads are alive.
3473 ///   Some threads are alive but they are not active because they do not
3474 ///   contribute to the computation in any useful manner. Turning them off
3475 ///   may introduce control flow overheads without any tangible benefits.
3476 /// Effective threads:
3477 ///   In order to comply with the argument requirements of the shuffle
3478 ///   function, we must keep all lanes holding data alive. But at most
3479 ///   half of them perform value aggregation; we refer to this half of
3480 ///   threads as effective. The other half simply hands off
3481 ///   its data.
3482 ///
3483 /// Procedure
3484 /// Value shuffle:
3485 ///   In this step, active threads transfer data from higher lane positions
3486 ///   in the warp to lower lane positions, creating the Remote Reduce list.
3487 /// Value aggregation:
3488 ///   In this step, effective threads combine their thread local Reduce list
3489 ///   with the Remote Reduce list and store the result in the thread local
3490 ///   Reduce list.
3491 /// Value copy:
3492 ///   In this step, we deal with the assumption made by algorithm version 1
3493 ///   (i.e. the contiguity assumption). When we have an odd number of lanes
3494 ///   active, say 2k+1, only k threads will be effective and therefore k
3495 ///   new values will be produced. However, the Reduce list owned by the
3496 ///   (2k+1)th thread is ignored in the value aggregation. Therefore
3497 ///   we copy the Reduce list from the (2k+1)th lane to the (k+1)th lane so
3498 ///   that the contiguity assumption still holds.
3499 static llvm::Function *emitShuffleAndReduceFunction(
3500     CodeGenModule &CGM, ArrayRef<const Expr *> Privates,
3501     QualType ReductionArrayTy, llvm::Function *ReduceFn, SourceLocation Loc) {
3502   ASTContext &C = CGM.getContext();
3503
3504   // Thread local Reduce list used to host the values of data to be reduced.
3505   ImplicitParamDecl ReduceListArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
3506                                   C.VoidPtrTy, ImplicitParamDecl::Other);
3507   // Current lane id; could be logical.
3508   ImplicitParamDecl LaneIDArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.ShortTy,
3509                               ImplicitParamDecl::Other);
3510   // Offset of the remote source lane relative to the current lane.
3511   ImplicitParamDecl RemoteLaneOffsetArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
3512                                         C.ShortTy, ImplicitParamDecl::Other);
3513   // Algorithm version. This is expected to be known at compile time.
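  // For illustration, the runtime's full warp reduction drives the function
  // emitted below roughly as (see the 'Warp Reduction Algorithms' notes
  // further down):
  //   for (int offset = WARPSIZE/2; offset > 0; offset /= 2)
  //     _omp_reduction_shuffle_and_reduce_func(reduce_data, 0, offset, 0);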
3514   ImplicitParamDecl AlgoVerArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
3515                                C.ShortTy, ImplicitParamDecl::Other);
3516   FunctionArgList Args;
3517   Args.push_back(&ReduceListArg);
3518   Args.push_back(&LaneIDArg);
3519   Args.push_back(&RemoteLaneOffsetArg);
3520   Args.push_back(&AlgoVerArg);
3521
3522   const CGFunctionInfo &CGFI =
3523       CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
3524   auto *Fn = llvm::Function::Create(
3525       CGM.getTypes().GetFunctionType(CGFI), llvm::GlobalValue::InternalLinkage,
3526       "_omp_reduction_shuffle_and_reduce_func", &CGM.getModule());
3527   CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, CGFI);
3528   Fn->setDoesNotRecurse();
3529   if (CGM.getLangOpts().Optimize) {
3530     Fn->removeFnAttr(llvm::Attribute::NoInline);
3531     Fn->removeFnAttr(llvm::Attribute::OptimizeNone);
3532     Fn->addFnAttr(llvm::Attribute::AlwaysInline);
3533   }
3534
3535   CodeGenFunction CGF(CGM);
3536   CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, CGFI, Args, Loc, Loc);
3537
3538   CGBuilderTy &Bld = CGF.Builder;
3539
3540   Address AddrReduceListArg = CGF.GetAddrOfLocalVar(&ReduceListArg);
3541   Address LocalReduceList(
3542       Bld.CreatePointerBitCastOrAddrSpaceCast(
3543           CGF.EmitLoadOfScalar(AddrReduceListArg, /*Volatile=*/false,
3544                                C.VoidPtrTy, SourceLocation()),
3545           CGF.ConvertTypeForMem(ReductionArrayTy)->getPointerTo()),
3546       CGF.getPointerAlign());
3547
3548   Address AddrLaneIDArg = CGF.GetAddrOfLocalVar(&LaneIDArg);
3549   llvm::Value *LaneIDArgVal = CGF.EmitLoadOfScalar(
3550       AddrLaneIDArg, /*Volatile=*/false, C.ShortTy, SourceLocation());
3551
3552   Address AddrRemoteLaneOffsetArg = CGF.GetAddrOfLocalVar(&RemoteLaneOffsetArg);
3553   llvm::Value *RemoteLaneOffsetArgVal = CGF.EmitLoadOfScalar(
3554       AddrRemoteLaneOffsetArg, /*Volatile=*/false, C.ShortTy, SourceLocation());
3555
3556   Address AddrAlgoVerArg = CGF.GetAddrOfLocalVar(&AlgoVerArg);
3557   llvm::Value *AlgoVerArgVal = CGF.EmitLoadOfScalar(
3558       AddrAlgoVerArg, /*Volatile=*/false, C.ShortTy, SourceLocation());
3559
3560   // Create a local thread-private variable to host the Reduce list
3561   // from a remote lane.
3562   Address RemoteReduceList =
3563       CGF.CreateMemTemp(ReductionArrayTy, ".omp.reduction.remote_reduce_list");
3564
3565   // This loop iterates through the list of reduce elements and copies,
3566   // element by element, from a remote lane in the warp to RemoteReduceList,
3567   // hosted on the thread's stack.
3568   emitReductionListCopy(RemoteLaneToThread, CGF, ReductionArrayTy, Privates,
3569                         LocalReduceList, RemoteReduceList,
3570                         {/*RemoteLaneOffset=*/RemoteLaneOffsetArgVal,
3571                          /*ScratchpadIndex=*/nullptr,
3572                          /*ScratchpadWidth=*/nullptr});
3573
3574   // The actions to be performed on the Remote Reduce list depend
3575   // on the algorithm version.
3576   //
3577   //  if (AlgoVer==0) || (AlgoVer==1 && (LaneId < Offset)) || (AlgoVer==2 &&
3578   //  LaneId % 2 == 0 && Offset > 0):
3579   //    do the reduction value aggregation
3580   //
3581   //  The thread local variable Reduce list is mutated in place to host the
3582   //  reduced data, which is the aggregated value produced from local and
3583   //  remote lanes.
3584   //
3585   //  Note that AlgoVer is expected to be a constant integer known at compile
3586   //  time.
3587   //  When AlgoVer==0, the first conjunction evaluates to true, making
3588   //  the entire predicate true at compile time.
3589   //  When AlgoVer==1, the second conjunction has only the second part to be
3590   //  evaluated at run time. The other conjunctions evaluate to false
3591   //  at compile time.
3592   //  When AlgoVer==2, the third conjunction has only the second part to be
3593   //  evaluated at run time. The other conjunctions evaluate to false
3594   //  at compile time.
3595   llvm::Value *CondAlgo0 = Bld.CreateIsNull(AlgoVerArgVal);
3596
3597   llvm::Value *Algo1 = Bld.CreateICmpEQ(AlgoVerArgVal, Bld.getInt16(1));
3598   llvm::Value *CondAlgo1 = Bld.CreateAnd(
3599       Algo1, Bld.CreateICmpULT(LaneIDArgVal, RemoteLaneOffsetArgVal));
3600
3601   llvm::Value *Algo2 = Bld.CreateICmpEQ(AlgoVerArgVal, Bld.getInt16(2));
3602   llvm::Value *CondAlgo2 = Bld.CreateAnd(
3603       Algo2, Bld.CreateIsNull(Bld.CreateAnd(LaneIDArgVal, Bld.getInt16(1))));
3604   CondAlgo2 = Bld.CreateAnd(
3605       CondAlgo2, Bld.CreateICmpSGT(RemoteLaneOffsetArgVal, Bld.getInt16(0)));
3606
3607   llvm::Value *CondReduce = Bld.CreateOr(CondAlgo0, CondAlgo1);
3608   CondReduce = Bld.CreateOr(CondReduce, CondAlgo2);
3609
3610   llvm::BasicBlock *ThenBB = CGF.createBasicBlock("then");
3611   llvm::BasicBlock *ElseBB = CGF.createBasicBlock("else");
3612   llvm::BasicBlock *MergeBB = CGF.createBasicBlock("ifcont");
3613   Bld.CreateCondBr(CondReduce, ThenBB, ElseBB);
3614
3615   CGF.EmitBlock(ThenBB);
3616   // reduce_function(LocalReduceList, RemoteReduceList)
3617   llvm::Value *LocalReduceListPtr = Bld.CreatePointerBitCastOrAddrSpaceCast(
3618       LocalReduceList.getPointer(), CGF.VoidPtrTy);
3619   llvm::Value *RemoteReduceListPtr = Bld.CreatePointerBitCastOrAddrSpaceCast(
3620       RemoteReduceList.getPointer(), CGF.VoidPtrTy);
3621   CGM.getOpenMPRuntime().emitOutlinedFunctionCall(
3622       CGF, Loc, ReduceFn, {LocalReduceListPtr, RemoteReduceListPtr});
3623   Bld.CreateBr(MergeBB);
3624
3625   CGF.EmitBlock(ElseBB);
3626   Bld.CreateBr(MergeBB);
3627
3628   CGF.EmitBlock(MergeBB);
3629
3630   // if (AlgoVer==1 && (LaneId >= Offset)) copy Remote Reduce list to local
3631   // Reduce list.
3632   Algo1 = Bld.CreateICmpEQ(AlgoVerArgVal, Bld.getInt16(1));
3633   llvm::Value *CondCopy = Bld.CreateAnd(
3634       Algo1, Bld.CreateICmpUGE(LaneIDArgVal, RemoteLaneOffsetArgVal));
3635
3636   llvm::BasicBlock *CpyThenBB = CGF.createBasicBlock("then");
3637   llvm::BasicBlock *CpyElseBB = CGF.createBasicBlock("else");
3638   llvm::BasicBlock *CpyMergeBB = CGF.createBasicBlock("ifcont");
3639   Bld.CreateCondBr(CondCopy, CpyThenBB, CpyElseBB);
3640
3641   CGF.EmitBlock(CpyThenBB);
3642   emitReductionListCopy(ThreadCopy, CGF, ReductionArrayTy, Privates,
3643                         RemoteReduceList, LocalReduceList);
3644   Bld.CreateBr(CpyMergeBB);
3645
3646   CGF.EmitBlock(CpyElseBB);
3647   Bld.CreateBr(CpyMergeBB);
3648
3649   CGF.EmitBlock(CpyMergeBB);
3650
3651   CGF.FinishFunction();
3652   return Fn;
3653 }
3654
3655 /// This function emits a helper that copies all the reduction variables from
3656 /// the team into the provided global buffer for the reduction variables.
3657 ///
3658 /// void list_to_global_copy_func(void *buffer, int Idx, void *reduce_data)
3659 ///   For all data entries D in reduce_data:
3660 ///     Copy local D to buffer.D[Idx]
3661 static llvm::Value *emitListToGlobalCopyFunction(
3662     CodeGenModule &CGM, ArrayRef<const Expr *> Privates,
3663     QualType ReductionArrayTy, SourceLocation Loc,
3664     const RecordDecl *TeamReductionRec,
3665     const llvm::SmallDenseMap<const ValueDecl *, const FieldDecl *>
3666         &VarFieldMap) {
3667   ASTContext &C = CGM.getContext();
3668
3669   // Buffer: global reduction buffer.
3670   ImplicitParamDecl BufferArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
3671                               C.VoidPtrTy, ImplicitParamDecl::Other);
3672   // Idx: index of the buffer.
3673 ImplicitParamDecl IdxArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.IntTy, 3674 ImplicitParamDecl::Other); 3675 // ReduceList: thread local Reduce list. 3676 ImplicitParamDecl ReduceListArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, 3677 C.VoidPtrTy, ImplicitParamDecl::Other); 3678 FunctionArgList Args; 3679 Args.push_back(&BufferArg); 3680 Args.push_back(&IdxArg); 3681 Args.push_back(&ReduceListArg); 3682 3683 const CGFunctionInfo &CGFI = 3684 CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args); 3685 auto *Fn = llvm::Function::Create( 3686 CGM.getTypes().GetFunctionType(CGFI), llvm::GlobalValue::InternalLinkage, 3687 "_omp_reduction_list_to_global_copy_func", &CGM.getModule()); 3688 CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, CGFI); 3689 Fn->setDoesNotRecurse(); 3690 CodeGenFunction CGF(CGM); 3691 CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, CGFI, Args, Loc, Loc); 3692 3693 CGBuilderTy &Bld = CGF.Builder; 3694 3695 Address AddrReduceListArg = CGF.GetAddrOfLocalVar(&ReduceListArg); 3696 Address AddrBufferArg = CGF.GetAddrOfLocalVar(&BufferArg); 3697 Address LocalReduceList( 3698 Bld.CreatePointerBitCastOrAddrSpaceCast( 3699 CGF.EmitLoadOfScalar(AddrReduceListArg, /*Volatile=*/false, 3700 C.VoidPtrTy, Loc), 3701 CGF.ConvertTypeForMem(ReductionArrayTy)->getPointerTo()), 3702 CGF.getPointerAlign()); 3703 QualType StaticTy = C.getRecordType(TeamReductionRec); 3704 llvm::Type *LLVMReductionsBufferTy = 3705 CGM.getTypes().ConvertTypeForMem(StaticTy); 3706 llvm::Value *BufferArrPtr = Bld.CreatePointerBitCastOrAddrSpaceCast( 3707 CGF.EmitLoadOfScalar(AddrBufferArg, /*Volatile=*/false, C.VoidPtrTy, Loc), 3708 LLVMReductionsBufferTy->getPointerTo()); 3709 llvm::Value *Idxs[] = {llvm::ConstantInt::getNullValue(CGF.Int32Ty), 3710 CGF.EmitLoadOfScalar(CGF.GetAddrOfLocalVar(&IdxArg), 3711 /*Volatile=*/false, C.IntTy, 3712 Loc)}; 3713 unsigned Idx = 0; 3714 for (const Expr *Private : Privates) { 3715 // Reduce element = LocalReduceList[i] 3716 Address ElemPtrPtrAddr = Bld.CreateConstArrayGEP(LocalReduceList, Idx); 3717 llvm::Value *ElemPtrPtr = CGF.EmitLoadOfScalar( 3718 ElemPtrPtrAddr, /*Volatile=*/false, C.VoidPtrTy, SourceLocation()); 3719 // elemptr = ((CopyType*)(elemptrptr)) + I 3720 ElemPtrPtr = Bld.CreatePointerBitCastOrAddrSpaceCast( 3721 ElemPtrPtr, CGF.ConvertTypeForMem(Private->getType())->getPointerTo()); 3722 Address ElemPtr = 3723 Address(ElemPtrPtr, C.getTypeAlignInChars(Private->getType())); 3724 const ValueDecl *VD = cast<DeclRefExpr>(Private)->getDecl(); 3725 // Global = Buffer.VD[Idx]; 3726 const FieldDecl *FD = VarFieldMap.lookup(VD); 3727 LValue GlobLVal = CGF.EmitLValueForField( 3728 CGF.MakeNaturalAlignAddrLValue(BufferArrPtr, StaticTy), FD); 3729 llvm::Value *BufferPtr = 3730 Bld.CreateInBoundsGEP(GlobLVal.getPointer(CGF), Idxs); 3731 GlobLVal.setAddress(Address(BufferPtr, GlobLVal.getAlignment())); 3732 switch (CGF.getEvaluationKind(Private->getType())) { 3733 case TEK_Scalar: { 3734 llvm::Value *V = CGF.EmitLoadOfScalar( 3735 ElemPtr, /*Volatile=*/false, Private->getType(), Loc, 3736 LValueBaseInfo(AlignmentSource::Type), TBAAAccessInfo()); 3737 CGF.EmitStoreOfScalar(V, GlobLVal); 3738 break; 3739 } 3740 case TEK_Complex: { 3741 CodeGenFunction::ComplexPairTy V = CGF.EmitLoadOfComplex( 3742 CGF.MakeAddrLValue(ElemPtr, Private->getType()), Loc); 3743 CGF.EmitStoreOfComplex(V, GlobLVal, /*isInit=*/false); 3744 break; 3745 } 3746 case TEK_Aggregate: 3747 CGF.EmitAggregateCopy(GlobLVal, 3748 CGF.MakeAddrLValue(ElemPtr, Private->getType()), 3749 
Private->getType(), AggValueSlot::DoesNotOverlap); 3750 break; 3751 } 3752 ++Idx; 3753 } 3754 3755 CGF.FinishFunction(); 3756 return Fn; 3757 } 3758 3759 /// This function emits a helper that reduces all the reduction variables from 3760 /// the team into the provided global buffer for the reduction variables. 3761 /// 3762 /// void list_to_global_reduce_func(void *buffer, int Idx, void *reduce_data) 3763 /// void *GlobPtrs[]; 3764 /// GlobPtrs[0] = (void*)&buffer.D0[Idx]; 3765 /// ... 3766 /// GlobPtrs[N] = (void*)&buffer.DN[Idx]; 3767 /// reduce_function(GlobPtrs, reduce_data); 3768 static llvm::Value *emitListToGlobalReduceFunction( 3769 CodeGenModule &CGM, ArrayRef<const Expr *> Privates, 3770 QualType ReductionArrayTy, SourceLocation Loc, 3771 const RecordDecl *TeamReductionRec, 3772 const llvm::SmallDenseMap<const ValueDecl *, const FieldDecl *> 3773 &VarFieldMap, 3774 llvm::Function *ReduceFn) { 3775 ASTContext &C = CGM.getContext(); 3776 3777 // Buffer: global reduction buffer. 3778 ImplicitParamDecl BufferArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, 3779 C.VoidPtrTy, ImplicitParamDecl::Other); 3780 // Idx: index of the buffer. 3781 ImplicitParamDecl IdxArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.IntTy, 3782 ImplicitParamDecl::Other); 3783 // ReduceList: thread local Reduce list. 3784 ImplicitParamDecl ReduceListArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, 3785 C.VoidPtrTy, ImplicitParamDecl::Other); 3786 FunctionArgList Args; 3787 Args.push_back(&BufferArg); 3788 Args.push_back(&IdxArg); 3789 Args.push_back(&ReduceListArg); 3790 3791 const CGFunctionInfo &CGFI = 3792 CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args); 3793 auto *Fn = llvm::Function::Create( 3794 CGM.getTypes().GetFunctionType(CGFI), llvm::GlobalValue::InternalLinkage, 3795 "_omp_reduction_list_to_global_reduce_func", &CGM.getModule()); 3796 CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, CGFI); 3797 Fn->setDoesNotRecurse(); 3798 CodeGenFunction CGF(CGM); 3799 CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, CGFI, Args, Loc, Loc); 3800 3801 CGBuilderTy &Bld = CGF.Builder; 3802 3803 Address AddrBufferArg = CGF.GetAddrOfLocalVar(&BufferArg); 3804 QualType StaticTy = C.getRecordType(TeamReductionRec); 3805 llvm::Type *LLVMReductionsBufferTy = 3806 CGM.getTypes().ConvertTypeForMem(StaticTy); 3807 llvm::Value *BufferArrPtr = Bld.CreatePointerBitCastOrAddrSpaceCast( 3808 CGF.EmitLoadOfScalar(AddrBufferArg, /*Volatile=*/false, C.VoidPtrTy, Loc), 3809 LLVMReductionsBufferTy->getPointerTo()); 3810 3811 // 1. Build a list of reduction variables. 
3812   //  void *RedList[<n>] = {<ReductionVars>[0], ..., <ReductionVars>[<n>-1]};
3813   Address ReductionList =
3814       CGF.CreateMemTemp(ReductionArrayTy, ".omp.reduction.red_list");
3815   auto IPriv = Privates.begin();
3816   llvm::Value *Idxs[] = {llvm::ConstantInt::getNullValue(CGF.Int32Ty),
3817                          CGF.EmitLoadOfScalar(CGF.GetAddrOfLocalVar(&IdxArg),
3818                                               /*Volatile=*/false, C.IntTy,
3819                                               Loc)};
3820   unsigned Idx = 0;
3821   for (unsigned I = 0, E = Privates.size(); I < E; ++I, ++IPriv, ++Idx) {
3822     Address Elem = CGF.Builder.CreateConstArrayGEP(ReductionList, Idx);
3823     // Global = Buffer.VD[Idx];
3824     const ValueDecl *VD = cast<DeclRefExpr>(*IPriv)->getDecl();
3825     const FieldDecl *FD = VarFieldMap.lookup(VD);
3826     LValue GlobLVal = CGF.EmitLValueForField(
3827         CGF.MakeNaturalAlignAddrLValue(BufferArrPtr, StaticTy), FD);
3828     llvm::Value *BufferPtr =
3829         Bld.CreateInBoundsGEP(GlobLVal.getPointer(CGF), Idxs);
3830     llvm::Value *Ptr = CGF.EmitCastToVoidPtr(BufferPtr);
3831     CGF.EmitStoreOfScalar(Ptr, Elem, /*Volatile=*/false, C.VoidPtrTy);
3832     if ((*IPriv)->getType()->isVariablyModifiedType()) {
3833       // Store array size.
3834       ++Idx;
3835       Elem = CGF.Builder.CreateConstArrayGEP(ReductionList, Idx);
3836       llvm::Value *Size = CGF.Builder.CreateIntCast(
3837           CGF.getVLASize(
3838               CGF.getContext().getAsVariableArrayType((*IPriv)->getType()))
3839               .NumElts,
3840           CGF.SizeTy, /*isSigned=*/false);
3841       CGF.Builder.CreateStore(CGF.Builder.CreateIntToPtr(Size, CGF.VoidPtrTy),
3842                               Elem);
3843     }
3844   }
3845
3846   // Call reduce_function(GlobalReduceList, ReduceList)
3847   llvm::Value *GlobalReduceList =
3848       CGF.EmitCastToVoidPtr(ReductionList.getPointer());
3849   Address AddrReduceListArg = CGF.GetAddrOfLocalVar(&ReduceListArg);
3850   llvm::Value *ReducedPtr = CGF.EmitLoadOfScalar(
3851       AddrReduceListArg, /*Volatile=*/false, C.VoidPtrTy, Loc);
3852   CGM.getOpenMPRuntime().emitOutlinedFunctionCall(
3853       CGF, Loc, ReduceFn, {GlobalReduceList, ReducedPtr});
3854   CGF.FinishFunction();
3855   return Fn;
3856 }
3857
3858 /// This function emits a helper that copies all the reduction variables from
3859 /// the provided global buffer back into the team's thread-local copies.
3860 ///
3861 /// void global_to_list_copy_func(void *buffer, int Idx, void *reduce_data)
3862 ///   For all data entries D in reduce_data:
3863 ///     Copy buffer.D[Idx] to local D;
3864 static llvm::Value *emitGlobalToListCopyFunction(
3865     CodeGenModule &CGM, ArrayRef<const Expr *> Privates,
3866     QualType ReductionArrayTy, SourceLocation Loc,
3867     const RecordDecl *TeamReductionRec,
3868     const llvm::SmallDenseMap<const ValueDecl *, const FieldDecl *>
3869         &VarFieldMap) {
3870   ASTContext &C = CGM.getContext();
3871
3872   // Buffer: global reduction buffer.
3873   ImplicitParamDecl BufferArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
3874                               C.VoidPtrTy, ImplicitParamDecl::Other);
3875   // Idx: index of the buffer.
3876   ImplicitParamDecl IdxArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.IntTy,
3877                            ImplicitParamDecl::Other);
3878   // ReduceList: thread local Reduce list.
3879 ImplicitParamDecl ReduceListArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, 3880 C.VoidPtrTy, ImplicitParamDecl::Other); 3881 FunctionArgList Args; 3882 Args.push_back(&BufferArg); 3883 Args.push_back(&IdxArg); 3884 Args.push_back(&ReduceListArg); 3885 3886 const CGFunctionInfo &CGFI = 3887 CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args); 3888 auto *Fn = llvm::Function::Create( 3889 CGM.getTypes().GetFunctionType(CGFI), llvm::GlobalValue::InternalLinkage, 3890 "_omp_reduction_global_to_list_copy_func", &CGM.getModule()); 3891 CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, CGFI); 3892 Fn->setDoesNotRecurse(); 3893 CodeGenFunction CGF(CGM); 3894 CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, CGFI, Args, Loc, Loc); 3895 3896 CGBuilderTy &Bld = CGF.Builder; 3897 3898 Address AddrReduceListArg = CGF.GetAddrOfLocalVar(&ReduceListArg); 3899 Address AddrBufferArg = CGF.GetAddrOfLocalVar(&BufferArg); 3900 Address LocalReduceList( 3901 Bld.CreatePointerBitCastOrAddrSpaceCast( 3902 CGF.EmitLoadOfScalar(AddrReduceListArg, /*Volatile=*/false, 3903 C.VoidPtrTy, Loc), 3904 CGF.ConvertTypeForMem(ReductionArrayTy)->getPointerTo()), 3905 CGF.getPointerAlign()); 3906 QualType StaticTy = C.getRecordType(TeamReductionRec); 3907 llvm::Type *LLVMReductionsBufferTy = 3908 CGM.getTypes().ConvertTypeForMem(StaticTy); 3909 llvm::Value *BufferArrPtr = Bld.CreatePointerBitCastOrAddrSpaceCast( 3910 CGF.EmitLoadOfScalar(AddrBufferArg, /*Volatile=*/false, C.VoidPtrTy, Loc), 3911 LLVMReductionsBufferTy->getPointerTo()); 3912 3913 llvm::Value *Idxs[] = {llvm::ConstantInt::getNullValue(CGF.Int32Ty), 3914 CGF.EmitLoadOfScalar(CGF.GetAddrOfLocalVar(&IdxArg), 3915 /*Volatile=*/false, C.IntTy, 3916 Loc)}; 3917 unsigned Idx = 0; 3918 for (const Expr *Private : Privates) { 3919 // Reduce element = LocalReduceList[i] 3920 Address ElemPtrPtrAddr = Bld.CreateConstArrayGEP(LocalReduceList, Idx); 3921 llvm::Value *ElemPtrPtr = CGF.EmitLoadOfScalar( 3922 ElemPtrPtrAddr, /*Volatile=*/false, C.VoidPtrTy, SourceLocation()); 3923 // elemptr = ((CopyType*)(elemptrptr)) + I 3924 ElemPtrPtr = Bld.CreatePointerBitCastOrAddrSpaceCast( 3925 ElemPtrPtr, CGF.ConvertTypeForMem(Private->getType())->getPointerTo()); 3926 Address ElemPtr = 3927 Address(ElemPtrPtr, C.getTypeAlignInChars(Private->getType())); 3928 const ValueDecl *VD = cast<DeclRefExpr>(Private)->getDecl(); 3929 // Global = Buffer.VD[Idx]; 3930 const FieldDecl *FD = VarFieldMap.lookup(VD); 3931 LValue GlobLVal = CGF.EmitLValueForField( 3932 CGF.MakeNaturalAlignAddrLValue(BufferArrPtr, StaticTy), FD); 3933 llvm::Value *BufferPtr = 3934 Bld.CreateInBoundsGEP(GlobLVal.getPointer(CGF), Idxs); 3935 GlobLVal.setAddress(Address(BufferPtr, GlobLVal.getAlignment())); 3936 switch (CGF.getEvaluationKind(Private->getType())) { 3937 case TEK_Scalar: { 3938 llvm::Value *V = CGF.EmitLoadOfScalar(GlobLVal, Loc); 3939 CGF.EmitStoreOfScalar(V, ElemPtr, /*Volatile=*/false, Private->getType(), 3940 LValueBaseInfo(AlignmentSource::Type), 3941 TBAAAccessInfo()); 3942 break; 3943 } 3944 case TEK_Complex: { 3945 CodeGenFunction::ComplexPairTy V = CGF.EmitLoadOfComplex(GlobLVal, Loc); 3946 CGF.EmitStoreOfComplex(V, CGF.MakeAddrLValue(ElemPtr, Private->getType()), 3947 /*isInit=*/false); 3948 break; 3949 } 3950 case TEK_Aggregate: 3951 CGF.EmitAggregateCopy(CGF.MakeAddrLValue(ElemPtr, Private->getType()), 3952 GlobLVal, Private->getType(), 3953 AggValueSlot::DoesNotOverlap); 3954 break; 3955 } 3956 ++Idx; 3957 } 3958 3959 CGF.FinishFunction(); 3960 return Fn; 3961 } 3962 3963 /// This 
function emits a helper that reduces all the reduction variables in the
3964 /// provided global buffer into the team's thread-local Reduce list.
3965 ///
3966 /// void global_to_list_reduce_func(void *buffer, int Idx, void *reduce_data)
3967 ///   void *GlobPtrs[];
3968 ///   GlobPtrs[0] = (void*)&buffer.D0[Idx];
3969 ///   ...
3970 ///   GlobPtrs[N] = (void*)&buffer.DN[Idx];
3971 ///   reduce_function(reduce_data, GlobPtrs);
3972 static llvm::Value *emitGlobalToListReduceFunction(
3973     CodeGenModule &CGM, ArrayRef<const Expr *> Privates,
3974     QualType ReductionArrayTy, SourceLocation Loc,
3975     const RecordDecl *TeamReductionRec,
3976     const llvm::SmallDenseMap<const ValueDecl *, const FieldDecl *>
3977         &VarFieldMap,
3978     llvm::Function *ReduceFn) {
3979   ASTContext &C = CGM.getContext();
3980
3981   // Buffer: global reduction buffer.
3982   ImplicitParamDecl BufferArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
3983                               C.VoidPtrTy, ImplicitParamDecl::Other);
3984   // Idx: index of the buffer.
3985   ImplicitParamDecl IdxArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.IntTy,
3986                            ImplicitParamDecl::Other);
3987   // ReduceList: thread local Reduce list.
3988   ImplicitParamDecl ReduceListArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
3989                                   C.VoidPtrTy, ImplicitParamDecl::Other);
3990   FunctionArgList Args;
3991   Args.push_back(&BufferArg);
3992   Args.push_back(&IdxArg);
3993   Args.push_back(&ReduceListArg);
3994
3995   const CGFunctionInfo &CGFI =
3996       CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
3997   auto *Fn = llvm::Function::Create(
3998       CGM.getTypes().GetFunctionType(CGFI), llvm::GlobalValue::InternalLinkage,
3999       "_omp_reduction_global_to_list_reduce_func", &CGM.getModule());
4000   CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, CGFI);
4001   Fn->setDoesNotRecurse();
4002   CodeGenFunction CGF(CGM);
4003   CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, CGFI, Args, Loc, Loc);
4004
4005   CGBuilderTy &Bld = CGF.Builder;
4006
4007   Address AddrBufferArg = CGF.GetAddrOfLocalVar(&BufferArg);
4008   QualType StaticTy = C.getRecordType(TeamReductionRec);
4009   llvm::Type *LLVMReductionsBufferTy =
4010       CGM.getTypes().ConvertTypeForMem(StaticTy);
4011   llvm::Value *BufferArrPtr = Bld.CreatePointerBitCastOrAddrSpaceCast(
4012       CGF.EmitLoadOfScalar(AddrBufferArg, /*Volatile=*/false, C.VoidPtrTy, Loc),
4013       LLVMReductionsBufferTy->getPointerTo());
4014
4015   // 1. Build a list of reduction variables.
4016   // void *RedList[<n>] = {<ReductionVars>[0], ..., <ReductionVars>[<n>-1]};
4017   Address ReductionList =
4018       CGF.CreateMemTemp(ReductionArrayTy, ".omp.reduction.red_list");
4019   auto IPriv = Privates.begin();
4020   llvm::Value *Idxs[] = {llvm::ConstantInt::getNullValue(CGF.Int32Ty),
4021                          CGF.EmitLoadOfScalar(CGF.GetAddrOfLocalVar(&IdxArg),
4022                                               /*Volatile=*/false, C.IntTy,
4023                                               Loc)};
4024   unsigned Idx = 0;
4025   for (unsigned I = 0, E = Privates.size(); I < E; ++I, ++IPriv, ++Idx) {
4026     Address Elem = CGF.Builder.CreateConstArrayGEP(ReductionList, Idx);
4027     // Global = Buffer.VD[Idx];
4028     const ValueDecl *VD = cast<DeclRefExpr>(*IPriv)->getDecl();
4029     const FieldDecl *FD = VarFieldMap.lookup(VD);
4030     LValue GlobLVal = CGF.EmitLValueForField(
4031         CGF.MakeNaturalAlignAddrLValue(BufferArrPtr, StaticTy), FD);
4032     llvm::Value *BufferPtr =
4033         Bld.CreateInBoundsGEP(GlobLVal.getPointer(CGF), Idxs);
4034     llvm::Value *Ptr = CGF.EmitCastToVoidPtr(BufferPtr);
4035     CGF.EmitStoreOfScalar(Ptr, Elem, /*Volatile=*/false, C.VoidPtrTy);
4036     if ((*IPriv)->getType()->isVariablyModifiedType()) {
4037       // Store array size.
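      // (Illustrative) for a VLA private 'double a[n]', the Reduce list
      // holds two consecutive entries: a pointer to the array data and
      // the element count 'n' passed through a void* slot.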
4038       ++Idx;
4039       Elem = CGF.Builder.CreateConstArrayGEP(ReductionList, Idx);
4040       llvm::Value *Size = CGF.Builder.CreateIntCast(
4041           CGF.getVLASize(
4042               CGF.getContext().getAsVariableArrayType((*IPriv)->getType()))
4043               .NumElts,
4044           CGF.SizeTy, /*isSigned=*/false);
4045       CGF.Builder.CreateStore(CGF.Builder.CreateIntToPtr(Size, CGF.VoidPtrTy),
4046                               Elem);
4047     }
4048   }
4049
4050   // Call reduce_function(ReduceList, GlobalReduceList)
4051   llvm::Value *GlobalReduceList =
4052       CGF.EmitCastToVoidPtr(ReductionList.getPointer());
4053   Address AddrReduceListArg = CGF.GetAddrOfLocalVar(&ReduceListArg);
4054   llvm::Value *ReducedPtr = CGF.EmitLoadOfScalar(
4055       AddrReduceListArg, /*Volatile=*/false, C.VoidPtrTy, Loc);
4056   CGM.getOpenMPRuntime().emitOutlinedFunctionCall(
4057       CGF, Loc, ReduceFn, {ReducedPtr, GlobalReduceList});
4058   CGF.FinishFunction();
4059   return Fn;
4060 }
4061
4062 ///
4063 /// Design of OpenMP reductions on the GPU
4064 ///
4065 /// Consider a typical OpenMP program with one or more reduction
4066 /// clauses:
4067 ///
4068 /// float foo;
4069 /// double bar;
4070 /// #pragma omp target teams distribute parallel for \
4071 ///             reduction(+:foo) reduction(*:bar)
4072 /// for (int i = 0; i < N; i++) {
4073 ///   foo += A[i]; bar *= B[i];
4074 /// }
4075 ///
4076 /// where 'foo' and 'bar' are reduced across all OpenMP threads in
4077 /// all teams. In our OpenMP implementation on the NVPTX device an
4078 /// OpenMP team is mapped to a CUDA threadblock and OpenMP threads
4079 /// within a team are mapped to CUDA threads within a threadblock.
4080 /// Our goal is to efficiently aggregate values across all OpenMP
4081 /// threads such that:
4082 ///
4083 ///   - the compiler and runtime are logically concise, and
4084 ///   - the reduction is performed efficiently in a hierarchical
4085 ///     manner as follows: within OpenMP threads in the same warp,
4086 ///     across warps in a threadblock, and finally across teams on
4087 ///     the NVPTX device.
4088 ///
4089 /// Introduction to Decoupling
4090 ///
4091 /// We would like to decouple the compiler and the runtime so that the
4092 /// latter is ignorant of the reduction variables (number, data types)
4093 /// and the reduction operators. This allows a simpler interface
4094 /// and implementation while still attaining good performance.
4095 ///
4096 /// Pseudocode for the aforementioned OpenMP program generated by the
4097 /// compiler is as follows:
4098 ///
4099 /// 1. Create private copies of reduction variables on each OpenMP
4100 ///    thread: 'foo_private', 'bar_private'
4101 /// 2. Each OpenMP thread reduces the chunk of 'A' and 'B' assigned
4102 ///    to it and writes the result in 'foo_private' and 'bar_private'
4103 ///    respectively.
4104 /// 3. Call the OpenMP runtime on the GPU to reduce within a team
4105 ///    and store the result on the team master:
4106 ///
4107 ///    __kmpc_nvptx_parallel_reduce_nowait_v2(...,
4108 ///       reduceData, shuffleReduceFn, interWarpCpyFn)
4109 ///
4110 ///    where:
4111 ///      struct ReduceData {
4112 ///        float *foo;
4113 ///        double *bar;
4114 ///      } reduceData
4115 ///      reduceData.foo = &foo_private
4116 ///      reduceData.bar = &bar_private
4117 ///
4118 ///    'shuffleReduceFn' and 'interWarpCpyFn' are pointers to two
4119 ///    auxiliary functions generated by the compiler that operate on
4120 ///    variables of type 'ReduceData'. They help the runtime perform
4121 ///    the algorithmic steps in a data-agnostic manner.
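///
///    For the program above, a schematic C rendering of the generated
///    'reduce_function' (emitted as LLVM IR in practice; shown here only
///    for illustration) would be:
///
///      void reduce_function(void *lhs, void *rhs) {
///        ReduceData *L = (ReduceData *)lhs;
///        ReduceData *R = (ReduceData *)rhs;
///        *L->foo += *R->foo;
///        *L->bar *= *R->bar;
///      }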
4122 ///
4123 ///    'shuffleReduceFn' is a pointer to a function that reduces data
4124 ///    of type 'ReduceData' across two OpenMP threads (lanes) in the
4125 ///    same warp. It takes the following arguments as input:
4126 ///
4127 ///    a. variable of type 'ReduceData' on the calling lane,
4128 ///    b. its lane_id,
4129 ///    c. an offset relative to the current lane_id to generate a
4130 ///       remote_lane_id. The remote lane contains the second
4131 ///       variable of type 'ReduceData' that is to be reduced.
4132 ///    d. an algorithm version parameter determining which reduction
4133 ///       algorithm to use.
4134 ///
4135 ///    'shuffleReduceFn' retrieves data from the remote lane using
4136 ///    efficient GPU shuffle intrinsics and reduces, using the
4137 ///    algorithm specified by the 4th parameter, the two operands
4138 ///    element-wise. The result is written to the first operand.
4139 ///
4140 ///    Different reduction algorithms are implemented in different
4141 ///    runtime functions, all calling 'shuffleReduceFn' to perform
4142 ///    the essential reduction step. Therefore, based on the 4th
4143 ///    parameter, this function behaves slightly differently to
4144 ///    cooperate with the runtime to ensure correctness under
4145 ///    different circumstances.
4146 ///
4147 ///    'InterWarpCpyFn' is a pointer to a function that transfers
4148 ///    reduced variables across warps. It tunnels, through CUDA
4149 ///    shared memory, the thread-private data of type 'ReduceData'
4150 ///    from lane 0 of each warp to a lane in the first warp.
4151 /// 4. Call the OpenMP runtime on the GPU to reduce across teams.
4152 ///    The last team writes the global reduced value to memory.
4153 ///
4154 ///    ret = __kmpc_nvptx_teams_reduce_nowait(...,
4155 ///             reduceData, shuffleReduceFn, interWarpCpyFn,
4156 ///             scratchpadCopyFn, loadAndReduceFn)
4157 ///
4158 ///    'scratchpadCopyFn' is a helper that stores reduced
4159 ///    data from the team master to a scratchpad array in
4160 ///    global memory.
4161 ///
4162 ///    'loadAndReduceFn' is a helper that loads data from
4163 ///    the scratchpad array and reduces it with the input
4164 ///    operand.
4165 ///
4166 ///    These compiler generated functions hide address
4167 ///    calculation and alignment information from the runtime.
4168 /// 5. if ret == 1:
4169 ///    The team master of the last team stores the reduced
4170 ///    result to the globals in memory.
4171 ///    foo += *reduceData.foo; bar *= *reduceData.bar
4172 ///
4173 ///
4174 /// Warp Reduction Algorithms
4175 ///
4176 /// On the warp level, we have three algorithms implemented in the
4177 /// OpenMP runtime depending on the number of active lanes:
4178 ///
4179 /// Full Warp Reduction
4180 ///
4181 /// The reduce algorithm within a warp where all lanes are active
4182 /// is implemented in the runtime as follows:
4183 ///
4184 /// full_warp_reduce(void *reduce_data,
4185 ///                  kmp_ShuffleReductFctPtr ShuffleReduceFn) {
4186 ///   for (int offset = WARPSIZE/2; offset > 0; offset /= 2)
4187 ///     ShuffleReduceFn(reduce_data, 0, offset, 0);
4188 /// }
4189 ///
4190 /// The algorithm completes in log(2, WARPSIZE) steps.
4191 ///
4192 /// 'ShuffleReduceFn' is used here with lane_id set to 0 because it is
4193 /// not used; we therefore save instructions by not retrieving lane_id
4194 /// from the corresponding special registers. The 4th parameter, which
4195 /// represents the version of the algorithm being used, is set to 0 to
4196 /// signify full warp reduction.
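///
/// For WARPSIZE == 32, the loop above unrolls into five shuffle-reduce
/// steps (a minimal illustration of the call sequence):
///
///   ShuffleReduceFn(reduce_data, 0, 16, 0);
///   ShuffleReduceFn(reduce_data, 0, 8, 0);
///   ShuffleReduceFn(reduce_data, 0, 4, 0);
///   ShuffleReduceFn(reduce_data, 0, 2, 0);
///   ShuffleReduceFn(reduce_data, 0, 1, 0);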
///
/// In this version, 'ShuffleReduceFn' behaves, per element, as follows:
///
/// #reduce_elem refers to an element in the local lane's data structure
/// #remote_elem is retrieved from a remote lane
/// remote_elem = shuffle_down(reduce_elem, offset, WARPSIZE);
/// reduce_elem = reduce_elem REDUCE_OP remote_elem;
///
/// Contiguous Partial Warp Reduction
///
/// This reduce algorithm is used within a warp where only the first
/// 'n' (n <= WARPSIZE) lanes are active. It is typically used when the
/// number of OpenMP threads in a parallel region is not a multiple of
/// WARPSIZE. The algorithm is implemented in the runtime as follows:
///
/// void
/// contiguous_partial_reduce(void *reduce_data,
///                           kmp_ShuffleReductFctPtr ShuffleReduceFn,
///                           int size, int lane_id) {
///   int curr_size;
///   int offset;
///   curr_size = size;
///   offset = curr_size/2;
///   while (offset > 0) {
///     ShuffleReduceFn(reduce_data, lane_id, offset, 1);
///     curr_size = (curr_size+1)/2;
///     offset = curr_size/2;
///   }
/// }
///
/// In this version, 'ShuffleReduceFn' behaves, per element, as follows:
///
/// remote_elem = shuffle_down(reduce_elem, offset, WARPSIZE);
/// if (lane_id < offset)
///   reduce_elem = reduce_elem REDUCE_OP remote_elem
/// else
///   reduce_elem = remote_elem
///
/// This algorithm assumes that the data to be reduced are located in a
/// contiguous subset of lanes starting from the first. When there is
/// an odd number of active lanes, the data in the last lane is not
/// aggregated with any other lane's data but is instead copied over.
///
/// Dispersed Partial Warp Reduction
///
/// This algorithm is used within a warp when any discontiguous subset
/// of lanes is active. It is used to implement the reduction operation
/// across lanes in an OpenMP simd region or in a nested parallel region.
///
/// void
/// dispersed_partial_reduce(void *reduce_data,
///                          kmp_ShuffleReductFctPtr ShuffleReduceFn) {
///   int size, remote_id;
///   int logical_lane_id = number_of_active_lanes_before_me() * 2;
///   do {
///     remote_id = next_active_lane_id_right_after_me();
///     # the above function returns 0 if no active lane
///     # is present right after the current lane.
///     size = number_of_active_lanes_in_this_warp();
///     logical_lane_id /= 2;
///     ShuffleReduceFn(reduce_data, logical_lane_id,
///                     remote_id-1-threadIdx.x, 2);
///   } while (logical_lane_id % 2 == 0 && size > 1);
/// }
///
/// There is no assumption made about the initial state of the reduction.
/// Any number of lanes (>=1) could be active at any position. The
/// reduction result is returned in the first active lane.
///
/// In this version, 'ShuffleReduceFn' behaves, per element, as follows:
///
/// remote_elem = shuffle_down(reduce_elem, offset, WARPSIZE);
/// if (lane_id % 2 == 0 && offset > 0)
///   reduce_elem = reduce_elem REDUCE_OP remote_elem
/// else
///   reduce_elem = remote_elem
///
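/// The active-lane helpers used above are abstract names. As an
/// illustrative sketch only (expressed with CUDA warp intrinsics; the
/// actual runtime implementation may differ), they can be computed as:
///
///   unsigned mask = __activemask();                        // active lanes
///   int num_active = __popc(mask);                         // ..._in_this_warp()
///   int before_me = __popc(mask & ((1u << lane_id) - 1));  // ..._before_me()
///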
///
/// Intra-Team Reduction
///
/// This function, as implemented in the runtime call
/// '__kmpc_nvptx_parallel_reduce_nowait_v2', aggregates data across
/// OpenMP threads in a team. It first reduces within a warp using the
/// aforementioned algorithms, and then gathers the warp-reduced values
/// in the first warp.
///
/// The runtime makes use of the function 'InterWarpCpyFn', which copies
/// data from each warp master (the zeroth lane of each warp, where the
/// warp-reduced data is held) to the zeroth warp. This step reduces (in
/// a mathematical sense) the problem of reduction across warp masters in
/// a block to the problem of warp reduction.
///
///
/// Inter-Team Reduction
///
/// Once a team has reduced its data to a single value, it is stored in
/// a global scratchpad array. Since each team has a distinct slot, this
/// can be done without locking.
///
/// The last team to write to the scratchpad array proceeds to reduce the
/// scratchpad array. One or more workers in the last team use the helper
/// 'loadAndReduceFn' to load and reduce values from the array, i.e.,
/// the k'th worker reduces every k'th element.
///
/// Finally, a call is made to '__kmpc_nvptx_parallel_reduce_nowait_v2' to
/// reduce across workers and compute a globally reduced value.
///
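/// As an illustrative sketch of that inter-team step (names such as
/// 'teams_done' are hypothetical; this is not the literal runtime code):
///
///   scratchpad[team_id] = team_reduced_value;      // distinct slots
///   if (atomic_inc(&teams_done) == num_teams - 1) {
///     // Last team: worker k reduces every k'th slot...
///     for (int k = thread_id; k < num_teams; k += num_workers)
///       loadAndReduceFn(reduceData, scratchpad, k);
///     // ...then the workers reduce among themselves.
///     __kmpc_nvptx_parallel_reduce_nowait_v2(...);
///   }
///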
void CGOpenMPRuntimeGPU::emitReduction(
    CodeGenFunction &CGF, SourceLocation Loc, ArrayRef<const Expr *> Privates,
    ArrayRef<const Expr *> LHSExprs, ArrayRef<const Expr *> RHSExprs,
    ArrayRef<const Expr *> ReductionOps, ReductionOptionsTy Options) {
  if (!CGF.HaveInsertPoint())
    return;

  bool ParallelReduction = isOpenMPParallelDirective(Options.ReductionKind);
#ifndef NDEBUG
  bool TeamsReduction = isOpenMPTeamsDirective(Options.ReductionKind);
#endif

  if (Options.SimpleReduction) {
    assert(!TeamsReduction && !ParallelReduction &&
           "Invalid reduction selection in emitReduction.");
    CGOpenMPRuntime::emitReduction(CGF, Loc, Privates, LHSExprs, RHSExprs,
                                   ReductionOps, Options);
    return;
  }

  assert((TeamsReduction || ParallelReduction) &&
         "Invalid reduction selection in emitReduction.");

  // Build res = __kmpc_reduce{_nowait}(<gtid>, <n>, sizeof(RedList),
  // RedList, shuffle_reduce_func, interwarp_copy_func);
  // or
  // Build res = __kmpc_reduce_teams_nowait_simple(<loc>, <gtid>, <lck>);
  llvm::Value *RTLoc = emitUpdateLocation(CGF, Loc);
  llvm::Value *ThreadId = getThreadID(CGF, Loc);

  llvm::Value *Res;
  ASTContext &C = CGM.getContext();
  // 1. Build a list of reduction variables.
  // void *RedList[<n>] = {<ReductionVars>[0], ..., <ReductionVars>[<n>-1]};
  auto Size = RHSExprs.size();
  for (const Expr *E : Privates) {
    if (E->getType()->isVariablyModifiedType())
      // Reserve place for array size.
      ++Size;
  }
  llvm::APInt ArraySize(/*unsigned int numBits=*/32, Size);
  QualType ReductionArrayTy =
      C.getConstantArrayType(C.VoidPtrTy, ArraySize, nullptr, ArrayType::Normal,
                             /*IndexTypeQuals=*/0);
  Address ReductionList =
      CGF.CreateMemTemp(ReductionArrayTy, ".omp.reduction.red_list");
  auto IPriv = Privates.begin();
  unsigned Idx = 0;
  for (unsigned I = 0, E = RHSExprs.size(); I < E; ++I, ++IPriv, ++Idx) {
    Address Elem = CGF.Builder.CreateConstArrayGEP(ReductionList, Idx);
    CGF.Builder.CreateStore(
        CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
            CGF.EmitLValue(RHSExprs[I]).getPointer(CGF), CGF.VoidPtrTy),
        Elem);
    if ((*IPriv)->getType()->isVariablyModifiedType()) {
      // Store array size.
      ++Idx;
      Elem = CGF.Builder.CreateConstArrayGEP(ReductionList, Idx);
      llvm::Value *Size = CGF.Builder.CreateIntCast(
          CGF.getVLASize(
                 CGF.getContext().getAsVariableArrayType((*IPriv)->getType()))
              .NumElts,
          CGF.SizeTy, /*isSigned=*/false);
      CGF.Builder.CreateStore(CGF.Builder.CreateIntToPtr(Size, CGF.VoidPtrTy),
                              Elem);
    }
  }

  llvm::Value *RL = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
      ReductionList.getPointer(), CGF.VoidPtrTy);
  llvm::Function *ReductionFn = emitReductionFunction(
      Loc, CGF.ConvertTypeForMem(ReductionArrayTy)->getPointerTo(), Privates,
      LHSExprs, RHSExprs, ReductionOps);
  llvm::Value *ReductionArrayTySize = CGF.getTypeSize(ReductionArrayTy);
  llvm::Function *ShuffleAndReduceFn = emitShuffleAndReduceFunction(
      CGM, Privates, ReductionArrayTy, ReductionFn, Loc);
  llvm::Value *InterWarpCopyFn =
      emitInterWarpCopyFunction(CGM, Privates, ReductionArrayTy, Loc);

  if (ParallelReduction) {
    llvm::Value *Args[] = {RTLoc,
                           ThreadId,
                           CGF.Builder.getInt32(RHSExprs.size()),
                           ReductionArrayTySize,
                           RL,
                           ShuffleAndReduceFn,
                           InterWarpCopyFn};

    Res = CGF.EmitRuntimeCall(
        createNVPTXRuntimeFunction(
            OMPRTL_NVPTX__kmpc_nvptx_parallel_reduce_nowait_v2),
        Args);
  } else {
    assert(TeamsReduction && "expected teams reduction.");
    llvm::SmallDenseMap<const ValueDecl *, const FieldDecl *> VarFieldMap;
    llvm::SmallVector<const ValueDecl *, 4> PrivatesReductions(Privates.size());
    int Cnt = 0;
    for (const Expr *DRE : Privates) {
      PrivatesReductions[Cnt] = cast<DeclRefExpr>(DRE)->getDecl();
      ++Cnt;
    }
    const RecordDecl *TeamReductionRec = ::buildRecordForGlobalizedVars(
        CGM.getContext(), PrivatesReductions, llvm::None, VarFieldMap,
        C.getLangOpts().OpenMPCUDAReductionBufNum);
    TeamsReductions.push_back(TeamReductionRec);
    if (!KernelTeamsReductionPtr) {
      KernelTeamsReductionPtr = new llvm::GlobalVariable(
          CGM.getModule(), CGM.VoidPtrTy, /*isConstant=*/true,
          llvm::GlobalValue::InternalLinkage, nullptr,
          "_openmp_teams_reductions_buffer_$_$ptr");
    }
    llvm::Value *GlobalBufferPtr = CGF.EmitLoadOfScalar(
        Address(KernelTeamsReductionPtr, CGM.getPointerAlign()),
        /*Volatile=*/false, C.getPointerType(C.VoidPtrTy), Loc);
    llvm::Value *GlobalToBufferCpyFn = ::emitListToGlobalCopyFunction(
        CGM, Privates, ReductionArrayTy, Loc, TeamReductionRec, VarFieldMap);
    llvm::Value *GlobalToBufferRedFn = ::emitListToGlobalReduceFunction(
        CGM, Privates, ReductionArrayTy, Loc, TeamReductionRec, VarFieldMap,
        ReductionFn);
    llvm::Value *BufferToGlobalCpyFn = ::emitGlobalToListCopyFunction(
        CGM, Privates, ReductionArrayTy, Loc, TeamReductionRec, VarFieldMap);
    llvm::Value *BufferToGlobalRedFn = ::emitGlobalToListReduceFunction(
        CGM, Privates, ReductionArrayTy, Loc, TeamReductionRec, VarFieldMap,
        ReductionFn);

    llvm::Value *Args[] = {
        RTLoc,
        ThreadId,
        GlobalBufferPtr,
        CGF.Builder.getInt32(C.getLangOpts().OpenMPCUDAReductionBufNum),
        RL,
        ShuffleAndReduceFn,
        InterWarpCopyFn,
        GlobalToBufferCpyFn,
        GlobalToBufferRedFn,
        BufferToGlobalCpyFn,
        BufferToGlobalRedFn};

    Res = CGF.EmitRuntimeCall(
        createNVPTXRuntimeFunction(
            OMPRTL_NVPTX__kmpc_nvptx_teams_reduce_nowait_v2),
        Args);
  }

  // 5. Build if (res == 1)
  llvm::BasicBlock *ExitBB = CGF.createBasicBlock(".omp.reduction.done");
  llvm::BasicBlock *ThenBB = CGF.createBasicBlock(".omp.reduction.then");
  llvm::Value *Cond = CGF.Builder.CreateICmpEQ(
      Res, llvm::ConstantInt::get(CGM.Int32Ty, /*V=*/1));
  CGF.Builder.CreateCondBr(Cond, ThenBB, ExitBB);

  // 6. Build then branch: where we have reduced values in the master
  //    thread in each team.
  //    __kmpc_end_reduce{_nowait}(<gtid>);
  //    break;
  CGF.EmitBlock(ThenBB);

  // Add emission of __kmpc_end_reduce{_nowait}(<gtid>);
  auto &&CodeGen = [Privates, LHSExprs, RHSExprs, ReductionOps,
                    this](CodeGenFunction &CGF, PrePostActionTy &Action) {
    auto IPriv = Privates.begin();
    auto ILHS = LHSExprs.begin();
    auto IRHS = RHSExprs.begin();
    for (const Expr *E : ReductionOps) {
      emitSingleReductionCombiner(CGF, E, *IPriv, cast<DeclRefExpr>(*ILHS),
                                  cast<DeclRefExpr>(*IRHS));
      ++IPriv;
      ++ILHS;
      ++IRHS;
    }
  };
  llvm::Value *EndArgs[] = {ThreadId};
  RegionCodeGenTy RCG(CodeGen);
  NVPTXActionTy Action(
      nullptr, llvm::None,
      createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_end_reduce_nowait),
      EndArgs);
  RCG.setAction(Action);
  RCG(CGF);
  // There is no need to emit line number for unconditional branch.
  (void)ApplyDebugLocation::CreateEmpty(CGF);
  CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
}
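// Note: for reference-typed native parameters, translateParameter()
// below produces a restrict-qualified pointer parameter placed in the
// NVPTX local address space; roughly (an illustrative reading, not
// generated source), 'int &x' becomes 'int *__restrict x' qualified
// with address space 5.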
const VarDecl *
CGOpenMPRuntimeGPU::translateParameter(const FieldDecl *FD,
                                       const VarDecl *NativeParam) const {
  if (!NativeParam->getType()->isReferenceType())
    return NativeParam;
  QualType ArgType = NativeParam->getType();
  QualifierCollector QC;
  const Type *NonQualTy = QC.strip(ArgType);
  QualType PointeeTy = cast<ReferenceType>(NonQualTy)->getPointeeType();
  if (const auto *Attr = FD->getAttr<OMPCaptureKindAttr>()) {
    if (Attr->getCaptureKind() == OMPC_map) {
      PointeeTy = CGM.getContext().getAddrSpaceQualType(PointeeTy,
                                                        LangAS::opencl_global);
    } else if (Attr->getCaptureKind() == OMPC_firstprivate &&
               PointeeTy.isConstant(CGM.getContext())) {
      PointeeTy = CGM.getContext().getAddrSpaceQualType(PointeeTy,
                                                        LangAS::opencl_generic);
    }
  }
  ArgType = CGM.getContext().getPointerType(PointeeTy);
  QC.addRestrict();
  enum { NVPTX_local_addr = 5 };
  QC.addAddressSpace(getLangASFromTargetAS(NVPTX_local_addr));
  ArgType = QC.apply(CGM.getContext(), ArgType);
  if (isa<ImplicitParamDecl>(NativeParam))
    return ImplicitParamDecl::Create(
        CGM.getContext(), /*DC=*/nullptr, NativeParam->getLocation(),
        NativeParam->getIdentifier(), ArgType, ImplicitParamDecl::Other);
  return ParmVarDecl::Create(
      CGM.getContext(),
      const_cast<DeclContext *>(NativeParam->getDeclContext()),
      NativeParam->getBeginLoc(), NativeParam->getLocation(),
      NativeParam->getIdentifier(), ArgType,
      /*TInfo=*/nullptr, SC_None, /*DefArg=*/nullptr);
}

Address
CGOpenMPRuntimeGPU::getParameterAddress(CodeGenFunction &CGF,
                                        const VarDecl *NativeParam,
                                        const VarDecl *TargetParam) const {
  assert(NativeParam != TargetParam &&
         NativeParam->getType()->isReferenceType() &&
         "Native arg must not be the same as target arg.");
  Address LocalAddr = CGF.GetAddrOfLocalVar(TargetParam);
  QualType NativeParamType = NativeParam->getType();
  QualifierCollector QC;
  const Type *NonQualTy = QC.strip(NativeParamType);
  QualType NativePointeeTy = cast<ReferenceType>(NonQualTy)->getPointeeType();
  unsigned NativePointeeAddrSpace =
      CGF.getContext().getTargetAddressSpace(NativePointeeTy);
  QualType TargetTy = TargetParam->getType();
  llvm::Value *TargetAddr = CGF.EmitLoadOfScalar(
      LocalAddr, /*Volatile=*/false, TargetTy, SourceLocation());
  // First cast to generic.
  TargetAddr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
      TargetAddr, TargetAddr->getType()->getPointerElementType()->getPointerTo(
                      /*AddrSpace=*/0));
  // Cast from generic to native address space.
  TargetAddr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
      TargetAddr, TargetAddr->getType()->getPointerElementType()->getPointerTo(
                      NativePointeeAddrSpace));
  Address NativeParamAddr = CGF.CreateMemTemp(NativeParamType);
  CGF.EmitStoreOfScalar(TargetAddr, NativeParamAddr, /*Volatile=*/false,
                        NativeParamType);
  return NativeParamAddr;
}

void CGOpenMPRuntimeGPU::emitOutlinedFunctionCall(
    CodeGenFunction &CGF, SourceLocation Loc, llvm::FunctionCallee OutlinedFn,
    ArrayRef<llvm::Value *> Args) const {
  SmallVector<llvm::Value *, 4> TargetArgs;
  TargetArgs.reserve(Args.size());
  auto *FnType = OutlinedFn.getFunctionType();
  for (unsigned I = 0, E = Args.size(); I < E; ++I) {
    if (FnType->isVarArg() && FnType->getNumParams() <= I) {
      TargetArgs.append(std::next(Args.begin(), I), Args.end());
      break;
    }
    llvm::Type *TargetType = FnType->getParamType(I);
    llvm::Value *NativeArg = Args[I];
    if (!TargetType->isPointerTy()) {
      TargetArgs.emplace_back(NativeArg);
      continue;
    }
    llvm::Value *TargetArg = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
        NativeArg,
        NativeArg->getType()->getPointerElementType()->getPointerTo());
    TargetArgs.emplace_back(
        CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(TargetArg, TargetType));
  }
  CGOpenMPRuntime::emitOutlinedFunctionCall(CGF, Loc, OutlinedFn, TargetArgs);
}

/// Emit a function that wraps the outlined parallel region and controls
/// the arguments passed to it. The wrapper ensures that the outlined
/// function is called with the correct arguments when data is shared.
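/// The generated wrapper has, roughly, the following C prototype
/// (illustrative; the actual symbol is '<outlined-name>_wrapper'):
///
///   void <outlined>_wrapper(uint16_t parallel_level, uint32_t thread_id);
///
/// It retrieves the shared-variable array via
/// __kmpc_get_shared_variables and forwards the unpacked values to the
/// outlined parallel function.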
llvm::Function *CGOpenMPRuntimeGPU::createParallelDataSharingWrapper(
    llvm::Function *OutlinedParallelFn, const OMPExecutableDirective &D) {
  ASTContext &Ctx = CGM.getContext();
  const auto &CS = *D.getCapturedStmt(OMPD_parallel);

  // Create a function that takes as argument the source thread.
  FunctionArgList WrapperArgs;
  QualType Int16QTy =
      Ctx.getIntTypeForBitwidth(/*DestWidth=*/16, /*Signed=*/false);
  QualType Int32QTy =
      Ctx.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/false);
  ImplicitParamDecl ParallelLevelArg(Ctx, /*DC=*/nullptr, D.getBeginLoc(),
                                     /*Id=*/nullptr, Int16QTy,
                                     ImplicitParamDecl::Other);
  ImplicitParamDecl WrapperArg(Ctx, /*DC=*/nullptr, D.getBeginLoc(),
                               /*Id=*/nullptr, Int32QTy,
                               ImplicitParamDecl::Other);
  WrapperArgs.emplace_back(&ParallelLevelArg);
  WrapperArgs.emplace_back(&WrapperArg);

  const CGFunctionInfo &CGFI =
      CGM.getTypes().arrangeBuiltinFunctionDeclaration(Ctx.VoidTy, WrapperArgs);

  auto *Fn = llvm::Function::Create(
      CGM.getTypes().GetFunctionType(CGFI), llvm::GlobalValue::InternalLinkage,
      Twine(OutlinedParallelFn->getName(), "_wrapper"), &CGM.getModule());
  CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, CGFI);
  Fn->setLinkage(llvm::GlobalValue::InternalLinkage);
  Fn->setDoesNotRecurse();

  CodeGenFunction CGF(CGM, /*suppressNewContext=*/true);
  CGF.StartFunction(GlobalDecl(), Ctx.VoidTy, Fn, CGFI, WrapperArgs,
                    D.getBeginLoc(), D.getBeginLoc());

  const auto *RD = CS.getCapturedRecordDecl();
  auto CurField = RD->field_begin();

  Address ZeroAddr = CGF.CreateDefaultAlignTempAlloca(CGF.Int32Ty,
                                                      /*Name=*/".zero.addr");
  CGF.InitTempAlloca(ZeroAddr, CGF.Builder.getInt32(/*C*/ 0));
  // Get the array of arguments.
  SmallVector<llvm::Value *, 8> Args;

  Args.emplace_back(CGF.GetAddrOfLocalVar(&WrapperArg).getPointer());
  Args.emplace_back(ZeroAddr.getPointer());

  CGBuilderTy &Bld = CGF.Builder;
  auto CI = CS.capture_begin();

  // Use global memory for data sharing.
  // Handle passing of global args to workers.
  Address GlobalArgs =
      CGF.CreateDefaultAlignTempAlloca(CGF.VoidPtrPtrTy, "global_args");
  llvm::Value *GlobalArgsPtr = GlobalArgs.getPointer();
  llvm::Value *DataSharingArgs[] = {GlobalArgsPtr};
  CGF.EmitRuntimeCall(
      createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_get_shared_variables),
      DataSharingArgs);

  // Retrieve the shared variables from the list of references returned
  // by the runtime. Pass the variables to the outlined function.
  Address SharedArgListAddress = Address::invalid();
  if (CS.capture_size() > 0 ||
      isOpenMPLoopBoundSharingDirective(D.getDirectiveKind())) {
    SharedArgListAddress = CGF.EmitLoadOfPointer(
        GlobalArgs, CGF.getContext()
                        .getPointerType(CGF.getContext().getPointerType(
                            CGF.getContext().VoidPtrTy))
                        .castAs<PointerType>());
  }
  unsigned Idx = 0;
  if (isOpenMPLoopBoundSharingDirective(D.getDirectiveKind())) {
    Address Src = Bld.CreateConstInBoundsGEP(SharedArgListAddress, Idx);
    Address TypedAddress = Bld.CreatePointerBitCastOrAddrSpaceCast(
        Src, CGF.SizeTy->getPointerTo());
    llvm::Value *LB = CGF.EmitLoadOfScalar(
        TypedAddress,
        /*Volatile=*/false,
        CGF.getContext().getPointerType(CGF.getContext().getSizeType()),
        cast<OMPLoopDirective>(D).getLowerBoundVariable()->getExprLoc());
    Args.emplace_back(LB);
    ++Idx;
    Src = Bld.CreateConstInBoundsGEP(SharedArgListAddress, Idx);
    TypedAddress = Bld.CreatePointerBitCastOrAddrSpaceCast(
        Src, CGF.SizeTy->getPointerTo());
    llvm::Value *UB = CGF.EmitLoadOfScalar(
        TypedAddress,
        /*Volatile=*/false,
        CGF.getContext().getPointerType(CGF.getContext().getSizeType()),
        cast<OMPLoopDirective>(D).getUpperBoundVariable()->getExprLoc());
    Args.emplace_back(UB);
    ++Idx;
  }
  if (CS.capture_size() > 0) {
    ASTContext &CGFContext = CGF.getContext();
    for (unsigned I = 0, E = CS.capture_size(); I < E; ++I, ++CI, ++CurField) {
      QualType ElemTy = CurField->getType();
      Address Src = Bld.CreateConstInBoundsGEP(SharedArgListAddress, I + Idx);
      Address TypedAddress = Bld.CreatePointerBitCastOrAddrSpaceCast(
          Src, CGF.ConvertTypeForMem(CGFContext.getPointerType(ElemTy)));
      llvm::Value *Arg = CGF.EmitLoadOfScalar(TypedAddress,
                                              /*Volatile=*/false,
                                              CGFContext.getPointerType(ElemTy),
                                              CI->getLocation());
      if (CI->capturesVariableByCopy() &&
          !CI->getCapturedVar()->getType()->isAnyPointerType()) {
        Arg = castValueToType(CGF, Arg, ElemTy, CGFContext.getUIntPtrType(),
                              CI->getLocation());
      }
      Args.emplace_back(Arg);
    }
  }

  emitOutlinedFunctionCall(CGF, D.getBeginLoc(), OutlinedParallelFn, Args);
  CGF.FinishFunction();
  return Fn;
}

void CGOpenMPRuntimeGPU::emitFunctionProlog(CodeGenFunction &CGF,
                                            const Decl *D) {
  if (getDataSharingMode(CGM) != CGOpenMPRuntimeGPU::Generic)
    return;

  assert(D && "Expected function or captured|block decl.");
  assert(FunctionGlobalizedDecls.count(CGF.CurFn) == 0 &&
         "Function is registered already.");
  assert((!TeamAndReductions.first || TeamAndReductions.first == D) &&
         "Team is set but not processed.");
  const Stmt *Body = nullptr;
  bool NeedToDelayGlobalization = false;
  if (const auto *FD = dyn_cast<FunctionDecl>(D)) {
    Body = FD->getBody();
  } else if (const auto *BD = dyn_cast<BlockDecl>(D)) {
    Body = BD->getBody();
  } else if (const auto *CD = dyn_cast<CapturedDecl>(D)) {
    Body = CD->getBody();
    NeedToDelayGlobalization = CGF.CapturedStmtInfo->getKind() == CR_OpenMP;
    if (NeedToDelayGlobalization &&
        getExecutionMode() == CGOpenMPRuntimeGPU::EM_SPMD)
      return;
  }
  if (!Body)
    return;
  CheckVarsEscapingDeclContext VarChecker(CGF, TeamAndReductions.second);
  VarChecker.Visit(Body);
  const RecordDecl *GlobalizedVarsRecord =
      VarChecker.getGlobalizedRecord(IsInTTDRegion);
  TeamAndReductions.first = nullptr;
  TeamAndReductions.second.clear();
  ArrayRef<const ValueDecl *> EscapedVariableLengthDecls =
      VarChecker.getEscapedVariableLengthDecls();
  if (!GlobalizedVarsRecord && EscapedVariableLengthDecls.empty())
    return;
  auto I = FunctionGlobalizedDecls.try_emplace(CGF.CurFn).first;
  I->getSecond().MappedParams =
      std::make_unique<CodeGenFunction::OMPMapVars>();
  I->getSecond().GlobalRecord = GlobalizedVarsRecord;
  I->getSecond().EscapedParameters.insert(
      VarChecker.getEscapedParameters().begin(),
      VarChecker.getEscapedParameters().end());
  I->getSecond().EscapedVariableLengthDecls.append(
      EscapedVariableLengthDecls.begin(), EscapedVariableLengthDecls.end());
  DeclToAddrMapTy &Data = I->getSecond().LocalVarData;
  for (const ValueDecl *VD : VarChecker.getEscapedDecls()) {
    assert(VD->isCanonicalDecl() && "Expected canonical declaration");
    const FieldDecl *FD = VarChecker.getFieldForGlobalizedVar(VD);
    Data.insert(std::make_pair(VD, MappedVarData(FD, IsInTTDRegion)));
  }
  if (!IsInTTDRegion && !NeedToDelayGlobalization && !IsInParallelRegion) {
    CheckVarsEscapingDeclContext VarChecker(CGF, llvm::None);
    VarChecker.Visit(Body);
    I->getSecond().SecondaryGlobalRecord =
        VarChecker.getGlobalizedRecord(/*IsInTTDRegion=*/true);
    I->getSecond().SecondaryLocalVarData.emplace();
    DeclToAddrMapTy &Data = I->getSecond().SecondaryLocalVarData.getValue();
    for (const ValueDecl *VD : VarChecker.getEscapedDecls()) {
      assert(VD->isCanonicalDecl() && "Expected canonical declaration");
      const FieldDecl *FD = VarChecker.getFieldForGlobalizedVar(VD);
      Data.insert(
          std::make_pair(VD, MappedVarData(FD, /*IsInTTDRegion=*/true)));
    }
  }
  if (!NeedToDelayGlobalization) {
    emitGenericVarsProlog(CGF, D->getBeginLoc(), /*WithSPMDCheck=*/true);
    struct GlobalizationScope final : EHScopeStack::Cleanup {
      GlobalizationScope() = default;

      void Emit(CodeGenFunction &CGF, Flags flags) override {
        static_cast<CGOpenMPRuntimeGPU &>(CGF.CGM.getOpenMPRuntime())
            .emitGenericVarsEpilog(CGF, /*WithSPMDCheck=*/true);
      }
    };
    CGF.EHStack.pushCleanup<GlobalizationScope>(NormalAndEHCleanup);
  }
}

Address CGOpenMPRuntimeGPU::getAddressOfLocalVariable(CodeGenFunction &CGF,
                                                      const VarDecl *VD) {
  if (VD && VD->hasAttr<OMPAllocateDeclAttr>()) {
    const auto *A = VD->getAttr<OMPAllocateDeclAttr>();
    auto AS = LangAS::Default;
    switch (A->getAllocatorType()) {
      // Use the default allocator here as by default local vars are
      // threadlocal.
    case OMPAllocateDeclAttr::OMPNullMemAlloc:
    case OMPAllocateDeclAttr::OMPDefaultMemAlloc:
    case OMPAllocateDeclAttr::OMPThreadMemAlloc:
    case OMPAllocateDeclAttr::OMPHighBWMemAlloc:
    case OMPAllocateDeclAttr::OMPLowLatMemAlloc:
      // Follow the user decision - use default allocation.
      return Address::invalid();
    case OMPAllocateDeclAttr::OMPUserDefinedMemAlloc:
      // TODO: implement support for user-defined allocators.
      return Address::invalid();
    case OMPAllocateDeclAttr::OMPConstMemAlloc:
      AS = LangAS::cuda_constant;
      break;
    case OMPAllocateDeclAttr::OMPPTeamMemAlloc:
      AS = LangAS::cuda_shared;
      break;
    case OMPAllocateDeclAttr::OMPLargeCapMemAlloc:
    case OMPAllocateDeclAttr::OMPCGroupMemAlloc:
      break;
    }
    llvm::Type *VarTy = CGF.ConvertTypeForMem(VD->getType());
    auto *GV = new llvm::GlobalVariable(
        CGM.getModule(), VarTy, /*isConstant=*/false,
        llvm::GlobalValue::InternalLinkage, llvm::Constant::getNullValue(VarTy),
        VD->getName(),
        /*InsertBefore=*/nullptr, llvm::GlobalValue::NotThreadLocal,
        CGM.getContext().getTargetAddressSpace(AS));
    CharUnits Align = CGM.getContext().getDeclAlign(VD);
    GV->setAlignment(Align.getAsAlign());
    return Address(
        CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
            GV, VarTy->getPointerTo(CGM.getContext().getTargetAddressSpace(
                    VD->getType().getAddressSpace()))),
        Align);
  }

  if (getDataSharingMode(CGM) != CGOpenMPRuntimeGPU::Generic)
    return Address::invalid();

  VD = VD->getCanonicalDecl();
  auto I = FunctionGlobalizedDecls.find(CGF.CurFn);
  if (I == FunctionGlobalizedDecls.end())
    return Address::invalid();
  auto VDI = I->getSecond().LocalVarData.find(VD);
  if (VDI != I->getSecond().LocalVarData.end())
    return VDI->second.PrivateAddr;
  if (VD->hasAttrs()) {
    for (specific_attr_iterator<OMPReferencedVarAttr> IT(VD->attr_begin()),
         E(VD->attr_end());
         IT != E; ++IT) {
      auto VDI = I->getSecond().LocalVarData.find(
          cast<VarDecl>(cast<DeclRefExpr>(IT->getRef())->getDecl())
              ->getCanonicalDecl());
      if (VDI != I->getSecond().LocalVarData.end())
        return VDI->second.PrivateAddr;
    }
  }

  return Address::invalid();
}

void CGOpenMPRuntimeGPU::functionFinished(CodeGenFunction &CGF) {
  FunctionGlobalizedDecls.erase(CGF.CurFn);
  CGOpenMPRuntime::functionFinished(CGF);
}

void CGOpenMPRuntimeGPU::getDefaultDistScheduleAndChunk(
    CodeGenFunction &CGF, const OMPLoopDirective &S,
    OpenMPDistScheduleClauseKind &ScheduleKind,
    llvm::Value *&Chunk) const {
  auto &RT = static_cast<CGOpenMPRuntimeGPU &>(CGF.CGM.getOpenMPRuntime());
  if (getExecutionMode() == CGOpenMPRuntimeGPU::EM_SPMD) {
    ScheduleKind = OMPC_DIST_SCHEDULE_static;
    Chunk = CGF.EmitScalarConversion(
        RT.getGPUNumThreads(CGF),
        CGF.getContext().getIntTypeForBitwidth(32, /*Signed=*/0),
        S.getIterationVariable()->getType(), S.getBeginLoc());
    return;
  }
  CGOpenMPRuntime::getDefaultDistScheduleAndChunk(
      CGF, S, ScheduleKind, Chunk);
}
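// In SPMD mode the method above effectively picks
// dist_schedule(static, <threads-per-team>); as an illustrative reading
// (not emitted source), the distribute loop behaves as if
// 'dist_schedule(static, blockDim.x)' had been written, so the chunks
// handed to a team line up with the threads executing it.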
void CGOpenMPRuntimeGPU::getDefaultScheduleAndChunk(
    CodeGenFunction &CGF, const OMPLoopDirective &S,
    OpenMPScheduleClauseKind &ScheduleKind,
    const Expr *&ChunkExpr) const {
  ScheduleKind = OMPC_SCHEDULE_static;
  // Chunk size is 1 in this case.
  llvm::APInt ChunkSize(32, 1);
  ChunkExpr = IntegerLiteral::Create(
      CGF.getContext(), ChunkSize,
      CGF.getContext().getIntTypeForBitwidth(32, /*Signed=*/0),
      SourceLocation());
}

void CGOpenMPRuntimeGPU::adjustTargetSpecificDataForLambdas(
    CodeGenFunction &CGF, const OMPExecutableDirective &D) const {
  assert(isOpenMPTargetExecutionDirective(D.getDirectiveKind()) &&
         "Expected target-based directive.");
  const CapturedStmt *CS = D.getCapturedStmt(OMPD_target);
  for (const CapturedStmt::Capture &C : CS->captures()) {
    // Capture variables captured by reference in lambdas for target-based
    // directives.
    if (!C.capturesVariable())
      continue;
    const VarDecl *VD = C.getCapturedVar();
    const auto *RD = VD->getType()
                         .getCanonicalType()
                         .getNonReferenceType()
                         ->getAsCXXRecordDecl();
    if (!RD || !RD->isLambda())
      continue;
    Address VDAddr = CGF.GetAddrOfLocalVar(VD);
    LValue VDLVal;
    if (VD->getType().getCanonicalType()->isReferenceType())
      VDLVal = CGF.EmitLoadOfReferenceLValue(VDAddr, VD->getType());
    else
      VDLVal = CGF.MakeAddrLValue(
          VDAddr, VD->getType().getCanonicalType().getNonReferenceType());
    llvm::DenseMap<const VarDecl *, FieldDecl *> Captures;
    FieldDecl *ThisCapture = nullptr;
    RD->getCaptureFields(Captures, ThisCapture);
    if (ThisCapture && CGF.CapturedStmtInfo->isCXXThisExprCaptured()) {
      LValue ThisLVal =
          CGF.EmitLValueForFieldInitialization(VDLVal, ThisCapture);
      llvm::Value *CXXThis = CGF.LoadCXXThis();
      CGF.EmitStoreOfScalar(CXXThis, ThisLVal);
    }
    for (const LambdaCapture &LC : RD->captures()) {
      if (LC.getCaptureKind() != LCK_ByRef)
        continue;
      const VarDecl *VD = LC.getCapturedVar();
      if (!CS->capturesVariable(VD))
        continue;
      auto It = Captures.find(VD);
      assert(It != Captures.end() && "Found lambda capture without field.");
      LValue VarLVal = CGF.EmitLValueForFieldInitialization(VDLVal, It->second);
      Address VDAddr = CGF.GetAddrOfLocalVar(VD);
      if (VD->getType().getCanonicalType()->isReferenceType())
        VDAddr = CGF.EmitLoadOfReferenceLValue(VDAddr,
                                               VD->getType().getCanonicalType())
                     .getAddress(CGF);
      CGF.EmitStoreOfScalar(VDAddr.getPointer(), VarLVal);
    }
  }
}

unsigned CGOpenMPRuntimeGPU::getDefaultFirstprivateAddressSpace() const {
  return CGM.getContext().getTargetAddressSpace(LangAS::cuda_constant);
}

bool CGOpenMPRuntimeGPU::hasAllocateAttributeForGlobalVar(const VarDecl *VD,
                                                          LangAS &AS) {
  if (!VD || !VD->hasAttr<OMPAllocateDeclAttr>())
    return false;
  const auto *A = VD->getAttr<OMPAllocateDeclAttr>();
  switch (A->getAllocatorType()) {
  case OMPAllocateDeclAttr::OMPNullMemAlloc:
  case OMPAllocateDeclAttr::OMPDefaultMemAlloc:
    // Not supported, fall back to the default mem space.
  case OMPAllocateDeclAttr::OMPThreadMemAlloc:
  case OMPAllocateDeclAttr::OMPLargeCapMemAlloc:
  case OMPAllocateDeclAttr::OMPCGroupMemAlloc:
  case OMPAllocateDeclAttr::OMPHighBWMemAlloc:
  case OMPAllocateDeclAttr::OMPLowLatMemAlloc:
    AS = LangAS::Default;
    return true;
  case OMPAllocateDeclAttr::OMPConstMemAlloc:
    AS = LangAS::cuda_constant;
    return true;
  case OMPAllocateDeclAttr::OMPPTeamMemAlloc:
    AS = LangAS::cuda_shared;
    return true;
  case OMPAllocateDeclAttr::OMPUserDefinedMemAlloc:
    llvm_unreachable("Expected predefined allocator for variables with "
                     "static storage.");
  }
  return false;
}

// Get the current CudaArch and ignore any unknown values.
static CudaArch getCudaArch(CodeGenModule &CGM) {
  if (!CGM.getTarget().hasFeature("ptx"))
    return CudaArch::UNKNOWN;
  for (const auto &Feature : CGM.getTarget().getTargetOpts().FeatureMap) {
    if (Feature.getValue()) {
      CudaArch Arch = StringToCudaArch(Feature.getKey());
      if (Arch != CudaArch::UNKNOWN)
        return Arch;
    }
  }
  return CudaArch::UNKNOWN;
}

/// Check to see if the target architecture supports unified addressing,
/// which is a restriction for the OpenMP requires clause
/// "unified_shared_memory".
void CGOpenMPRuntimeGPU::processRequiresDirective(
    const OMPRequiresDecl *D) {
  for (const OMPClause *Clause : D->clauselists()) {
    if (Clause->getClauseKind() == OMPC_unified_shared_memory) {
      CudaArch Arch = getCudaArch(CGM);
      switch (Arch) {
      case CudaArch::SM_20:
      case CudaArch::SM_21:
      case CudaArch::SM_30:
      case CudaArch::SM_32:
      case CudaArch::SM_35:
      case CudaArch::SM_37:
      case CudaArch::SM_50:
      case CudaArch::SM_52:
      case CudaArch::SM_53:
      case CudaArch::SM_60:
      case CudaArch::SM_61:
      case CudaArch::SM_62: {
        SmallString<256> Buffer;
        llvm::raw_svector_ostream Out(Buffer);
        Out << "Target architecture " << CudaArchToString(Arch)
            << " does not support unified addressing";
        CGM.Error(Clause->getBeginLoc(), Out.str());
        return;
      }
      case CudaArch::SM_70:
      case CudaArch::SM_72:
      case CudaArch::SM_75:
      case CudaArch::SM_80:
      case CudaArch::GFX600:
      case CudaArch::GFX601:
      case CudaArch::GFX700:
      case CudaArch::GFX701:
      case CudaArch::GFX702:
      case CudaArch::GFX703:
      case CudaArch::GFX704:
      case CudaArch::GFX801:
      case CudaArch::GFX802:
      case CudaArch::GFX803:
      case CudaArch::GFX810:
      case CudaArch::GFX900:
      case CudaArch::GFX902:
      case CudaArch::GFX904:
      case CudaArch::GFX906:
      case CudaArch::GFX908:
      case CudaArch::GFX909:
      case CudaArch::GFX1010:
      case CudaArch::GFX1011:
      case CudaArch::GFX1012:
      case CudaArch::GFX1030:
      case CudaArch::GFX1031:
      case CudaArch::UNUSED:
      case CudaArch::UNKNOWN:
        break;
      case CudaArch::LAST:
        llvm_unreachable("Unexpected Cuda arch.");
      }
    }
  }
  CGOpenMPRuntime::processRequiresDirective(D);
}
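// For example, compiling a translation unit containing
//   #pragma omp requires unified_shared_memory
// for sm_70 or newer passes the check above, while targeting sm_60 (or
// any older NVPTX architecture) emits the "does not support unified
// addressing" diagnostic.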
/// Get number of SMs and number of blocks per SM.
static std::pair<unsigned, unsigned> getSMsBlocksPerSM(CodeGenModule &CGM) {
  std::pair<unsigned, unsigned> Data;
  if (CGM.getLangOpts().OpenMPCUDANumSMs)
    Data.first = CGM.getLangOpts().OpenMPCUDANumSMs;
  if (CGM.getLangOpts().OpenMPCUDABlocksPerSM)
    Data.second = CGM.getLangOpts().OpenMPCUDABlocksPerSM;
  if (Data.first && Data.second)
    return Data;
  switch (getCudaArch(CGM)) {
  case CudaArch::SM_20:
  case CudaArch::SM_21:
  case CudaArch::SM_30:
  case CudaArch::SM_32:
  case CudaArch::SM_35:
  case CudaArch::SM_37:
  case CudaArch::SM_50:
  case CudaArch::SM_52:
  case CudaArch::SM_53:
    return {16, 16};
  case CudaArch::SM_60:
  case CudaArch::SM_61:
  case CudaArch::SM_62:
    return {56, 32};
  case CudaArch::SM_70:
  case CudaArch::SM_72:
  case CudaArch::SM_75:
  case CudaArch::SM_80:
    return {84, 32};
  case CudaArch::GFX600:
  case CudaArch::GFX601:
  case CudaArch::GFX700:
  case CudaArch::GFX701:
  case CudaArch::GFX702:
  case CudaArch::GFX703:
  case CudaArch::GFX704:
  case CudaArch::GFX801:
  case CudaArch::GFX802:
  case CudaArch::GFX803:
  case CudaArch::GFX810:
  case CudaArch::GFX900:
  case CudaArch::GFX902:
  case CudaArch::GFX904:
  case CudaArch::GFX906:
  case CudaArch::GFX908:
  case CudaArch::GFX909:
  case CudaArch::GFX1010:
  case CudaArch::GFX1011:
  case CudaArch::GFX1012:
  case CudaArch::GFX1030:
  case CudaArch::GFX1031:
  case CudaArch::UNUSED:
  case CudaArch::UNKNOWN:
    break;
  case CudaArch::LAST:
    llvm_unreachable("Unexpected Cuda arch.");
  }
  llvm_unreachable("Unexpected NVPTX target without ptx feature.");
}

void CGOpenMPRuntimeGPU::clear() {
  if (!GlobalizedRecords.empty() &&
      !CGM.getLangOpts().OpenMPCUDATargetParallel) {
    ASTContext &C = CGM.getContext();
    llvm::SmallVector<const GlobalPtrSizeRecsTy *, 4> GlobalRecs;
    llvm::SmallVector<const GlobalPtrSizeRecsTy *, 4> SharedRecs;
    RecordDecl *StaticRD = C.buildImplicitRecord(
        "_openmp_static_memory_type_$_", RecordDecl::TagKind::TTK_Union);
    StaticRD->startDefinition();
    RecordDecl *SharedStaticRD = C.buildImplicitRecord(
        "_shared_openmp_static_memory_type_$_", RecordDecl::TagKind::TTK_Union);
    SharedStaticRD->startDefinition();
    for (const GlobalPtrSizeRecsTy &Records : GlobalizedRecords) {
      if (Records.Records.empty())
        continue;
      unsigned Size = 0;
      unsigned RecAlignment = 0;
      for (const RecordDecl *RD : Records.Records) {
        QualType RDTy = C.getRecordType(RD);
        unsigned Alignment = C.getTypeAlignInChars(RDTy).getQuantity();
        RecAlignment = std::max(RecAlignment, Alignment);
        unsigned RecSize = C.getTypeSizeInChars(RDTy).getQuantity();
        Size =
            llvm::alignTo(llvm::alignTo(Size, Alignment) + RecSize, Alignment);
      }
      Size = llvm::alignTo(Size, RecAlignment);
      llvm::APInt ArySize(/*numBits=*/64, Size);
      QualType SubTy = C.getConstantArrayType(
          C.CharTy, ArySize, nullptr, ArrayType::Normal, /*IndexTypeQuals=*/0);
      const bool UseSharedMemory = Size <= SharedMemorySize;
      auto *Field =
          FieldDecl::Create(C, UseSharedMemory ? SharedStaticRD : StaticRD,
                            SourceLocation(), SourceLocation(), nullptr, SubTy,
                            C.getTrivialTypeSourceInfo(SubTy, SourceLocation()),
                            /*BW=*/nullptr, /*Mutable=*/false,
                            /*InitStyle=*/ICIS_NoInit);
      Field->setAccess(AS_public);
      if (UseSharedMemory) {
        SharedStaticRD->addDecl(Field);
        SharedRecs.push_back(&Records);
      } else {
        StaticRD->addDecl(Field);
        GlobalRecs.push_back(&Records);
      }
      Records.RecSize->setInitializer(llvm::ConstantInt::get(CGM.SizeTy, Size));
      Records.UseSharedMemory->setInitializer(
          llvm::ConstantInt::get(CGM.Int16Ty, UseSharedMemory ? 1 : 0));
    }
    // Allocate a SharedMemorySize buffer for the shared memory.
    // FIXME: nvlink does not handle weak linkage correctly (objects with
    // different sizes are reported as erroneous).
    // Restore this code as soon as nvlink is fixed.
    if (!SharedStaticRD->field_empty()) {
      llvm::APInt ArySize(/*numBits=*/64, SharedMemorySize);
      QualType SubTy = C.getConstantArrayType(
          C.CharTy, ArySize, nullptr, ArrayType::Normal, /*IndexTypeQuals=*/0);
      auto *Field = FieldDecl::Create(
          C, SharedStaticRD, SourceLocation(), SourceLocation(), nullptr, SubTy,
          C.getTrivialTypeSourceInfo(SubTy, SourceLocation()),
          /*BW=*/nullptr, /*Mutable=*/false,
          /*InitStyle=*/ICIS_NoInit);
      Field->setAccess(AS_public);
      SharedStaticRD->addDecl(Field);
    }
    SharedStaticRD->completeDefinition();
    if (!SharedStaticRD->field_empty()) {
      QualType StaticTy = C.getRecordType(SharedStaticRD);
      llvm::Type *LLVMStaticTy = CGM.getTypes().ConvertTypeForMem(StaticTy);
      auto *GV = new llvm::GlobalVariable(
          CGM.getModule(), LLVMStaticTy,
          /*isConstant=*/false, llvm::GlobalValue::CommonLinkage,
          llvm::Constant::getNullValue(LLVMStaticTy),
          "_openmp_shared_static_glob_rd_$_", /*InsertBefore=*/nullptr,
          llvm::GlobalValue::NotThreadLocal,
          C.getTargetAddressSpace(LangAS::cuda_shared));
      auto *Replacement = llvm::ConstantExpr::getPointerBitCastOrAddrSpaceCast(
          GV, CGM.VoidPtrTy);
      for (const GlobalPtrSizeRecsTy *Rec : SharedRecs) {
        Rec->Buffer->replaceAllUsesWith(Replacement);
        Rec->Buffer->eraseFromParent();
      }
    }
    StaticRD->completeDefinition();
    if (!StaticRD->field_empty()) {
      QualType StaticTy = C.getRecordType(StaticRD);
      std::pair<unsigned, unsigned> SMsBlockPerSM = getSMsBlocksPerSM(CGM);
      llvm::APInt Size1(32, SMsBlockPerSM.second);
      QualType Arr1Ty =
          C.getConstantArrayType(StaticTy, Size1, nullptr, ArrayType::Normal,
                                 /*IndexTypeQuals=*/0);
      llvm::APInt Size2(32, SMsBlockPerSM.first);
      QualType Arr2Ty =
          C.getConstantArrayType(Arr1Ty, Size2, nullptr, ArrayType::Normal,
                                 /*IndexTypeQuals=*/0);
      llvm::Type *LLVMArr2Ty = CGM.getTypes().ConvertTypeForMem(Arr2Ty);
      // FIXME: nvlink does not handle weak linkage correctly (objects with
      // different sizes are reported as erroneous).
      // Restore CommonLinkage as soon as nvlink is fixed.
      auto *GV = new llvm::GlobalVariable(
          CGM.getModule(), LLVMArr2Ty,
          /*isConstant=*/false, llvm::GlobalValue::InternalLinkage,
          llvm::Constant::getNullValue(LLVMArr2Ty),
          "_openmp_static_glob_rd_$_");
      auto *Replacement = llvm::ConstantExpr::getPointerBitCastOrAddrSpaceCast(
          GV, CGM.VoidPtrTy);
      for (const GlobalPtrSizeRecsTy *Rec : GlobalRecs) {
        Rec->Buffer->replaceAllUsesWith(Replacement);
        Rec->Buffer->eraseFromParent();
      }
    }
  }
  if (!TeamsReductions.empty()) {
    ASTContext &C = CGM.getContext();
    RecordDecl *StaticRD = C.buildImplicitRecord(
        "_openmp_teams_reduction_type_$_", RecordDecl::TagKind::TTK_Union);
    StaticRD->startDefinition();
    for (const RecordDecl *TeamReductionRec : TeamsReductions) {
      QualType RecTy = C.getRecordType(TeamReductionRec);
      auto *Field = FieldDecl::Create(
          C, StaticRD, SourceLocation(), SourceLocation(), nullptr, RecTy,
          C.getTrivialTypeSourceInfo(RecTy, SourceLocation()),
          /*BW=*/nullptr, /*Mutable=*/false,
          /*InitStyle=*/ICIS_NoInit);
      Field->setAccess(AS_public);
      StaticRD->addDecl(Field);
    }
    StaticRD->completeDefinition();
    QualType StaticTy = C.getRecordType(StaticRD);
    llvm::Type *LLVMReductionsBufferTy =
        CGM.getTypes().ConvertTypeForMem(StaticTy);
    // FIXME: nvlink does not handle weak linkage correctly (objects with
    // different sizes are reported as erroneous).
    // Restore CommonLinkage as soon as nvlink is fixed.
    auto *GV = new llvm::GlobalVariable(
        CGM.getModule(), LLVMReductionsBufferTy,
        /*isConstant=*/false, llvm::GlobalValue::InternalLinkage,
        llvm::Constant::getNullValue(LLVMReductionsBufferTy),
        "_openmp_teams_reductions_buffer_$_");
    KernelTeamsReductionPtr->setInitializer(
        llvm::ConstantExpr::getPointerBitCastOrAddrSpaceCast(GV,
                                                             CGM.VoidPtrTy));
  }
  CGOpenMPRuntime::clear();
}